Dataset schema (one record per source file):

  repo             string   (1 to 99 chars)
  file             string   (13 to 215 chars)
  code             string   (12 to 59.2M chars)
  file_length      int64    (12 to 59.2M)
  avg_line_length  float64  (3.82 to 1.48M)
  max_line_length  int64    (12 to 2.51M)
  extension_type   string   (1 distinct value)
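The three numeric columns are derived from the `code` string. Below is a minimal sketch of one plausible derivation; the helper name `line_stats` is illustrative, and the dump does not document the exact convention (for example, whether trailing newlines count toward `file_length`):

def line_stats(code: str) -> dict:
    """Recompute the per-file statistics for one record's `code` field."""
    lines = code.splitlines()
    return {
        "file_length": len(code),  # total number of characters in the file
        # mean characters per line; guard against empty files
        "avg_line_length": len(code) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
    }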
repo: sign-topic
file: sign-topic-main/tests/test_label_smoothing.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import copy
import unittest

import tests.utils as test_utils
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import (
    LabelSmoothedCrossEntropyCriterion,
)


class TestLabelSmoothing(unittest.TestCase):
    def setUp(self):
        # build dictionary
        self.d = test_utils.dummy_dictionary(3)
        vocab = len(self.d)
        self.assertEqual(vocab, 4 + 3)  # 4 special + 3 tokens
        self.assertEqual(self.d.pad(), 1)
        self.assertEqual(self.d.eos(), 2)
        self.assertEqual(self.d.unk(), 3)
        pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6  # noqa: F841

        # build dataset
        self.data = [
            # the first batch item has padding
            {
                "source": torch.LongTensor([w1, eos]),
                "target": torch.LongTensor([w1, eos]),
            },
            {
                "source": torch.LongTensor([w1, eos]),
                "target": torch.LongTensor([w1, w1, eos]),
            },
        ]
        self.sample = next(test_utils.dummy_dataloader(self.data))

        # build model
        self.args = argparse.Namespace()
        self.args.sentence_avg = False
        self.args.report_accuracy = False
        self.args.probs = (
            torch.FloatTensor(
                [
                    # pad   eos   unk   w1    w2    w3
                    [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
                    [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
                    [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
                ]
            )
            .unsqueeze(0)
            .expand(2, 3, 7)
        )  # add batch dimension
        self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
        self.model = self.task.build_model(self.args)

    def test_nll_loss(self):
        self.args.label_smoothing = 0.1
        nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
            self.args, self.task
        )
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(
            self.model, self.sample
        )
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
            self.model, self.sample
        )
        self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6)
        self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6)

    def test_padding(self):
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample)

        def get_one_no_padding(idx):
            # create a new sample with just a single batch item so that there's
            # no padding
            sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
            args1 = copy.copy(self.args)
            args1.probs = args1.probs[idx, :, :].unsqueeze(0)
            model1 = self.task.build_model(args1)
            loss1, _, _ = crit(model1, sample1)
            return loss1

        loss1 = get_one_no_padding(0)
        loss2 = get_one_no_padding(1)
        self.assertAlmostEqual(loss, loss1 + loss2)

    def test_reduction(self):
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample, reduce=True)
        unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
        self.assertAlmostEqual(loss, unreduced_loss.sum())

    def test_zero_eps(self):
        self.args.label_smoothing = 0.0
        nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
            self.args, self.task
        )
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(
            self.model, self.sample
        )
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
            self.model, self.sample
        )
        self.assertAlmostEqual(nll_loss, smooth_loss)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == "__main__":
    unittest.main()

file_length: 4,629 | avg_line_length: 36.33871 | max_line_length: 88 | extension_type: py
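Records in this shape can be filtered on the cheap metadata columns before touching the large `code` field. A sketch using the Hugging Face `datasets` streaming API; the dataset path "user/python-files" is a placeholder, not the actual source of this dump:

from datasets import load_dataset

# Placeholder path: substitute the real dataset repository.
ds = load_dataset("user/python-files", split="train", streaming=True)

for record in ds:
    # Select Python files and skip very wide ones using only the metadata.
    if record["extension_type"] == "py" and record["max_line_length"] <= 100:
        print(record["repo"], record["file"], record["file_length"])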
repo: sign-topic
file: sign-topic-main/tests/test_convtbc.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
import torch.nn as nn
from fairseq.modules import ConvTBC


class TestConvTBC(unittest.TestCase):
    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(
            output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data
        )

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(
            conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data
        )
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(
            input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data
        )

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == "__main__":
    unittest.main()

file_length: 1,745 | avg_line_length: 30.745455 | max_line_length: 82 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_lm_context_window.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq.data import MonolingualDataset
from fairseq.tasks.language_modeling import LanguageModelingConfig, LanguageModelingTask
from tests import utils as test_utils


class TestLMContextWindow(unittest.TestCase):
    def test_eval_dataloader(self):
        dictionary = test_utils.dummy_dictionary(10)
        assert len(dictionary) == 14  # 4 extra special symbols
        assert dictionary.pad() == 1

        dataset = test_utils.TestDataset(
            [
                torch.tensor([4, 5, 6, 7], dtype=torch.long),
                torch.tensor([8, 9, 10, 11], dtype=torch.long),
                torch.tensor([12, 13], dtype=torch.long),
            ]
        )
        dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary)

        config = LanguageModelingConfig(tokens_per_sample=4)
        task = LanguageModelingTask(config, dictionary)

        eval_dataloader = task.eval_lm_dataloader(
            dataset=dataset,
            batch_size=1,
            context_window=2,
        )

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1]
        assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1]

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11]
        assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11]

        batch = next(eval_dataloader)
        assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13]
        assert batch["target"][0].tolist() == [1, 1, 12, 13]


if __name__ == "__main__":
    unittest.main()

file_length: 1,838 | avg_line_length: 33.055556 | max_line_length: 88 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_amp_optimizer.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import copy
import unittest

import torch
from torch.cuda.amp import GradScaler, autocast

from fairseq.optim import build_optimizer


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScalingAMP(unittest.TestCase):
    def setUp(self):
        self.x = torch.tensor([2.0]).cuda().half()
        weight = 3.0
        bias = 5.0
        self.error = 1.0
        self.target = torch.tensor([self.x * weight + bias + self.error]).cuda()
        self.loss_fn = torch.nn.L1Loss()

        self.model = torch.nn.Linear(1, 1)
        self.model.weight.data = torch.tensor([[weight]])
        self.model.bias.data = torch.tensor([bias])
        self.model.cuda()
        self.params = list(self.model.parameters())

        self.namespace_dls = argparse.Namespace(
            optimizer="adam",
            lr=[0.1],
            adam_betas="(0.9, 0.999)",
            adam_eps=1e-8,
            weight_decay=0.0,
            threshold_loss_scale=1,
            min_loss_scale=1e-4,
        )
        self.scaler = GradScaler(
            init_scale=1,
            growth_interval=1,
        )

    def run_iter(self, model, params, optimizer):
        optimizer.zero_grad()
        with autocast():
            y = model(self.x)
            loss = self.loss_fn(y, self.target)
        self.scaler.scale(loss).backward()
        self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))

        self.scaler.unscale_(optimizer)
        grad_norm = optimizer.clip_grad_norm(0)
        self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)

        self.scaler.step(optimizer)
        self.scaler.update()
        self.assertEqual(
            model.weight,
            torch.tensor([[3.1]], device="cuda:0", requires_grad=True),
        )
        self.assertEqual(
            model.bias,
            torch.tensor([5.1], device="cuda:0", requires_grad=True),
        )
        self.assertEqual(self.scaler.get_scale(), 2.0)

    def test_automatic_mixed_precision(self):
        model = copy.deepcopy(self.model)
        params = list(model.parameters())
        optimizer = build_optimizer(self.namespace_dls, params)

        self.run_iter(model, params, optimizer)

file_length: 2,411 | avg_line_length: 30.736842 | max_line_length: 87 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_token_block_dataset.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import tests.utils as test_utils
import torch
from fairseq.data import TokenBlockDataset


class TestTokenBlockDataset(unittest.TestCase):
    def _build_dataset(self, data, **kwargs):
        sizes = [len(x) for x in data]
        underlying_ds = test_utils.TestDataset(data)
        return TokenBlockDataset(underlying_ds, sizes, **kwargs)

    def test_eos_break_mode(self):
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [1])
        self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])

        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos")
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
        self.assertEqual(ds[2].tolist(), [1])

    def test_block_break_mode(self):
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([9, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none")
        self.assertEqual(ds[0].tolist(), [5, 4, 3])
        self.assertEqual(ds[1].tolist(), [2, 1, 8])
        self.assertEqual(ds[2].tolist(), [7, 6, 1])
        self.assertEqual(ds[3].tolist(), [9, 1])

    def test_complete_break_mode(self):
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([9, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(
            data, block_size=6, pad=0, eos=1, break_mode="complete"
        )
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])

        data = [
            torch.tensor([4, 3, 2, 1], dtype=torch.long),
            torch.tensor([5, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
            torch.tensor([6, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(
            data, block_size=3, pad=0, eos=1, break_mode="complete"
        )
        self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [5, 1, 1])
        self.assertEqual(ds[2].tolist(), [6, 1])

    def test_4billion_tokens(self):
        """Regression test for numpy type promotion issue
        https://github.com/numpy/numpy/issues/5745"""
        data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000
        ds = self._build_dataset(
            data, block_size=6, pad=0, eos=1, break_mode="complete"
        )
        ds[-1]  # __getitem__ works
        start, end = ds.slice_indices[-1]
        assert end > 4294967295  # data must be sufficiently large to overflow uint32
        assert not isinstance(
            end + 1, float
        )  # this would also raise, since np.uint64(1) + 1 => 2.0


if __name__ == "__main__":
    unittest.main()

file_length: 3,629 | avg_line_length: 38.032258 | max_line_length: 103 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_transformer.py
code:

import argparse
import unittest
from typing import Any, Dict, Sequence

import torch
from fairseq.models import transformer

from tests.test_roberta import FakeTask


def mk_sample(tok: Sequence[int] = None, batch_size: int = 2) -> Dict[str, Any]:
    if not tok:
        tok = [10, 11, 12, 13, 14, 15, 2]

    batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size)
    sample = {
        "net_input": {
            "src_tokens": batch,
            "prev_output_tokens": batch,
            "src_lengths": torch.tensor(
                [len(tok)] * batch_size, dtype=torch.long, device=batch.device
            ),
        },
        "target": batch[:, 1:],
    }
    return sample


def mk_transformer(**extra_args: Any):
    overrides = {
        # Use characteristic dimensions
        "encoder_embed_dim": 12,
        "encoder_ffn_embed_dim": 14,
        "decoder_embed_dim": 12,
        "decoder_ffn_embed_dim": 14,
        # Disable dropout so we have comparable tests.
        "dropout": 0,
        "attention_dropout": 0,
        "activation_dropout": 0,
        "encoder_layerdrop": 0,
    }
    overrides.update(extra_args)
    # Overrides the defaults from the parser
    args = argparse.Namespace(**overrides)
    transformer.tiny_architecture(args)

    torch.manual_seed(0)
    task = FakeTask(args)
    return transformer.TransformerModel.build_model(args, task)


class TransformerTestCase(unittest.TestCase):
    def test_forward_backward(self):
        model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=12)
        sample = mk_sample()
        o, _ = model.forward(**sample["net_input"])
        loss = o.sum()
        loss.backward()

    def test_different_encoder_decoder_embed_dim(self):
        model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=16)
        sample = mk_sample()
        o, _ = model.forward(**sample["net_input"])
        loss = o.sum()
        loss.backward()

file_length: 1,942 | avg_line_length: 28.439394 | max_line_length: 80 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_multi_corpus_sampled_dataset.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from collections import OrderedDict

import numpy as np
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from tests.test_train import mock_dict


class TestMultiCorpusSampledDataset(unittest.TestCase):
    def setUp(self):
        d = mock_dict()
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1,
            sizes=[tokens_1.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_1 = LanguagePairDataset(
            tokens_ds1, tokens_ds1.sizes, d, shuffle=False
        )
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2,
            sizes=[tokens_2.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_2 = LanguagePairDataset(
            tokens_ds2, tokens_ds2.sizes, d, shuffle=False
        )

    def _test_sample_helper(
        self,
        expected_sample_from_first_ds_percentage,
        num_samples=1000,
        sampling_func=None,
    ):
        # To make sure test is not flaky
        np.random.seed(0)
        if sampling_func is None:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
            )
        else:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
                sampling_func=sampling_func,
            )
        m.ordered_indices()
        count_sample_from_first_dataset = 0
        for _ in range(num_samples):
            if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1:
                count_sample_from_first_dataset += 1
        sample_from_first_ds_percentage = (
            1.0 * count_sample_from_first_dataset / num_samples
        )
        self.assertLess(
            abs(
                sample_from_first_ds_percentage
                - expected_sample_from_first_ds_percentage
            ),
            0.01,
        )

    def test_multi_corpus_sampled_dataset_uniform_sample(self):
        self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5)

    def test_multi_corpus_sampled_dataset_weighted_sample(self):
        def naive_weighted_sample(weights):
            def f(input):
                v = np.random.random()
                agg = 0
                for i, weight in enumerate(weights):
                    agg += weight
                    if agg > v:
                        return i

            return f

        self._test_sample_helper(
            expected_sample_from_first_ds_percentage=0.9,
            sampling_func=naive_weighted_sample(weights=[0.9, 0.1]),
        )

file_length: 3,109 | avg_line_length: 31.395833 | max_line_length: 79 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_dictionary.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import io
import os
import string
import tempfile
import unittest

import torch
from fairseq import tokenizer
from fairseq.data import Dictionary


class TestDictionary(unittest.TestCase):
    def test_finalize(self):
        txt = [
            "A B C D",
            "B C D",
            "C D",
            "D",
        ]
        ref_ids1 = list(
            map(
                torch.IntTensor,
                [
                    [4, 5, 6, 7, 2],
                    [5, 6, 7, 2],
                    [6, 7, 2],
                    [7, 2],
                ],
            )
        )
        ref_ids2 = list(
            map(
                torch.IntTensor,
                [
                    [7, 6, 5, 4, 2],
                    [6, 5, 4, 2],
                    [5, 4, 2],
                    [4, 2],
                ],
            )
        )

        # build dictionary
        d = Dictionary()
        for line in txt:
            d.encode_line(line, add_if_not_exist=True)

        def get_ids(dictionary):
            ids = []
            for line in txt:
                ids.append(dictionary.encode_line(line, add_if_not_exist=False))
            return ids

        def assertMatch(ids, ref_ids):
            for toks, ref_toks in zip(ids, ref_ids):
                self.assertEqual(toks.size(), ref_toks.size())
                self.assertEqual(0, (toks != ref_toks).sum().item())

        ids = get_ids(d)
        assertMatch(ids, ref_ids1)

        # check finalized dictionary
        d.finalize()
        finalized_ids = get_ids(d)
        assertMatch(finalized_ids, ref_ids2)

        # write to disk and reload
        with tempfile.NamedTemporaryFile(mode="w") as tmp_dict:
            d.save(tmp_dict.name)
            d = Dictionary.load(tmp_dict.name)
            reload_ids = get_ids(d)
            assertMatch(reload_ids, ref_ids2)
            assertMatch(finalized_ids, reload_ids)

    def test_overwrite(self):
        # for example, Camembert overwrites <unk>, <s> and </s>
        dict_file = io.StringIO(
            "<unk> 999 #fairseq:overwrite\n"
            "<s> 999 #fairseq:overwrite\n"
            "</s> 999 #fairseq:overwrite\n"
            ", 999\n"
            "▁de 999\n"
        )
        d = Dictionary()
        d.add_from_file(dict_file)
        self.assertEqual(d.index("<pad>"), 1)
        self.assertEqual(d.index("foo"), 3)
        self.assertEqual(d.index("<unk>"), 4)
        self.assertEqual(d.index("<s>"), 5)
        self.assertEqual(d.index("</s>"), 6)
        self.assertEqual(d.index(","), 7)
        self.assertEqual(d.index("▁de"), 8)

    def test_no_overwrite(self):
        # for example, Camembert overwrites <unk>, <s> and </s>
        dict_file = io.StringIO(
            "<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n"
        )
        d = Dictionary()
        with self.assertRaisesRegex(RuntimeError, "Duplicate"):
            d.add_from_file(dict_file)

    def test_space(self):
        # for example, character models treat space as a symbol
        dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n")
        d = Dictionary()
        d.add_from_file(dict_file)
        self.assertEqual(d.index(" "), 4)
        self.assertEqual(d.index("a"), 5)
        self.assertEqual(d.index("b"), 6)

    def test_add_file_to_dict(self):
        counts = {}
        num_lines = 100
        per_line = 10
        with tempfile.TemporaryDirectory("test_sampling") as data_dir:
            filename = os.path.join(data_dir, "dummy.txt")
            with open(filename, "w", encoding="utf-8") as data:
                for c in string.ascii_letters:
                    line = f"{c} " * per_line
                    for _ in range(num_lines):
                        data.write(f"{line}\n")
                    counts[c] = per_line * num_lines
                    per_line += 5

            dict = Dictionary()
            Dictionary.add_file_to_dictionary(
                filename, dict, tokenizer.tokenize_line, 10
            )
            dict.finalize(threshold=0, nwords=-1, padding_factor=8)

            for c in string.ascii_letters:
                count = dict.get_count(dict.index(c))
                self.assertEqual(
                    counts[c], count, f"{c} count is {count} but should be {counts[c]}"
                )


if __name__ == "__main__":
    unittest.main()

file_length: 4,545 | avg_line_length: 30.136986 | max_line_length: 87 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_utils.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq import utils


class TestUtils(unittest.TestCase):
    def test_convert_padding_direction(self):
        pad = 1
        left_pad = torch.LongTensor(
            [
                [2, 3, 4, 5, 6],
                [1, 7, 8, 9, 10],
                [1, 1, 1, 11, 12],
            ]
        )
        right_pad = torch.LongTensor(
            [
                [2, 3, 4, 5, 6],
                [7, 8, 9, 10, 1],
                [11, 12, 1, 1, 1],
            ]
        )

        self.assertAlmostEqual(
            right_pad,
            utils.convert_padding_direction(
                left_pad,
                pad,
                left_to_right=True,
            ),
        )
        self.assertAlmostEqual(
            left_pad,
            utils.convert_padding_direction(
                right_pad,
                pad,
                right_to_left=True,
            ),
        )

    def test_make_positions(self):
        pad = 1
        left_pad_input = torch.LongTensor(
            [
                [9, 9, 9, 9, 9],
                [1, 9, 9, 9, 9],
                [1, 1, 1, 9, 9],
            ]
        )
        left_pad_output = torch.LongTensor(
            [
                [2, 3, 4, 5, 6],
                [1, 2, 3, 4, 5],
                [1, 1, 1, 2, 3],
            ]
        )
        right_pad_input = torch.LongTensor(
            [
                [9, 9, 9, 9, 9],
                [9, 9, 9, 9, 1],
                [9, 9, 1, 1, 1],
            ]
        )
        right_pad_output = torch.LongTensor(
            [
                [2, 3, 4, 5, 6],
                [2, 3, 4, 5, 1],
                [2, 3, 1, 1, 1],
            ]
        )

        self.assertAlmostEqual(
            left_pad_output,
            utils.make_positions(left_pad_input, pad),
        )
        self.assertAlmostEqual(
            right_pad_output,
            utils.make_positions(right_pad_input, pad),
        )

    def test_clip_grad_norm_(self):
        params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, 0.0)

        params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
        for p in params:
            p.grad = torch.full((5,), fill_value=2.0)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        exp_grad_norm = torch.full((15,), fill_value=2.0).norm()
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, exp_grad_norm)

        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertAlmostEqual(grad_norm, torch.tensor(1.0))

    def test_resolve_max_positions_with_tuple(self):
        resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
        self.assertEqual(resolved, (2000, 100, 2000))

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)


if __name__ == "__main__":
    unittest.main()

file_length: 3,295 | avg_line_length: 27.66087 | max_line_length: 78 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/test_character_token_embedder.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder


class TestCharacterTokenEmbedder(unittest.TestCase):
    def test_character_token_embedder(self):
        vocab = Dictionary()
        vocab.add_symbol("hello")
        vocab.add_symbol("there")

        embedder = CharacterTokenEmbedder(
            vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2
        )

        test_sents = [["hello", "unk", "there"], ["there"], ["hello", "there"]]
        max_len = max(len(s) for s in test_sents)
        input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][j + 1] = vocab.index(test_sents[i][j])
            input[i][j + 2] = vocab.eos()
        embs = embedder(input)

        assert embs.size() == (len(test_sents), max_len + 2, 5)
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][-1])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])

        embs.sum().backward()
        assert embedder.char_embeddings.weight.grad is not None

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == "__main__":
    unittest.main()

file_length: 1,678 | avg_line_length: 33.265306 | max_line_length: 81 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/gpu/test_ema_gpu.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from copy import deepcopy
from dataclasses import dataclass
from typing import Optional

import torch
from fairseq.models.ema import EMA


class DummyModule(torch.nn.Module):
    def __init__(self) -> None:
        """LightningModule for testing purposes

        Args:
            epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum
            validation loss for testing purposes (zero based). If None this is ignored. Defaults to None.
        """
        super().__init__()
        self.layer = torch.nn.Linear(in_features=32, out_features=2)
        self.another_layer = torch.nn.Linear(in_features=2, out_features=2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.layer(x)
        return self.another_layer(x)


@dataclass
class EMAConfig(object):
    ema_decay: float = 0.99
    ema_start_update: int = 0
    ema_fp32: bool = False
    ema_seed_model: Optional[str] = None


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestEMAGPU(unittest.TestCase):
    def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None):
        diff = x.float() - y.float()
        diff_norm = torch.norm(diff)
        other_norm = torch.norm(y.float())

        if msg is None:
            msg = "|input - other| > {} + {} * |other|".format(atol, rtol)

        self.assertLessEqual(
            diff_norm,
            atol + rtol * other_norm,
            msg=msg,
        )

    def test_ema(self):
        model = DummyModule().cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        state = deepcopy(model.state_dict())
        config = EMAConfig()
        ema = EMA(model, config)

        # set decay
        ema._set_decay(config.ema_decay)
        self.assertEqual(ema.get_decay(), config.ema_decay)

        # get model
        self.assertEqual(ema.get_model(), ema.model)

        # Since fp32 params is not used, it should be of size 0
        self.assertEqual(len(ema.fp32_params), 0)

        # EMA step
        x = torch.randn(32).cuda()
        y = model(x)
        loss = y.sum()
        loss.backward()
        optimizer.step()

        ema.step(model)

        ema_state_dict = ema.get_model().state_dict()

        for key, param in model.state_dict().items():
            prev_param = state[key]
            ema_param = ema_state_dict[key]

            if "version" in key:
                # Do not decay a model.version pytorch param
                continue
            self.assertTorchAllClose(
                ema_param,
                config.ema_decay * prev_param + (1 - config.ema_decay) * param,
            )

        # Since fp32 params is not used, it should be of size 0
        self.assertEqual(len(ema.fp32_params), 0)

        # Load EMA into model
        model2 = DummyModule().cuda()
        ema.reverse(model2)

        for key, param in model2.state_dict().items():
            ema_param = ema_state_dict[key]
            self.assertTrue(torch.allclose(ema_param, param))

    def test_ema_fp32(self):
        model = DummyModule().cuda().half()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        state = deepcopy(model.state_dict())
        config = EMAConfig(ema_fp32=True)
        ema = EMA(model, config)

        x = torch.randn(32).cuda()
        y = model(x.half())
        loss = y.sum()
        loss.backward()
        optimizer.step()

        ema.step(model)

        for key, param in model.state_dict().items():
            prev_param = state[key]
            ema_param = ema.get_model().state_dict()[key]

            if "version" in key:
                # Do not decay a model.version pytorch param
                continue
            self.assertIn(key, ema.fp32_params)

            # EMA update is done in fp32, and hence the EMA param must be
            # closer to the EMA update done in fp32 than in fp16.
            self.assertLessEqual(
                torch.norm(
                    ema_param.float()
                    - (
                        config.ema_decay * prev_param.float()
                        + (1 - config.ema_decay) * param.float()
                    )
                    .half()
                    .float()
                ),
                torch.norm(
                    ema_param.float()
                    - (
                        config.ema_decay * prev_param + (1 - config.ema_decay) * param
                    ).float()
                ),
            )
            self.assertTorchAllClose(
                ema_param,
                (
                    config.ema_decay * prev_param.float()
                    + (1 - config.ema_decay) * param.float()
                ).half(),
            )

    def test_ema_fp16(self):
        model = DummyModule().cuda().half()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        state = deepcopy(model.state_dict())
        config = EMAConfig(ema_fp32=False)
        ema = EMA(model, config)

        # Since fp32 params is not used, it should be of size 0
        self.assertEqual(len(ema.fp32_params), 0)

        x = torch.randn(32).cuda()
        y = model(x.half())
        loss = y.sum()
        loss.backward()
        optimizer.step()

        ema.step(model)

        for key, param in model.state_dict().items():
            prev_param = state[key]
            ema_param = ema.get_model().state_dict()[key]

            if "version" in key:
                # Do not decay a model.version pytorch param
                continue

            # EMA update is done in fp16, and hence the EMA param must be
            # closer to the EMA update done in fp16 than in fp32.
            self.assertLessEqual(
                torch.norm(
                    ema_param.float()
                    - (
                        config.ema_decay * prev_param + (1 - config.ema_decay) * param
                    ).float()
                ),
                torch.norm(
                    ema_param.float()
                    - (
                        config.ema_decay * prev_param.float()
                        + (1 - config.ema_decay) * param.float()
                    )
                    .half()
                    .float()
                ),
            )
            self.assertTorchAllClose(
                ema_param,
                config.ema_decay * prev_param + (1 - config.ema_decay) * param,
            )

        # Since fp32 params is not used, it should be of size 0
        self.assertEqual(len(ema.fp32_params), 0)


if __name__ == "__main__":
    unittest.main()

file_length: 6,796 | avg_line_length: 30.613953 | max_line_length: 109 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/gpu/test_binaries_gpu.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import json
import logging
import os
import tempfile
import unittest
from io import StringIO

import torch
from fairseq import options
from fairseq_cli import train
from tests.utils import (
    create_dummy_data,
    generate_main,
    preprocess_lm_data,
    preprocess_translation_data,
    train_language_model,
    train_translation_model,
)


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestMultiGPU(unittest.TestCase):
    @staticmethod
    def parse_logs(logfile):
        logs = []
        for ln in open(logfile, "r").readlines():
            try:
                logs.append(json.loads(ln))
            except json.JSONDecodeError:
                continue
        return logs

    @property
    def world_size(self):
        return torch.cuda.device_count()

    def train_flags(self, mu):
        return [
            "--memory-efficient-fp16",
            "--update-freq",
            "1",
            "--seed",
            "1",
            "--log-format",
            "json",
            "--max-update",
            str(mu),
            "--tokens-per-sample",
            "20",
            "--batch-size",
            "2",
            "--share-decoder-input-output-embed",
            "--optimizer",
            "adam",
            "--max-valid-steps",
            "1",
            "--pad-to-fixed-length",
            "--sample-break-mode",
            "none",
        ]

    def _test_resume_multilingual_training(
        self, extra_clargs, arch="transformer_lm_gpt2_tiny"
    ):
        languages = ["en_XX", "fr_XX", "zh_CN"]
        save_interval = 5
        mu = 10
        flags = (
            self.train_flags(mu)
            + ["--save-interval-updates", str(save_interval), "--log-interval", "1"]
            + extra_clargs
        )
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_fp16") as data_dir:
                log = os.path.join(data_dir, "train.log")
                create_dummy_data(
                    data_dir,
                    num_examples=int(
                        mu * 20 * self.world_size * 1.5
                    ),  # make sure enough data for max updates
                    languages=languages,
                )
                preprocess_lm_data(data_dir, languages)
                train_language_model(
                    data_dir,
                    arch,
                    flags + ["--log-file", log],
                    task="multilingual_language_modeling",
                    world_size=self.world_size,
                )
                log2 = os.path.join(data_dir, "resume.log")
                ckpt_name = f"checkpoint_1_{save_interval}.pt"
                restore_file = os.path.join(data_dir, ckpt_name)
                train_language_model(
                    data_dir,
                    arch,
                    flags
                    + ["--log-file", log2, "--restore-file", restore_file, "--no-save"],
                    task="multilingual_language_modeling",
                    world_size=self.world_size,
                )

                l1 = self.parse_logs(log)
                assert (
                    int(l1[-1]["train_num_updates"]) == mu
                ), f"The first run did not complete {mu} updates. Add more data"
                l2 = self.parse_logs(log2)

                if int(l2[0]["num_updates"]) != save_interval + 1:
                    all_ckpt_files = [
                        x for x in os.listdir(data_dir) if x.endswith(".pt")
                    ]
                    import shutil

                    shutil.move(data_dir, "last_failed_resume")
                    raise AssertionError(
                        f"Likely failed to load {ckpt_name}. {all_ckpt_files} \n LOGS: {l1} \n\n {l2}. "
                    )
                for k in [
                    "train_loss",
                    "train_num_updates",
                    "train_ppl",
                    "train_gnorm",
                ]:
                    from_scratch, resumed = float(l1[-1][k]), float(l2[-1][k])
                    # This fails without rounding!
                    assert (
                        from_scratch == resumed
                    ), f"difference at {k} {from_scratch} != {resumed}"


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestTranslationGPU(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fp16_multigpu(self):
        self._test_multigpu("test_fp16", ["--fp16"])

    def test_slowmo_multigpu(self):
        self._test_multigpu(
            "test_slowmo", ["--ddp-backend", "slowmo", "--nprocs-per-node", "1"]
        )

    def test_slowmo_single_node_multigpu(self):
        self._test_multigpu(
            "test_slowmo_single_node",
            ["--ddp-backend", "slowmo", "--nprocs-per-node", "2"],
        )

    def _test_multigpu(self, test_name, test_args):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory(test_name) as data_dir:
                log = os.path.join(data_dir, "train.log")
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir,
                    "fconv_iwslt_de_en",
                    test_args + ["--log-file", log],
                    world_size=min(torch.cuda.device_count(), 2),
                )
                generate_main(data_dir)
                assert os.path.exists(log)

    @staticmethod
    def parse_logs(logfile):
        logs = []
        for ln in open(logfile, "r").readlines():
            try:
                logs.append(json.loads(ln))
            except json.JSONDecodeError:
                continue
        return logs

    def test_resume_training_fsdp(self):
        self._test_resume_training(["--ddp-backend", "fully_sharded"])

    def test_resume_training_fsdp_sharded_state(self):
        self._test_resume_training(
            ["--ddp-backend", "fully_sharded", "--use-sharded-state"]
        )

    def test_resume_training_noc10d(self):
        self._test_resume_training([])

    def _test_resume_training(self, extra_clargs, arch="fconv_iwslt_de_en"):
        flags = [
            "--fp16",
            "--log-format",
            "json",
            "--max-update",
            "10",
            "--save-interval-updates",
            "2",
            "--log-interval",
            "1",
        ] + extra_clargs
        world_size = min(torch.cuda.device_count(), 2)
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_fp16") as data_dir:
                log = os.path.join(data_dir, "train.log")
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir,
                    arch,
                    flags + ["--log-file", log],
                    world_size=world_size,
                )
                log2 = os.path.join(data_dir, "resume.log")
                restore_file = os.path.join(data_dir, "checkpoint_1_2.pt")
                train_translation_model(
                    data_dir,
                    arch,
                    flags + ["--log-file", log2, "--restore-file", restore_file],
                    world_size=world_size,
                )

                l1 = self.parse_logs(log)
                l2 = self.parse_logs(log2)
                assert int(l2[0]["num_updates"]) == 3, f"{l1}\n\n {l2}"
                for k in [
                    "train_loss",
                    "train_num_updates",
                    "train_ppl",
                    "train_gnorm",
                ]:
                    from_scratch, resumed = l1[-1][k], l2[-1][k]
                    assert (
                        from_scratch == resumed
                    ), f"difference at {k} {from_scratch} != {resumed}"

    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"]
                )
                generate_main(data_dir)

    def test_transformer_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_transformer") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir,
                    "transformer_iwslt_de_en",
                    [
                        "--encoder-layers",
                        "2",
                        "--decoder-layers",
                        "2",
                        "--encoder-embed-dim",
                        "64",
                        "--decoder-embed-dim",
                        "64",
                        "--fp16",
                    ],
                    run_validation=True,
                )
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_amp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_amp") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, "fconv_iwslt_de_en", ["--amp"])
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_transformer_amp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_transformer") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir,
                    "transformer_iwslt_de_en",
                    [
                        "--encoder-layers",
                        "2",
                        "--decoder-layers",
                        "2",
                        "--encoder-embed-dim",
                        "64",
                        "--decoder-embed-dim",
                        "64",
                        "--amp",
                    ],
                    run_validation=True,
                )
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_levenshtein_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory(
                "test_levenshtein_transformer"
            ) as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ["--joined-dictionary"])
                train_translation_model(
                    data_dir,
                    "levenshtein_transformer",
                    [
                        "--apply-bert-init",
                        "--early-exit",
                        "6,6,6",
                        "--criterion",
                        "nat_loss",
                    ],
                    task="translation_lev",
                )
                gen_config = [
                    "--task",
                    "translation_lev",
                    "--iter-decode-max-iter",
                    "9",
                    "--iter-decode-eos-penalty",
                    "0",
                    "--print-step",
                ]
                # non-ensemble generation
                generate_main(data_dir, gen_config)
                # ensemble generation
                generate_main(
                    data_dir,
                    gen_config,
                    path=os.pathsep.join(
                        [
                            os.path.join(data_dir, "checkpoint_last.pt"),
                            os.path.join(data_dir, "checkpoint_last.pt"),
                        ]
                    ),
                )

    def test_fsdp_checkpoint_generate(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_fsdp_sharded") as data_dir:
                log = os.path.join(data_dir, "train.log")
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                world_size = min(torch.cuda.device_count(), 2)
                train_translation_model(
                    data_dir,
                    "fconv_iwslt_de_en",
                    ["--log-file", log, "--ddp-backend", "fully_sharded"],
                    world_size=world_size,
                )
                generate_main(data_dir)
                assert os.path.exists(log)

    def test_fsdp_sharded_checkpoint_generate(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_fsdp_sharded") as data_dir:
                log = os.path.join(data_dir, "train.log")
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                world_size = min(torch.cuda.device_count(), 2)
                train_translation_model(
                    data_dir,
                    "fconv_iwslt_de_en",
                    [
                        "--log-file",
                        log,
                        "--ddp-backend",
                        "fully_sharded",
                        "--use-sharded-state",
                    ],
                    world_size=world_size,
                )
                generate_main(data_dir, ["--checkpoint-shard-count", str(world_size)])
                assert os.path.exists(log)


def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            "--task",
            "language_modeling",
            data_dir,
            "--arch",
            arch,
            "--optimizer",
            "adam",
            "--lr",
            "0.0001",
            "--criterion",
            "adaptive_loss",
            "--adaptive-softmax-cutoff",
            "5,10,15",
            "--max-tokens",
            "500",
            "--tokens-per-sample",
            "500",
            "--save-dir",
            data_dir,
            "--max-epoch",
            "1",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--ddp-backend",
            "no_c10d",
            "--num-workers",
            "0",
        ]
        + (extra_flags or []),
    )
    train.main(train_args)

    # try scalar quantization
    scalar_quant_train_parser = options.get_training_parser()
    scalar_quant_train_args = options.parse_args_and_arch(
        scalar_quant_train_parser,
        [
            "--task",
            "language_modeling",
            data_dir,
            "--arch",
            arch,
            "--optimizer",
            "adam",
            "--lr",
            "0.0001",
            "--criterion",
            "adaptive_loss",
            "--adaptive-softmax-cutoff",
            "5,10,15",
            "--max-tokens",
            "500",
            "--tokens-per-sample",
            "500",
            "--save-dir",
            data_dir,
            "--max-update",
            "3",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--ddp-backend",
            "no_c10d",
            "--num-workers",
            "0",
            "--quant-noise-scalar",
            "0.5",
        ]
        + (extra_flags or []),
    )
    train.main(scalar_quant_train_args)

    # try iterative PQ quantization
    quantize_parser = options.get_training_parser()
    quantize_args = options.parse_args_and_arch(
        quantize_parser,
        [
            "--task",
            "language_modeling",
            data_dir,
            "--arch",
            arch,
            "--optimizer",
            "adam",
            "--lr",
            "0.0001",
            "--criterion",
            "adaptive_loss",
            "--adaptive-softmax-cutoff",
            "5,10,15",
            "--max-tokens",
            "50",
            "--tokens-per-sample",
            "50",
            "--max-update",
            "6",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--ddp-backend",
            "no_c10d",
            "--num-workers",
            "0",
            "--restore-file",
            os.path.join(data_dir, "checkpoint_last.pt"),
            "--reset-optimizer",
            "--quantization-config-path",
            os.path.join(
                os.path.dirname(__file__), "transformer_quantization_config.yaml"
            ),
        ]
        + (extra_flags or []),
    )
    train.main(quantize_args)


# skip on torch < 1.10 (compare the parsed major/minor version, not a single
# character of the version string)
@unittest.skipIf(
    tuple(int(v) for v in torch.__version__.split(".")[:2]) < (1, 10),
    reason="quantized kernels are only supported on CPU",
)
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestQuantization(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_quantization(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_quantization") as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                # tests both scalar and iterative PQ quantization
                _quantize_language_model(data_dir, "transformer_lm")


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestOptimizersGPU(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_flat_grads(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_flat_grads") as data_dir:
                # Use just a bit of data and tiny model to keep this test runtime reasonable
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                with self.assertRaises(RuntimeError):
                    # adafactor isn't compatible with flat grads, which
                    # are used by default with --fp16
                    train_translation_model(
                        data_dir,
                        "lstm",
                        [
                            "--required-batch-size-multiple",
                            "1",
                            "--encoder-layers",
                            "1",
                            "--encoder-hidden-size",
                            "32",
                            "--decoder-layers",
                            "1",
                            "--optimizer",
                            "adafactor",
                            "--fp16",
                        ],
                    )
                # but it should pass once we set --fp16-no-flatten-grads
                train_translation_model(
                    data_dir,
                    "lstm",
                    [
                        "--required-batch-size-multiple",
                        "1",
                        "--encoder-layers",
                        "1",
                        "--encoder-hidden-size",
                        "32",
                        "--decoder-layers",
                        "1",
                        "--optimizer",
                        "adafactor",
                        "--fp16",
                        "--fp16-no-flatten-grads",
                    ],
                )


if __name__ == "__main__":
    unittest.main()

file_length: 20,506 | avg_line_length: 33.698816 | max_line_length: 104 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/distributed/test_distributed_timeout_wrapper.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import signal
import time
import unittest

import torch
from torch import nn

from fairseq.distributed import DistributedTimeoutWrapper


class ModuleWithDelay(nn.Module):
    def __init__(self, delay):
        super().__init__()
        self.delay = delay

    def forward(self, x):
        time.sleep(self.delay)
        return x


class TestDistributedTimeoutWrapper(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_no_timeout(self):
        module = DistributedTimeoutWrapper(ModuleWithDelay(1), 0, signal.SIGINT)
        module(torch.rand(5))
        module.stop_timeout()

    def test_timeout_safe(self):
        module = DistributedTimeoutWrapper(ModuleWithDelay(1), 10, signal.SIGINT)
        module(torch.rand(5))
        module.stop_timeout()

    def test_timeout_killed(self):
        with self.assertRaises(KeyboardInterrupt):
            module = DistributedTimeoutWrapper(ModuleWithDelay(5), 1, signal.SIGINT)
            module(torch.rand(5))
            module.stop_timeout()


if __name__ == "__main__":
    unittest.main()

file_length: 1,349 | avg_line_length: 24.471698 | max_line_length: 84 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/distributed/test_module_proxy_wrapper.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from torch import nn

from fairseq.distributed import ModuleProxyWrapper

from .utils import objects_are_equal


class MockDDPWrapper(nn.Module):
    """A simple wrapper with an interface similar to DistributedDataParallel."""

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, x):
        return self.module(x)


class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(5, 10)
        self.xyz = "hello"

    def forward(self, x):
        return self.linear(x)

    def get_xyz(self):
        return self.xyz


class TestModuleProxyWrapper(unittest.TestCase):
    def _get_module(self):
        module = Model()
        wrapped_module = MockDDPWrapper(module)
        wrapped_module = ModuleProxyWrapper(wrapped_module)
        return wrapped_module, module

    def test_getattr_forwarding(self):
        wrapped_module, module = self._get_module()
        assert module.xyz == "hello"
        assert module.get_xyz() == "hello"
        assert wrapped_module.xyz == "hello"

        wrapped_module.xyz = "world"
        assert wrapped_module.xyz == "world"
        assert module.get_xyz() == "hello"

    def test_state_dict(self):
        wrapped_module, module = self._get_module()
        assert objects_are_equal(wrapped_module.state_dict(), module.state_dict())

    def test_load_state_dict(self):
        wrapped_module, module = self._get_module()
        wrapped_module.load_state_dict(module.state_dict())
        input = torch.rand(4, 5)
        torch.testing.assert_allclose(wrapped_module(input), module(input))

    def test_forward(self):
        wrapped_module, module = self._get_module()
        input = torch.rand(4, 5)
        torch.testing.assert_allclose(wrapped_module(input), module(input))


if __name__ == "__main__":
    unittest.main()

file_length: 2,085 | avg_line_length: 26.813333 | max_line_length: 82 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/distributed/utils.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import functools
import tempfile

import torch


def spawn_and_init(fn, world_size, args=None):
    if args is None:
        args = ()
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        torch.multiprocessing.spawn(
            fn=functools.partial(init_and_run, fn, args),
            args=(
                world_size,
                tmp_file.name,
            ),
            nprocs=world_size,
            join=True,
        )


def distributed_init(rank, world_size, tmp_file):
    torch.distributed.init_process_group(
        backend="nccl",
        init_method="file://{}".format(tmp_file),
        world_size=world_size,
        rank=rank,
    )
    torch.cuda.set_device(rank)


def init_and_run(fn, args, rank, world_size, tmp_file):
    distributed_init(rank, world_size, tmp_file)
    group = torch.distributed.new_group()
    fn(rank, group, *args)


def objects_are_equal(a, b) -> bool:
    if type(a) is not type(b):
        return False
    if isinstance(a, dict):
        if set(a.keys()) != set(b.keys()):
            return False
        for k in a.keys():
            if not objects_are_equal(a[k], b[k]):
                return False
        return True
    elif isinstance(a, (list, tuple, set)):
        if len(a) != len(b):
            return False
        return all(objects_are_equal(x, y) for x, y in zip(a, b))
    elif torch.is_tensor(a):
        return (
            a.size() == b.size()
            and a.dtype == b.dtype
            and a.device == b.device
            and torch.all(a == b)
        )
    else:
        return a == b

file_length: 1,765 | avg_line_length: 25.757576 | max_line_length: 65 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/distributed/test_bmuf.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import functools
import random
import unittest
from multiprocessing import Manager

import torch
import torch.nn as nn
from omegaconf import OmegaConf

from fairseq import optim
from fairseq.distributed import utils as distributed_utils


class Model(nn.Module):
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        return output


def setup_model_loss_criterion(cfg, args, rank, is_cuda):
    """
    setup model, criterion and optimizer based on input args
    """
    args.distributed_rank = rank
    cfg.distributed_training.distributed_rank = args.distributed_rank
    if cfg.distributed_training.distributed_world_size > 1:
        distributed_utils.distributed_init(cfg)
    torch.manual_seed(1)
    model = Model(args.input_size, args.nb_classes)
    loss_fn = nn.CrossEntropyLoss()
    if is_cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()

    optimizer = optim.sgd.SGD(args, model.parameters())
    optimizer = optim.FairseqBMUF(cfg=cfg.bmuf, optimizer=optimizer)

    return model, loss_fn, optimizer


def train_step(input, target, model, loss_fn, optimizer, **unused):
    """Do forward, backward and parameter update."""
    model.train()
    output = model(input)
    loss = loss_fn(output, target)
    optimizer.backward(loss)
    optimizer.step()


def single_gpu_training(cfg, args, rank, iterations, shared_results):
    is_cuda = torch.cuda.is_available()
    if is_cuda:
        torch.cuda.set_device(rank)

    model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda)

    for _ in range(iterations):
        input = torch.randn(1, args.input_size)
        target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
        if is_cuda:
            input = input.cuda()
            target = target.cuda()
        train_step(input, target, model, loss_fn, optimizer)

    results = []
    for param in model.parameters():
        if len(results) == 0:
            results = param.flatten().cpu().data
        else:
            results = torch.cat((results, param.flatten().cpu().data), 0)

    shared_results[rank] = results


def setup_args():
    args = argparse.Namespace()
    args.global_sync_iter = 20
    args.block_momentum = 0.875
    args.block_lr = 0.5
    args.input_size = 5
    args.nb_classes = 2
    args.batch_size = 1
    args.lr = [1e-3]
    args.momentum = 0
    args.weight_decay = 0
    args.warmup_iterations = 0
    args.use_nbm = True
    args.average_sync = True
    args.global_sync_iter = 1
    args.model_parallel_size = 1
    args.distributed_backend = "gloo"

    args.distributed_world_size = 2
    port = random.randint(10000, 20000)
    args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
    args.distributed_init_host = "localhost"
    args.distributed_port = port + 1
    args.local_world_size = args.distributed_world_size

    cfg = OmegaConf.create()
    cfg.optimization = OmegaConf.create()
    cfg.common = OmegaConf.create()
    cfg.distributed_training = OmegaConf.create()
    cfg.dataset = OmegaConf.create()
    cfg.bmuf = OmegaConf.create()
    cfg.optimizer = OmegaConf.create()

    cfg.bmuf.global_sync_iter = args.global_sync_iter
    cfg.bmuf.block_momentum = args.block_momentum
    cfg.bmuf.block_lr = args.block_lr
    cfg.dataset.batch_size = args.batch_size
    cfg.optimization.lr = args.lr
    cfg.optimizer.momentum = args.momentum
    cfg.optimizer.weight_decay = args.weight_decay
    cfg.bmuf.warmup_iterations = args.warmup_iterations
    cfg.bmuf.use_nbm = args.use_nbm
    cfg.bmuf.average_sync = args.average_sync
    cfg.common.model_parallel_size = args.model_parallel_size
    cfg.distributed_training.distributed_backend = args.distributed_backend
    cfg.distributed_training.distributed_world_size = args.distributed_world_size
    cfg.bmuf.distributed_world_size = args.distributed_world_size
    cfg.distributed_training.distributed_init_method = args.distributed_init_method
    cfg.distributed_training.distributed_port = args.distributed_port

    return cfg, args


@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
    def bmuf_process(self, cfg, args, iterations):
        results = Manager().dict()
        torch.multiprocessing.spawn(
            fn=functools.partial(single_gpu_training, cfg, args),
            args=(iterations, results),
            nprocs=args.distributed_world_size,
            join=True,
        )
        return results

    def test_bmuf_sync(self):
        # Train model for 1 iteration and do bmuf sync without doing warmup
        cfg, args = setup_args()
        iterations = 1
        results = self.bmuf_process(cfg, args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync(self):
        # Train model for 20 iteration and do warmup sync without doing bmuf sync
        cfg, args = setup_args()
        args.warmup_iterations = 20
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync_bmuf_sync(self):
        # Train model for 25 iteration and do warmup sync after 20 iteration
        # and bmuf sync after 25 iteration
        cfg, args = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        cfg.bmuf.global_sync_iter = args.global_sync_iter
        iterations = 25
        results = self.bmuf_process(cfg, args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_single_gpu_bmuf(self):
        # Train model for 5 iterations and use GPU 1
        cfg, args = setup_args()
        args.distributed_world_size = 1
        args.warmup_iterations = 5
        cfg.distributed_training.distributed_world_size = args.distributed_world_size
        cfg.bmuf.distributed_world_size = args.distributed_world_size
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert len(results) == 1

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == "__main__":
    unittest.main()

file_length: 7,027 | avg_line_length: 33.282927 | max_line_length: 88 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/distributed/test_utils.py
code:

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import functools
import sys
import unittest

import torch

from fairseq.distributed import utils as dist_utils

from .utils import objects_are_equal, spawn_and_init


class DistributedTest(unittest.TestCase):
    def setUp(self):
        if not torch.cuda.is_available():
            raise unittest.SkipTest("CUDA not available, skipping test")
        if sys.platform == "win32":
            raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
        if torch.cuda.device_count() < 2:
            raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")


class TestBroadcastObject(DistributedTest):
    def test_str(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object, "hello world"
            ),
            world_size=2,
        )

    def test_tensor(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object,
                torch.rand(5),
            ),
            world_size=2,
        )

    def test_complex(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object,
                {
                    "a": "1",
                    "b": [2, torch.rand(2, 3), 3],
                    "c": (torch.rand(2, 3), 4),
                    "d": {5, torch.rand(5)},
                    "e": torch.rand(5),
                    "f": torch.rand(5).int().cuda(),
                },
            ),
            world_size=2,
        )

    @staticmethod
    def _test_broadcast_object(ref_obj, rank, group):
        obj = dist_utils.broadcast_object(
            ref_obj if rank == 0 else None, src_rank=0, group=group
        )
        assert objects_are_equal(ref_obj, obj)


class TestAllGatherList(DistributedTest):
    def test_str_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                "hello world",
            ),
            world_size=2,
        )

    def test_tensor_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                torch.rand(5),
            ),
            world_size=2,
        )

    def test_complex_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                {
                    "a": "1",
                    "b": [2, torch.rand(2, 3), 3],
                    "c": (torch.rand(2, 3), 4),
                    "d": {5, torch.rand(5)},
                    "e": torch.rand(5),
                    "f": torch.rand(5).int(),
                },
            ),
            world_size=2,
        )

    @staticmethod
    def _test_all_gather_list_equality(ref_obj, rank, group):
        objs = dist_utils.all_gather_list(ref_obj, group)
        for obj in objs:
            assert objects_are_equal(ref_obj, obj)

    def test_rank_tensor(self):
        spawn_and_init(
            TestAllGatherList._test_all_gather_list_rank_tensor, world_size=2
        )

    @staticmethod
    def _test_all_gather_list_rank_tensor(rank, group):
        obj = torch.tensor([rank])
        objs = dist_utils.all_gather_list(obj, group)
        for i, obj in enumerate(objs):
            assert obj.item() == i


if __name__ == "__main__":
    unittest.main()

file_length: 3,656 | avg_line_length: 28.256 | max_line_length: 82 | extension_type: py
repo: sign-topic
file: sign-topic-main/tests/speech_recognition/test_data_utils.py
code:

#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from examples.speech_recognition.data import data_utils


class DataUtilsTest(unittest.TestCase):
    def test_normalization(self):
        sample_len1 = torch.tensor(
            [
                [
                    -0.7661, -1.3889, -2.0972, -0.9134, -0.7071, -0.9765, -0.8700, -0.8283,
                    0.7512, 1.3211, 2.1532, 2.1174, 1.2800, 1.2633, 1.6147, 1.6322,
                    2.0723, 3.1522, 3.2852, 2.2309, 2.5569, 2.2183, 2.2862, 1.5886,
                    0.8773, 0.8725, 1.2662, 0.9899, 1.1069, 1.3926, 1.2795, 1.1199,
                    1.1477, 1.2687, 1.3843, 1.1903, 0.8355, 1.1367, 1.2639, 1.4707,
                ]
            ]
        )
        out = data_utils.apply_mv_norm(sample_len1)
        assert not torch.isnan(out).any()
        assert (out == sample_len1).all()

file_length: 1,736 | avg_line_length: 26.571429 | max_line_length: 65 | extension_type: py
sign-topic
sign-topic-main/tests/speech_recognition/asr_test_base.py
#!/usr/bin/env python3

import argparse
import os
import unittest
from inspect import currentframe, getframeinfo

import numpy as np
import torch

from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.dictionary import Dictionary
from fairseq.models import (
    BaseFairseqModel,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    FairseqEncoderModel,
    FairseqModel,
)
from fairseq.tasks.fairseq_task import LegacyFairseqTask

DEFAULT_TEST_VOCAB_SIZE = 100


# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.tgt_dict = self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_task_and_parser():
    """
    To build a fairseq model, we need some dummy parser and task. This function
    is used to create a dummy task and parser to facilitate model/criterion tests.

    Note: we use FbSpeechRecognitionTask as the dummy task. You may want
    to use another task by providing another function.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


def get_dummy_input(T=100, D=80, B=5, K=100):
    forward_input = {}
    # T max sequence length
    # D feature vector dimension
    # B batch size
    # K target dimension size
    feature = torch.randn(B, T, D)
    # this (B, T, D) layout is just a convention, you can override it by
    # writing your own _prepare_forward_input function
    src_lengths = torch.from_numpy(
        np.random.randint(low=1, high=T, size=B, dtype=np.int64)
    )
    src_lengths[0] = T  # make sure the maximum length matches
    prev_output_tokens = []
    for b in range(B):
        token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
        tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
        prev_output_tokens.append(torch.from_numpy(tokens))

    prev_output_tokens = fairseq_data_utils.collate_tokens(
        prev_output_tokens,
        pad_idx=1,
        eos_idx=2,
        left_pad=False,
        move_eos_to_beginning=False,
    )
    src_lengths, sorted_order = src_lengths.sort(descending=True)
    forward_input["src_tokens"] = feature.index_select(0, sorted_order)
    forward_input["src_lengths"] = src_lengths
    forward_input["prev_output_tokens"] = prev_output_tokens

    return forward_input


def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
    """
    This only provides an example to generate dummy encoder output
    """
    (T, B, D) = encoder_out_shape
    encoder_out = {}
    encoder_out["encoder_out"] = torch.from_numpy(
        np.random.randn(*encoder_out_shape).astype(np.float32)
    )
    seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
    # some dummy mask
    encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand(
        B, -1
    ) >= seq_lengths.view(B, 1).expand(-1, T)
    encoder_out["encoder_padding_mask"].t_()

    # encoder_padding_mask is a (T, B) tensor, with the (t, b)-th element indicating
    # whether encoder_out[t, b] is valid (=0) or not (=1)
    return encoder_out


def _current_postion_info():
    cf = currentframe()
    frameinfo = " (at {}:{})".format(
        os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
    )
    return frameinfo


def check_encoder_output(encoder_output, batch_size=None):
    """we expect encoder_output to be a dict with the following
    key/value pairs:
    - encoder_out: a Torch.Tensor
    - encoder_padding_mask: a binary Torch.Tensor
    """
    if not isinstance(encoder_output, dict):
        msg = (
            "FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info()
        )
        return False, msg

    if "encoder_out" not in encoder_output:
        msg = (
            "FairseqEncoderModel.forward(...) must contain encoder_out"
            + _current_postion_info()
        )
        return False, msg

    if "encoder_padding_mask" not in encoder_output:
        msg = (
            "FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
            + _current_postion_info()
        )
        return False, msg

    if not isinstance(encoder_output["encoder_out"], torch.Tensor):
        msg = "encoder_out must be a torch.Tensor" + _current_postion_info()
        return False, msg

    if encoder_output["encoder_out"].dtype != torch.float32:
        msg = "encoder_out must have float32 dtype" + _current_postion_info()
        return False, msg

    mask = encoder_output["encoder_padding_mask"]
    if mask is not None:
        if not isinstance(mask, torch.Tensor):
            msg = (
                "encoder_padding_mask must be a torch.Tensor" + _current_postion_info()
            )
            return False, msg
        if mask.dtype != torch.uint8 and (
            not hasattr(torch, "bool") or mask.dtype != torch.bool
        ):
            msg = (
                "encoder_padding_mask must have dtype of uint8"
                + _current_postion_info()
            )
            return False, msg

        if mask.dim() != 2:
            msg = (
                "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
                + _current_postion_info()
            )
            return False, msg

        if batch_size is not None and mask.size(1) != batch_size:
            msg = (
                "we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
                + " being the batch size"
                + _current_postion_info()
            )
            return False, msg
    return True, None


def check_decoder_output(decoder_output):
    """we expect output from a decoder is a tuple with the following constraint:
    - the first element is a torch.Tensor
    - the second element can be anything (reserved for future use)
    """
    if not isinstance(decoder_output, tuple):
        msg = "FairseqDecoder output must be a tuple" + _current_postion_info()
        return False, msg

    if len(decoder_output) != 2:
        msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info()
        return False, msg

    if not isinstance(decoder_output[0], torch.Tensor):
        msg = (
            "FairseqDecoder output[0] must be a torch.Tensor" + _current_postion_info()
        )
        return False, msg

    return True, None


# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////


class TestBaseFairseqModelBase(unittest.TestCase):
    """
    This class is used to facilitate writing unittests for any class derived from
    `BaseFairseqModel`.
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestBaseFairseqModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpModel(self, model):
        self.assertTrue(isinstance(model, BaseFairseqModel))
        self.model = model

    def setupInput(self):
        pass

    def setUp(self):
        self.model = None
        self.forward_input = None
        pass


class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
    """
    Base class to test FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); concrete tests must be derived from this base class.
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderDecoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        self.assertTrue(
            issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
            msg="This class only tests for FairseqModel subclasses",
        )

        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)

        args = parser.parse_args([])

        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input

    def setUp(self):
        super().setUp()

    def test_forward(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            # for FairseqEncoderDecoderModel, forward returns a tuple of two
            # elements, the first one is a Torch.Tensor
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)

            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code failed here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))

            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))


class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
    """
    base class to test FairseqEncoderModel
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        self.assertTrue(
            issubclass(model_cls, FairseqEncoderModel),
            msg="This class is only used for testing FairseqEncoderModel",
        )
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)

        model = model_cls.build_model(args, task)
        self.model = model

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)

    def setUp(self):
        super().setUp()

    def test_forward(self):
        if self.forward_input and self.model:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.model.forward(**self.forward_input)

            # we expect forward_output to be a dict with the following
            # key/value pairs:
            # - encoder_out: a Torch.Tensor
            # - encoder_padding_mask: a binary Torch.Tensor
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)

            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code failed here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))

            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))


class TestFairseqEncoderBase(unittest.TestCase):
    """
    base class to test FairseqEncoder
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpEncoder(self, encoder):
        self.assertTrue(
            isinstance(encoder, FairseqEncoder),
            msg="This class is only used for test FairseqEncoder",
        )
        self.encoder = encoder

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)

    def setUp(self):
        self.encoder = None
        self.forward_input = None

    def test_forward(self):
        if self.encoder and self.forward_input:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.encoder.forward(**self.forward_input)
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output


class TestFairseqDecoderBase(unittest.TestCase):
    """
    base class to test FairseqDecoder
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqDecoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpDecoder(self, decoder):
        self.assertTrue(
            isinstance(decoder, FairseqDecoder),
            msg="This class is only used for test FairseqDecoder",
        )
        self.decoder = decoder

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_encoder_output() if input is None else input

    def setUpPrevOutputTokens(self, tokens=None):
        if tokens is None:
            self.encoder_input = get_dummy_input()
            self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
        else:
            self.prev_output_tokens = tokens

    def setUp(self):
        self.decoder = None
        self.forward_input = None
        self.prev_output_tokens = None

    def test_forward(self):
        if (
            self.decoder is not None
            and self.forward_input is not None
            and self.prev_output_tokens is not None
        ):
            forward_output = self.decoder.forward(
                prev_output_tokens=self.prev_output_tokens,
                encoder_out=self.forward_input,
            )
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_input = forward_output


class DummyEncoderModel(FairseqEncoderModel):
    def __init__(self, encoder):
        super().__init__(encoder)

    @classmethod
    def build_model(cls, args, task):
        return cls(DummyEncoder())

    def get_logits(self, net_output):
        # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as
        # F.binary_cross_entropy_with_logits combines sigmoid and CE
        return torch.log(
            torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"])
        )

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample)
        lprobs.batch_first = True
        return lprobs


class DummyEncoder(FairseqEncoder):
    def __init__(self):
        super().__init__(None)

    def forward(self, src_tokens, src_lengths):
        mask, max_len = lengths_to_encoder_padding_mask(src_lengths)
        return {"encoder_out": src_tokens, "encoder_padding_mask": mask}


class CrossEntropyCriterionTestBase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        if cls is CrossEntropyCriterionTestBase:
            raise unittest.SkipTest("Skipping base class test case")
        super().setUpClass()

    def setUpArgs(self):
        args = argparse.Namespace()
        args.sentence_avg = False
        args.threshold = 0.1  # to use with BinaryCrossEntropyWithLogitsCriterion
        return args

    def setUp(self):
        args = self.setUpArgs()
        self.model = DummyEncoderModel(encoder=DummyEncoder())
        self.criterion = self.criterion_cls.build_criterion(args, task=DummyTask(args))

    def get_src_tokens(self, correct_prediction, aggregate):
        """
        correct_prediction: True if the net_output (src_tokens) should
        predict the correct target
        aggregate: True if the criterion expects net_output (src_tokens)
        aggregated across time axis
        """
        predicted_idx = 0 if correct_prediction else 1
        if aggregate:
            src_tokens = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                src_tokens[b][predicted_idx] = 1.0
        else:
            src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
            for b in range(2):
                for t in range(10):
                    src_tokens[b][t][predicted_idx] = 1.0
        return src_tokens

    def get_target(self, soft_target):
        if soft_target:
            target = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                target[b][0] = 1.0
        else:
            target = torch.zeros((2, 10), dtype=torch.long)
        return target

    def get_test_sample(self, correct, soft_target, aggregate):
        src_tokens = self.get_src_tokens(correct, aggregate)
        target = self.get_target(soft_target)
        L = src_tokens.size(1)
        return {
            "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
            "target": target,
            "ntokens": src_tokens.size(0) * src_tokens.size(1),
        }
19,469
33.892473
87
py
sign-topic
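The base classes above are meant to be subclassed by concrete test modules. A minimal sketch of the intended usage, assuming a hypothetical IdentityEncoder (not part of the file above) that just re-lays-out the dummy input to satisfy check_encoder_output():

import unittest

from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.models import FairseqEncoder
from tests.speech_recognition.asr_test_base import (
    TestFairseqEncoderBase,
    get_dummy_input,
)


class IdentityEncoder(FairseqEncoder):
    # hypothetical encoder: emits (T, B, D) features and a (T, B) padding mask
    def __init__(self):
        super().__init__(None)

    def forward(self, src_tokens, src_lengths):
        mask, _ = lengths_to_encoder_padding_mask(src_lengths)
        return {
            "encoder_out": src_tokens.transpose(0, 1).contiguous(),
            "encoder_padding_mask": mask,
        }


class TestIdentityEncoder(TestFairseqEncoderBase):
    def setUp(self):
        super().setUp()
        self.setUpEncoder(IdentityEncoder())
        self.setUpInput(get_dummy_input())


if __name__ == "__main__":
    unittest.main()

Because TestFairseqEncoderBase skips itself via setUpClass, only the concrete subclass runs test_forward, and it inherits the encoder-output contract checks for free.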
sign-topic-main/tests/speech_recognition/test_collaters.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import numpy as np
import torch

from examples.speech_recognition.data.collaters import Seq2SeqCollater


class TestSeq2SeqCollator(unittest.TestCase):
    def test_collate(self):
        eos_idx = 1
        pad_idx = 0
        collater = Seq2SeqCollater(
            feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx
        )

        # 2 frames in the first sample and 3 frames in the second one
        frames1 = np.array([[7, 8], [9, 10]])
        frames2 = np.array([[1, 2], [3, 4], [5, 6]])
        target1 = np.array([4, 2, 3, eos_idx])
        target2 = np.array([3, 2, eos_idx])
        sample1 = {"id": 0, "data": [frames1, target1]}
        sample2 = {"id": 1, "data": [frames2, target2]}
        batch = collater.collate([sample1, sample2])

        # collate sorts inputs by frame length before creating the batch
        self.assertTensorEqual(batch["id"], torch.tensor([1, 0]))
        self.assertEqual(batch["ntokens"], 7)
        self.assertTensorEqual(
            batch["net_input"]["src_tokens"],
            torch.tensor(
                [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]
            ),
        )
        self.assertTensorEqual(
            batch["net_input"]["prev_output_tokens"],
            torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]),
        )
        self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2]))
        self.assertTensorEqual(
            batch["target"],
            torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]),
        )
        self.assertEqual(batch["nsentences"], 2)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == "__main__":
    unittest.main()
2,048
33.728814
87
py
sign-topic
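The prev_output_tokens layout asserted above follows the standard fairseq convention of shifting the target right and moving EOS to the front for teacher forcing. A small sketch of the same convention with fairseq's generic collate_tokens helper, reusing the target values from the test:

import torch

from fairseq.data import data_utils

eos_idx, pad_idx = 1, 0
targets = [torch.tensor([3, 2, eos_idx]), torch.tensor([4, 2, 3, eos_idx])]

# right-padded targets, as in batch["target"]
print(data_utils.collate_tokens(targets, pad_idx=pad_idx, eos_idx=eos_idx))
# tensor([[3, 2, 1, 0],
#         [4, 2, 3, 1]])

# teacher-forcing inputs, as in batch["net_input"]["prev_output_tokens"]
print(
    data_utils.collate_tokens(
        targets, pad_idx=pad_idx, eos_idx=eos_idx, move_eos_to_beginning=True
    )
)
# tensor([[1, 3, 2, 0],
#         [1, 4, 2, 3]])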
sign-topic-main/tests/speech/test_tts_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from tqdm import tqdm

from fairseq import utils
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
from tests.speech import TestFairseqSpeech


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestTTSTransformer(TestFairseqSpeech):
    def setUp(self):
        self.set_up_ljspeech()

    @torch.no_grad()
    def test_ljspeech_tts_transformer_checkpoint(self):
        models, cfg, task, generator = self.download_and_load_checkpoint(
            "ljspeech_transformer_g2p.pt",
            arg_overrides={
                "config_yaml": "cfg_ljspeech_g2p.yaml",
                "vocoder": "griffin_lim",
                "fp16": False,
            },
        )

        batch_iterator = self.get_batch_iterator(task, "ljspeech_test", 65_536, 1024)
        progress = tqdm(batch_iterator, total=len(batch_iterator))
        mcd, n_samples = 0.0, 0
        for sample in progress:
            sample = utils.move_to_cuda(sample) if self.use_cuda else sample
            hypos = generator.generate(models[0], sample, has_targ=True)
            rets = batch_mel_cepstral_distortion(
                [hypo["targ_waveform"] for hypo in hypos],
                [hypo["waveform"] for hypo in hypos],
                sr=task.sr,
            )
            mcd += sum(d.item() for d, _ in rets)
            n_samples += len(sample["id"].tolist())

        mcd = round(mcd / n_samples, 1)
        reference_mcd = 3.3
        print(f"MCD: {mcd} (reference: {reference_mcd})")
        self.assertAlmostEqual(mcd, reference_mcd, delta=0.1)


if __name__ == "__main__":
    unittest.main()
1,831
32.925926
85
py
sign-topic
sign-topic-main/tests/speech/test_dualinput_s2t_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from argparse import Namespace
from collections import namedtuple
from pathlib import Path

import torch
from tqdm import tqdm

import fairseq
from fairseq import utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.scoring.bleu import SacrebleuScorer
from fairseq.tasks import import_tasks
from tests.speech import TestFairseqSpeech


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestDualInputS2TTransformer(TestFairseqSpeech):
    def setUp(self):
        self.set_up_mustc_de_fbank()

    def import_user_module(self):
        user_dir = (
            Path(fairseq.__file__).parent.parent / "examples/speech_text_joint_to_text"
        )
        Arg = namedtuple("Arg", ["user_dir"])
        arg = Arg(user_dir.__str__())
        utils.import_user_module(arg)

    @torch.no_grad()
    def test_mustc_de_fbank_dualinput_s2t_transformer_checkpoint(self):
        self.import_user_module()
        checkpoint_filename = "checkpoint_ave_10.pt"
        path = self.download(self.base_url, self.root, checkpoint_filename)
        models, cfg, task = load_model_ensemble_and_task(
            [path.as_posix()],
            arg_overrides={
                "data": self.root.as_posix(),
                "config_yaml": "config.yaml",
                "load_pretrain_speech_encoder": "",
                "load_pretrain_text_encoder_last": "",
                "load_pretrain_decoder": "",
                "beam": 10,
                "nbest": 1,
                "lenpen": 1.0,
                "load_speech_only": True,
            },
        )
        if self.use_cuda:
            for model in models:
                model.cuda()
        generator = task.build_generator(models, cfg)
        test_split = "tst-COMMON"
        task.load_dataset(test_split)
        batch_iterator = task.get_batch_iterator(
            dataset=task.dataset(test_split),
            max_tokens=250_000,
            max_positions=(10_000, 1_024),
            num_workers=1,
        ).next_epoch_itr(shuffle=False)

        tokenizer = task.build_tokenizer(cfg.tokenizer)
        bpe = task.build_bpe(cfg.bpe)

        def decode_fn(x):
            if bpe is not None:
                x = bpe.decode(x)
            if tokenizer is not None:
                x = tokenizer.decode(x)
            return x

        scorer_args = {
            "sacrebleu_tokenizer": "13a",
            "sacrebleu_lowercase": False,
            "sacrebleu_char_level": False,
        }
        scorer = SacrebleuScorer(Namespace(**scorer_args))
        progress = tqdm(enumerate(batch_iterator), total=len(batch_iterator))
        for batch_idx, sample in progress:
            sample = utils.move_to_cuda(sample) if self.use_cuda else sample
            hypo = task.inference_step(generator, models, sample)
            for i, sample_id in enumerate(sample["id"].tolist()):
                tgt_tokens = (
                    utils.strip_pad(sample["target"][i, :], task.tgt_dict.pad())
                    .int()
                    .cpu()
                )
                tgt_str = task.tgt_dict.string(tgt_tokens, "sentencepiece")
                hypo_str = task.tgt_dict.string(
                    hypo[i][0]["tokens"].int().cpu(), "sentencepiece"
                )
                if batch_idx == 0 and i < 3:
                    print(f"T-{sample_id} {tgt_str}")
                    print(f"D-{sample_id} {hypo_str}")
                scorer.add_string(tgt_str, hypo_str)

        reference_bleu = 27.3
        result = scorer.result_string()
        print(result + f" (reference: {reference_bleu})")
        res_bleu = float(result.split()[2])
        self.assertAlmostEqual(res_bleu, reference_bleu, delta=0.3)


if __name__ == "__main__":
    unittest.main()
3,977
34.837838
87
py
sign-topic
sign-topic-main/tests/speech/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from argparse import Namespace
import os
import re
import unittest
from pathlib import Path
from tqdm import tqdm
from typing import List, Dict, Optional
import torch

from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.scoring.wer import WerScorer
from fairseq.scoring.bleu import SacrebleuScorer
from fairseq import utils

S3_BASE_URL = "https://dl.fbaipublicfiles.com/fairseq"


class TestFairseqSpeech(unittest.TestCase):
    @classmethod
    def download(cls, base_url: str, out_root: Path, filename: str):
        url = f"{base_url}/{filename}"
        path = out_root / filename
        if not path.exists():
            torch.hub.download_url_to_file(url, path.as_posix(), progress=True)
        return path

    def _set_up(self, dataset_id: str, s3_dir: str, data_filenames: List[str]):
        self.use_cuda = torch.cuda.is_available()
        self.root = Path.home() / ".cache" / "fairseq" / dataset_id
        self.root.mkdir(exist_ok=True, parents=True)
        os.chdir(self.root)
        self.base_url = (
            s3_dir if re.search("^https:", s3_dir) else f"{S3_BASE_URL}/{s3_dir}"
        )
        for filename in data_filenames:
            self.download(self.base_url, self.root, filename)

    def set_up_librispeech(self):
        self._set_up(
            "librispeech",
            "s2t/librispeech",
            [
                "cfg_librispeech.yaml",
                "spm_librispeech_unigram10000.model",
                "spm_librispeech_unigram10000.txt",
                "librispeech_test-other.tsv",
                "librispeech_test-other.zip",
            ],
        )

    def set_up_ljspeech(self):
        self._set_up(
            "ljspeech",
            "s2/ljspeech",
            [
                "cfg_ljspeech_g2p.yaml",
                "ljspeech_g2p_gcmvn_stats.npz",
                "ljspeech_g2p.txt",
                "ljspeech_test.tsv",
                "ljspeech_test.zip",
            ],
        )

    def set_up_sotasty_es_en(self):
        self._set_up(
            "sotasty_es_en",
            "s2t/big/es-en",
            [
                "cfg_es_en.yaml",
                "spm_bpe32768_es_en.model",
                "spm_bpe32768_es_en.txt",
                "sotasty_es_en_test_ted.tsv",
                "sotasty_es_en_test_ted.zip",
            ],
        )

    def set_up_mustc_de_fbank(self):
        self._set_up(
            "mustc_de_fbank",
            "https://dl.fbaipublicfiles.com/joint_speech_text_4_s2t/must_c/en_de",
            [
                "config.yaml",
                "spm.model",
                "dict.txt",
                "src_dict.txt",
                "tgt_dict.txt",
                "tst-COMMON.tsv",
                "tst-COMMON.zip",
            ],
        )

    def download_and_load_checkpoint(
        self, checkpoint_filename: str, arg_overrides: Optional[Dict[str, str]] = None
    ):
        path = self.download(self.base_url, self.root, checkpoint_filename)
        _arg_overrides = arg_overrides or {}
        _arg_overrides["data"] = self.root.as_posix()
        models, cfg, task = load_model_ensemble_and_task(
            [path.as_posix()], arg_overrides=_arg_overrides
        )
        if self.use_cuda:
            for model in models:
                model.cuda()
        generator = task.build_generator(models, cfg)
        return models, cfg, task, generator

    @classmethod
    def get_batch_iterator(cls, task, test_split, max_tokens, max_positions):
        task.load_dataset(test_split)
        return task.get_batch_iterator(
            dataset=task.dataset(test_split),
            max_tokens=max_tokens,
            max_positions=max_positions,
            num_workers=1,
        ).next_epoch_itr(shuffle=False)

    @classmethod
    def get_wer_scorer(
        cls, tokenizer="none", lowercase=False, remove_punct=False, char_level=False
    ):
        scorer_args = {
            "wer_tokenizer": tokenizer,
            "wer_lowercase": lowercase,
            "wer_remove_punct": remove_punct,
            "wer_char_level": char_level,
        }
        return WerScorer(Namespace(**scorer_args))

    @classmethod
    def get_bleu_scorer(cls, tokenizer="13a", lowercase=False, char_level=False):
        scorer_args = {
            "sacrebleu_tokenizer": tokenizer,
            "sacrebleu_lowercase": lowercase,
            "sacrebleu_char_level": char_level,
        }
        return SacrebleuScorer(Namespace(**scorer_args))

    @torch.no_grad()
    def librispeech_s2t_test_base(self, ckpt_name, reference_wer):
        models, cfg, task, generator = self.download_and_load_checkpoint(
            ckpt_name,
            arg_overrides={"config_yaml": "cfg_librispeech.yaml"},
        )
        if not self.use_cuda:
            return
        batch_iterator = self.get_batch_iterator(
            task, "librispeech_test-other", 65_536, (4_096, 1_024)
        )
        scorer = self.get_wer_scorer()
        progress = tqdm(enumerate(batch_iterator), total=len(batch_iterator))
        for batch_idx, sample in progress:
            sample = utils.move_to_cuda(sample) if self.use_cuda else sample
            hypo = task.inference_step(generator, models, sample)
            for i, sample_id in enumerate(sample["id"].tolist()):
                tgt_tokens = (
                    utils.strip_pad(sample["target"][i, :], task.tgt_dict.pad())
                    .int()
                    .cpu()
                )
                tgt_str = task.tgt_dict.string(tgt_tokens, "sentencepiece")
                hypo_str = task.tgt_dict.string(
                    hypo[i][0]["tokens"].int().cpu(), "sentencepiece"
                )
                if batch_idx == 0 and i < 3:
                    print(f"T-{sample_id} {tgt_str}")
                    print(f"H-{sample_id} {hypo_str}")
                scorer.add_string(tgt_str, hypo_str)
        print(scorer.result_string() + f" (reference: {reference_wer})")
        self.assertAlmostEqual(scorer.score(), reference_wer, delta=0.3)
6,218
34.335227
86
py
sign-topic
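A short sketch of the scorer helpers defined above, outside a test. The strings are made up; the point is that add_string(reference, hypothesis) accumulates pairs, matching the scorer.add_string(tgt_str, hypo_str) calls in the tests, and the corpus-level score is read out at the end. This assumes the optional editdistance/sacrebleu dependencies used by fairseq's scorers are installed:

from tests.speech import TestFairseqSpeech

wer_scorer = TestFairseqSpeech.get_wer_scorer()
wer_scorer.add_string("the cat sat", "the cat sat")
wer_scorer.add_string("on the mat", "on a mat")
# one substitution over six reference words, e.g. "WER: 16.67"
print(wer_scorer.result_string())

bleu_scorer = TestFairseqSpeech.get_bleu_scorer()
bleu_scorer.add_string("the cat sat on the mat", "the cat sat on the mat")
print(bleu_scorer.result_string())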
sign-topic-main/tests/speech/test_fastspeech2.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from tqdm import tqdm

from fairseq import utils
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
from tests.speech import TestFairseqSpeech


@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestFastSpeech2(TestFairseqSpeech):
    def setUp(self):
        self.set_up_ljspeech()

    @torch.no_grad()
    def test_ljspeech_fastspeech2_checkpoint(self):
        models, cfg, task, generator = self.download_and_load_checkpoint(
            "ljspeech_fastspeech2_g2p.pt",
            arg_overrides={
                "config_yaml": "cfg_ljspeech_g2p.yaml",
                "vocoder": "griffin_lim",
                "fp16": False,
            },
        )

        batch_iterator = self.get_batch_iterator(task, "ljspeech_test", 65_536, 4_096)
        progress = tqdm(batch_iterator, total=len(batch_iterator))
        mcd, n_samples = 0.0, 0
        for sample in progress:
            sample = utils.move_to_cuda(sample) if self.use_cuda else sample
            hypos = generator.generate(models[0], sample, has_targ=True)
            rets = batch_mel_cepstral_distortion(
                [hypo["targ_waveform"] for hypo in hypos],
                [hypo["waveform"] for hypo in hypos],
                sr=task.sr,
            )
            mcd += sum(d.item() for d, _ in rets)
            n_samples += len(sample["id"].tolist())

        mcd = round(mcd / n_samples, 1)
        reference_mcd = 3.2
        print(f"MCD: {mcd} (reference: {reference_mcd})")
        self.assertAlmostEqual(mcd, reference_mcd, delta=0.1)


if __name__ == "__main__":
    unittest.main()
1,825
32.814815
86
py
sign-topic
sign-topic-main/tests/speech/test_xm_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch
from tqdm import tqdm

from fairseq import utils
from tests.speech import TestFairseqSpeech


class TestXMTransformer(TestFairseqSpeech):
    def setUp(self):
        self.set_up_sotasty_es_en()

    @torch.no_grad()
    def test_sotasty_es_en_600m_checkpoint(self):
        models, cfg, task, generator = self.download_and_load_checkpoint(
            "xm_transformer_600m_es_en_md.pt",
            arg_overrides={"config_yaml": "cfg_es_en.yaml"},
        )
        if not self.use_cuda:
            return
        batch_iterator = self.get_batch_iterator(
            task, "sotasty_es_en_test_ted", 3_000_000, (1_000_000, 1_024)
        )
        scorer = self.get_bleu_scorer()
        progress = tqdm(enumerate(batch_iterator), total=len(batch_iterator))
        for batch_idx, sample in progress:
            sample = utils.move_to_cuda(sample) if self.use_cuda else sample
            hypo = task.inference_step(generator, models, sample)
            for i, sample_id in enumerate(sample["id"].tolist()):
                tgt_tokens = (
                    utils.strip_pad(sample["target"][i, :], task.tgt_dict.pad())
                    .int()
                    .cpu()
                )
                tgt_str = task.tgt_dict.string(tgt_tokens, "sentencepiece")
                hypo_str = task.tgt_dict.string(
                    hypo[i][0]["tokens"].int().cpu(), "sentencepiece"
                )
                if batch_idx == 0 and i < 3:
                    print(f"T-{sample_id} {tgt_str}")
                    print(f"H-{sample_id} {hypo_str}")
                scorer.add_string(tgt_str, hypo_str)

        reference_bleu = 31.7
        print(f"{scorer.result_string()} (reference: {reference_bleu})")
        self.assertAlmostEqual(scorer.score(), reference_bleu, delta=0.2)


if __name__ == "__main__":
    unittest.main()
2,051
35
80
py
sign-topic
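Several of the tests above share the same detokenization idiom: strip padding from a target row, then let the dictionary undo sentencepiece segmentation. A sketch of that idiom in isolation; the dictionary contents are made up for illustration:

import torch

from fairseq import utils
from fairseq.data import Dictionary

d = Dictionary()  # pad=1, eos=2, unk=3 by default
for sym in ["\u2581he", "llo", "\u2581wor", "ld"]:
    d.add_symbol(sym)

row = torch.tensor(
    [d.index("\u2581he"), d.index("llo"), d.index("\u2581wor"), d.index("ld"),
     d.eos(), d.pad(), d.pad()]
)
tokens = utils.strip_pad(row, d.pad())  # drop trailing pad symbols
# "sentencepiece" post-processing removes spaces and turns "\u2581" back into spaces
print(d.string(tokens, "sentencepiece"))  # "hello world"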
sign-topic-main/fairseq/checkpoint_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import ast
import collections
import contextlib
import logging
import os
import re
import time
import traceback
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, Optional, Union

import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf, open_dict

from fairseq.data import data_utils
from fairseq.dataclass.configs import CheckpointConfig
from fairseq.dataclass.utils import (
    convert_namespace_to_omegaconf,
    overwrite_args_by_name,
)
from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder

logger = logging.getLogger(__name__)


def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):
    from fairseq import meters

    # only one worker should attempt to create the required dir
    if trainer.data_parallel_rank == 0:
        os.makedirs(cfg.save_dir, exist_ok=True)

    prev_best = getattr(save_checkpoint, "best", val_loss)
    if val_loss is not None:
        best_function = max if cfg.maximize_best_checkpoint_metric else min
        save_checkpoint.best = best_function(val_loss, prev_best)

    if cfg.no_save:
        return

    trainer.consolidate_optimizer()  # TODO(SS): do we need this if no_save_optimizer_state

    if not trainer.should_save_checkpoint_on_current_rank:
        if trainer.always_call_state_dict_during_save_checkpoint:
            trainer.state_dict()
        return

    write_timer = meters.StopwatchMeter()
    write_timer.start()

    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()

    logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates")

    def is_better(a, b):
        return a >= b if cfg.maximize_best_checkpoint_metric else a <= b

    suffix = trainer.checkpoint_suffix
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
        end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0
    )
    checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
        not end_of_epoch
        and cfg.save_interval_updates > 0
        and updates % cfg.save_interval_updates == 0
    )
    checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
        not hasattr(save_checkpoint, "best")
        or is_better(val_loss, save_checkpoint.best)
    )
    if val_loss is not None and cfg.keep_best_checkpoints > 0:
        worst_best = getattr(save_checkpoint, "best", None)
        chkpts = checkpoint_paths(
            cfg.save_dir,
            pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
                cfg.best_checkpoint_metric, suffix
            ),
        )
        if len(chkpts) > 0:
            p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0]
            worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), ""))
        # add random digits to resolve ties
        with data_utils.numpy_seed(epoch, updates, val_loss):
            rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints)

        checkpoint_conds[
            "checkpoint.best_{}_{:.3f}{}{}.pt".format(
                cfg.best_checkpoint_metric, val_loss, rand_sfx, suffix
            )
        ] = worst_best is None or is_better(val_loss, worst_best)
    checkpoint_conds[
        "checkpoint_last{}.pt".format(suffix)
    ] = not cfg.no_last_checkpoints

    extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
    if hasattr(save_checkpoint, "best"):
        extra_state.update({"best": save_checkpoint.best})

    checkpoints = [
        os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
    ]
    if len(checkpoints) > 0:
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            if cfg.write_checkpoints_asynchronously:
                # TODO[ioPath]: Need to implement a delayed asynchronous
                # file copying/moving feature.
                logger.warning(
                    f"ioPath is not copying {checkpoints[0]} to {cp} "
                    "since async write mode is on."
                )
            else:
                assert PathManager.copy(
                    checkpoints[0], cp, overwrite=True
                ), f"Failed to copy {checkpoints[0]} to {cp}"

        write_timer.stop()
        logger.info(
            "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
                checkpoints[0], epoch, updates, val_loss, write_timer.sum
            )
        )

    if not end_of_epoch and cfg.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        if cfg.keep_interval_updates_pattern == -1:
            checkpoints = checkpoint_paths(
                cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix)
            )
        else:
            checkpoints = checkpoint_paths(
                cfg.save_dir,
                pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix),
                keep_match=True,
            )
            checkpoints = [
                x[0]
                for x in checkpoints
                if x[1] % cfg.keep_interval_updates_pattern != 0
            ]
        for old_chk in checkpoints[cfg.keep_interval_updates :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)

    if cfg.keep_last_epochs > 0:
        # remove old epoch checkpoints; checkpoints are sorted in descending order
        checkpoints = checkpoint_paths(
            cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix)
        )
        for old_chk in checkpoints[cfg.keep_last_epochs :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)

    if cfg.keep_best_checkpoints > 0:
        # only keep the best N checkpoints according to validation metric
        checkpoints = checkpoint_paths(
            cfg.save_dir,
            pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format(
                cfg.best_checkpoint_metric, suffix
            ),
        )
        if not cfg.maximize_best_checkpoint_metric:
            checkpoints = checkpoints[::-1]
        for old_chk in checkpoints[cfg.keep_best_checkpoints :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
            elif PathManager.exists(old_chk):
                PathManager.rm(old_chk)


def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
    """
    Load a checkpoint and restore the training iterator.

    *passthrough_args* will be passed through to
    ``trainer.get_train_iterator``.
    """

    reset_optimizer = cfg.reset_optimizer
    reset_lr_scheduler = cfg.reset_lr_scheduler
    optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
    reset_meters = cfg.reset_meters
    reset_dataloader = cfg.reset_dataloader

    if cfg.finetune_from_model is not None and (
        reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader
    ):
        raise ValueError(
            "--finetune-from-model can not be set together with either --reset-optimizer"
            " or reset_lr_scheduler or reset_meters or reset_dataloader"
        )

    suffix = trainer.checkpoint_suffix
    if (
        cfg.restore_file == "checkpoint_last.pt"
    ):  # default value of restore_file is 'checkpoint_last.pt'
        checkpoint_path = os.path.join(
            cfg.save_dir, "checkpoint_last{}.pt".format(suffix)
        )
        first_launch = not PathManager.exists(checkpoint_path)
        if cfg.finetune_from_model is not None and first_launch:
            # if there is no last checkpoint to restore, start the finetune from pretrained model
            # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
            if PathManager.exists(cfg.finetune_from_model):
                checkpoint_path = cfg.finetune_from_model
                reset_optimizer = True
                reset_lr_scheduler = True
                reset_meters = True
                reset_dataloader = True
                logger.info(
                    f"loading pretrained model from {checkpoint_path}: "
                    "optimizer, lr scheduler, meters, dataloader will be reset"
                )
            else:
                raise ValueError(
                    f"--finetune-from-model {cfg.finetune_from_model} does not exist"
                )
    elif suffix is not None:
        checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt")
    else:
        checkpoint_path = cfg.restore_file

    if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model:
        raise ValueError(
            "--finetune-from-model and --restore-file (non-default value) "
            "can not be specified together: " + str(cfg)
        )

    extra_state = trainer.load_checkpoint(
        checkpoint_path,
        reset_optimizer,
        reset_lr_scheduler,
        optimizer_overrides,
        reset_meters=reset_meters,
    )

    if (
        extra_state is not None
        and "best" in extra_state
        and not reset_optimizer
        and not reset_meters
    ):
        save_checkpoint.best = extra_state["best"]

    if extra_state is not None and not reset_dataloader:
        # restore iterator from checkpoint
        itr_state = extra_state["train_iterator"]
        epoch_itr = trainer.get_train_iterator(
            epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
        )
        epoch_itr.load_state_dict(itr_state)
    else:
        epoch_itr = trainer.get_train_iterator(
            epoch=1, load_dataset=True, **passthrough_args
        )

    trainer.lr_step(epoch_itr.epoch)

    return extra_state, epoch_itr


def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
    """Loads a checkpoint to CPU (with upgrading for backward compatibility).

    If doing single-GPU training or if the checkpoint is only being loaded by at
    most one process on each node (current default behavior is for only rank 0
    to read the checkpoint from disk), load_on_all_ranks should be False to
    avoid errors from torch.distributed not having been initialized or
    torch.distributed.barrier() hanging.

    If all processes on each node may be loading the checkpoint
    simultaneously, load_on_all_ranks should be set to True to avoid I/O
    conflicts.

    There's currently no support for > 1 but < all processes loading the
    checkpoint on each node.
    """
    local_path = PathManager.get_local_path(path)
    # The locally cached file returned by get_local_path() may be stale for
    # remote files that are periodically updated/overwritten (ex:
    # checkpoint_last.pt) - so we remove the local copy, sync across processes
    # (if needed), and then download a fresh copy.
    if local_path != path and PathManager.path_requires_pathmanager(path):
        try:
            os.remove(local_path)
        except FileNotFoundError:
            # With potentially multiple processes removing the same file, the
            # file being missing is benign (missing_ok isn't available until
            # Python 3.8).
            pass
        if load_on_all_ranks:
            torch.distributed.barrier()
        local_path = PathManager.get_local_path(path)

    with open(local_path, "rb") as f:
        state = torch.load(f, map_location=torch.device("cpu"))

    if "args" in state and state["args"] is not None and arg_overrides is not None:
        args = state["args"]
        for arg_name, arg_val in arg_overrides.items():
            setattr(args, arg_name, arg_val)

    if "cfg" in state and state["cfg"] is not None:

        # hack to be able to set Namespace in dict config. this should be removed when we update to newer
        # omegaconf version that supports object flags, or when we migrate all existing models
        from omegaconf import _utils

        old_primitive = _utils.is_primitive_type
        _utils.is_primitive_type = lambda _: True

        state["cfg"] = OmegaConf.create(state["cfg"])

        _utils.is_primitive_type = old_primitive
        OmegaConf.set_struct(state["cfg"], True)

        if arg_overrides is not None:
            overwrite_args_by_name(state["cfg"], arg_overrides)

    state = _upgrade_state_dict(state)
    return state


def load_model_ensemble(
    filenames,
    arg_overrides: Optional[Dict[str, Any]] = None,
    task=None,
    strict=True,
    suffix="",
    num_shards=1,
    state=None,
):
    """Loads an ensemble of models.

    Args:
        filenames (List[str]): checkpoint files to load
        arg_overrides (Dict[str,Any], optional): override model args that
            were used during model training
        task (fairseq.tasks.FairseqTask, optional): task to use for loading
    """
    assert not (
        strict and num_shards > 1
    ), "Cannot load state dict with strict=True and checkpoint shards > 1"
    ensemble, args, _task = load_model_ensemble_and_task(
        filenames,
        arg_overrides,
        task,
        strict,
        suffix,
        num_shards,
        state,
    )
    return ensemble, args


def get_maybe_sharded_checkpoint_filename(
    filename: str, suffix: str, shard_idx: int, num_shards: int
) -> str:
    orig_filename = filename
    filename = filename.replace(".pt", suffix + ".pt")
    fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt"
    model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt"
    if PathManager.exists(fsdp_filename):
        return fsdp_filename
    elif num_shards > 1:
        return model_parallel_filename
    else:
        return filename


def load_model_ensemble_and_task(
    filenames,
    arg_overrides: Optional[Dict[str, Any]] = None,
    task=None,
    strict=True,
    suffix="",
    num_shards=1,
    state=None,
):
    assert state is None or len(filenames) == 1

    from fairseq import tasks

    assert not (
        strict and num_shards > 1
    ), "Cannot load state dict with strict=True and checkpoint shards > 1"
    ensemble = []
    cfg = None
    for filename in filenames:
        orig_filename = filename
        model_shard_state = {"shard_weights": [], "shard_metadata": []}
        assert num_shards > 0
        st = time.time()
        for shard_idx in range(num_shards):
            filename = get_maybe_sharded_checkpoint_filename(
                orig_filename, suffix, shard_idx, num_shards
            )
            if not PathManager.exists(filename):
                raise IOError("Model file not found: {}".format(filename))
            if state is None:
                state = load_checkpoint_to_cpu(filename, arg_overrides)
            if "args" in state and state["args"] is not None:
                cfg = convert_namespace_to_omegaconf(state["args"])
            elif "cfg" in state and state["cfg"] is not None:
                cfg = state["cfg"]
            else:
                raise RuntimeError(
                    f"Neither args nor cfg exist in state keys = {state.keys()}"
                )

            if task is None:
                task = tasks.setup_task(cfg.task)

            if "task_state" in state:
                task.load_state_dict(state["task_state"])

            if "fsdp_metadata" in state and num_shards > 1:
                model_shard_state["shard_weights"].append(state["model"])
                model_shard_state["shard_metadata"].append(state["fsdp_metadata"])
                # check FSDP import before the code goes too far
                if not has_FSDP:
                    raise ImportError(
                        "Cannot find FullyShardedDataParallel. "
                        "Please install fairscale with: pip install fairscale"
                    )
                if shard_idx == num_shards - 1:
                    consolidated_model_state = FSDP.consolidate_shard_weights(
                        shard_weights=model_shard_state["shard_weights"],
                        shard_metadata=model_shard_state["shard_metadata"],
                    )
                    model = task.build_model(cfg.model)
                    if (
                        "optimizer_history" in state
                        and len(state["optimizer_history"]) > 0
                        and "num_updates" in state["optimizer_history"][-1]
                    ):
                        model.set_num_updates(
                            state["optimizer_history"][-1]["num_updates"]
                        )
                    model.load_state_dict(
                        consolidated_model_state, strict=strict, model_cfg=cfg.model
                    )
            else:
                # model parallel checkpoint or unsharded checkpoint
                model = task.build_model(cfg.model)
                if (
                    "optimizer_history" in state
                    and len(state["optimizer_history"]) > 0
                    and "num_updates" in state["optimizer_history"][-1]
                ):
                    model.set_num_updates(state["optimizer_history"][-1]["num_updates"])
                model.load_state_dict(
                    state["model"], strict=strict, model_cfg=cfg.model
                )

            # reset state so it gets loaded for the next model in ensemble
            state = None
            if shard_idx % 10 == 0 and shard_idx > 0:
                elapsed = time.time() - st
                logger.info(
                    f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard"
                )

        # build model for ensemble
        ensemble.append(model)
    return ensemble, cfg, task


def load_model_ensemble_and_task_from_hf_hub(
    model_id,
    cache_dir: Optional[str] = None,
    arg_overrides: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    try:
        from huggingface_hub import snapshot_download
    except ImportError:
        raise ImportError(
            "You need to install huggingface_hub to use `load_from_hf_hub`. "
            "See https://pypi.org/project/huggingface-hub/ for installation."
        )

    library_name = "fairseq"
    cache_dir = cache_dir or (Path.home() / ".cache" / library_name).as_posix()
    cache_dir = snapshot_download(
        model_id, cache_dir=cache_dir, library_name=library_name, **kwargs
    )

    _arg_overrides = arg_overrides or {}
    _arg_overrides["data"] = cache_dir
    return load_model_ensemble_and_task(
        [p.as_posix() for p in Path(cache_dir).glob("*.pt")],
        arg_overrides=_arg_overrides,
    )


def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False):
    """Retrieves all checkpoints found in `path` directory.

    Checkpoints are identified by matching filename to the specified pattern. If
    the pattern contains groups, the result will be sorted by the first group in
    descending order.
    """
    pt_regexp = re.compile(pattern)
    files = PathManager.ls(path)

    entries = []
    for i, f in enumerate(files):
        m = pt_regexp.fullmatch(f)
        if m is not None:
            idx = float(m.group(1)) if len(m.groups()) > 0 else i
            entries.append((idx, m.group(0)))
    if keep_match:
        return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)]
    else:
        return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]


def torch_persistent_save(obj, filename, async_write: bool = False):
    if async_write:
        with PathManager.opena(filename, "wb") as f:
            _torch_persistent_save(obj, f)
    else:
        if PathManager.supports_rename(filename):
            # do atomic save
            with PathManager.open(filename + ".tmp", "wb") as f:
                _torch_persistent_save(obj, f)
            PathManager.rename(filename + ".tmp", filename)
        else:
            # fallback to non-atomic save
            with PathManager.open(filename, "wb") as f:
                _torch_persistent_save(obj, f)


def _torch_persistent_save(obj, f):
    if isinstance(f, str):
        with PathManager.open(f, "wb") as h:
            torch_persistent_save(obj, h)
        return
    for i in range(3):
        try:
            return torch.save(obj, f)
        except Exception:
            if i == 2:
                logger.error(traceback.format_exc())
                raise


def _upgrade_state_dict(state):
    """Helper for upgrading old model checkpoints."""

    # add optimizer_history
    if "optimizer_history" not in state:
        state["optimizer_history"] = [
            {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
        ]
        state["last_optimizer_state"] = state["optimizer"]
        del state["optimizer"]
        del state["best_loss"]
    # move extra_state into sub-dictionary
    if "epoch" in state and "extra_state" not in state:
        state["extra_state"] = {
            "epoch": state["epoch"],
            "batch_offset": state["batch_offset"],
            "val_loss": state["val_loss"],
        }
        del state["epoch"]
        del state["batch_offset"]
        del state["val_loss"]
    # reduce optimizer history's memory usage (only keep the last state)
    if "optimizer" in state["optimizer_history"][-1]:
        state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
        for optim_hist in state["optimizer_history"]:
            del optim_hist["optimizer"]
    # record the optimizer class name
    if "optimizer_name" not in state["optimizer_history"][-1]:
        state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
    # move best_loss into lr_scheduler_state
    if "lr_scheduler_state" not in state["optimizer_history"][-1]:
        state["optimizer_history"][-1]["lr_scheduler_state"] = {
            "best": state["optimizer_history"][-1]["best_loss"]
        }
        del state["optimizer_history"][-1]["best_loss"]
    # keep track of number of updates
    if "num_updates" not in state["optimizer_history"][-1]:
        state["optimizer_history"][-1]["num_updates"] = 0
    # use stateful training data iterator
    if "train_iterator" not in state["extra_state"]:
        state["extra_state"]["train_iterator"] = {
            "epoch": state["extra_state"]["epoch"],
            "iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
        }

    # backward compatibility, cfg updates
    if "args" in state and state["args"] is not None:
        # old model checkpoints may not have separate source/target positions
        if hasattr(state["args"], "max_positions") and not hasattr(
            state["args"], "max_source_positions"
        ):
            state["args"].max_source_positions = state["args"].max_positions
            state["args"].max_target_positions = state["args"].max_positions
        # default to translation task
        if not hasattr(state["args"], "task"):
            state["args"].task = "translation"
        # --raw-text and --lazy-load are deprecated
        if getattr(state["args"], "raw_text", False):
            state["args"].dataset_impl = "raw"
        elif getattr(state["args"], "lazy_load", False):
            state["args"].dataset_impl = "lazy"
        # epochs start at 1
        if state["extra_state"]["train_iterator"] is not None:
            state["extra_state"]["train_iterator"]["epoch"] = max(
                state["extra_state"]["train_iterator"].get("epoch", 1), 1
            )
        # --remove-bpe ==> --postprocess
        if hasattr(state["args"], "remove_bpe"):
            state["args"].post_process = state["args"].remove_bpe
        # --min-lr ==> --stop-min-lr
        if hasattr(state["args"], "min_lr"):
            state["args"].stop_min_lr = state["args"].min_lr
            del state["args"].min_lr
        # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion
        if hasattr(state["args"], "criterion") and state["args"].criterion in [
            "binary_cross_entropy",
            "kd_binary_cross_entropy",
        ]:
            state["args"].criterion = "wav2vec"
        # remove log_keys if it's None (criteria will supply a default value of [])
        if hasattr(state["args"], "log_keys") and state["args"].log_keys is None:
            delattr(state["args"], "log_keys")
        # speech_pretraining => audio pretraining
        if (
            hasattr(state["args"], "task")
            and state["args"].task == "speech_pretraining"
        ):
            state["args"].task = "audio_pretraining"
        # audio_cpc => wav2vec
        if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc":
            state["args"].arch = "wav2vec"
        # convert legacy float learning rate to List[float]
        if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float):
            state["args"].lr = [state["args"].lr]
        # convert task data arg to a string instead of List[string]
        if (
            hasattr(state["args"], "data")
            and isinstance(state["args"].data, list)
            and len(state["args"].data) > 0
        ):
            state["args"].data = state["args"].data[0]

        state["cfg"] = convert_namespace_to_omegaconf(state["args"])

    if "cfg" in state and state["cfg"] is not None:
        cfg = state["cfg"]
        with open_dict(cfg):
            # any upgrades for Hydra-based configs
            if (
                "task" in cfg
                and "eval_wer_config" in cfg.task
                and isinstance(cfg.task.eval_wer_config.print_alignment, bool)
            ):
                cfg.task.eval_wer_config.print_alignment = "hard"
            if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool):
                cfg.generation.print_alignment = (
                    "hard" if cfg.generation.print_alignment else None
                )
            if (
                "model" in cfg
                and "w2v_args" in cfg.model
                and cfg.model.w2v_args is not None
                and (
                    hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args
                )
                and hasattr(cfg.model.w2v_args.task, "eval_wer_config")
                and cfg.model.w2v_args.task.eval_wer_config is not None
                and isinstance(
                    cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool
                )
            ):
                cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard"

    return state


def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
    """Prune the given state_dict if desired for LayerDrop
    (https://arxiv.org/abs/1909.11556).

    Training with LayerDrop allows models to be robust to pruning at inference
    time. This function prunes state_dict to allow smaller models to be loaded
    from a larger model and re-maps the existing state_dict for this to occur.

    It's called by functions that load models from checkpoints and does not
    need to be called directly.
    """
    arch = None
    if model_cfg is not None:
        arch = (
            model_cfg._name
            if isinstance(model_cfg, DictConfig)
            else getattr(model_cfg, "arch", None)
        )

    if not model_cfg or arch is None or arch == "ptt_transformer":
        # args should not be none, but don't crash if it is.
        return state_dict

    encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None)
    decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None)

    if not encoder_layers_to_keep and not decoder_layers_to_keep:
        return state_dict

    # apply pruning
    logger.info(
        "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
    )

    def create_pruning_pass(layers_to_keep, layer_name):
        keep_layers = sorted(
            int(layer_string) for layer_string in layers_to_keep.split(",")
        )
        mapping_dict = {}
        for i in range(len(keep_layers)):
            mapping_dict[str(keep_layers[i])] = str(i)

        regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
        return {"substitution_regex": regex, "mapping_dict": mapping_dict}

    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))

    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = re.search(r"\.layers\.(\d+)\.", layer_name)
        # if layer has no number in it, it is a supporting layer, such as an
        # embedding
        if not match:
            new_state_dict[layer_name] = state_dict[layer_name]
            continue

        # otherwise, layer should be pruned.
        original_layer_number = match.group(1)
        # figure out which mapping dict to replace from
        for pruning_pass in pruning_passes:
            if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
                "substitution_regex"
            ].search(layer_name):
                new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
                substitution_match = pruning_pass["substitution_regex"].search(
                    layer_name
                )
                new_state_key = (
                    layer_name[: substitution_match.start(1)]
                    + new_layer_number
                    + layer_name[substitution_match.end(1) :]
                )
                new_state_dict[new_state_key] = state_dict[layer_name]

    # Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of "It would make it work fix" rather than a proper fix.
    if isinstance(model_cfg, DictConfig):
        context = open_dict(model_cfg)
    else:
        context = contextlib.ExitStack()
    with context:
        if hasattr(model_cfg, "encoder_layers_to_keep"):
            model_cfg.encoder_layers_to_keep = None
        if hasattr(model_cfg, "decoder_layers_to_keep"):
            model_cfg.decoder_layers_to_keep = None

    return new_state_dict


def load_pretrained_component_from_model(
    component: Union[FairseqEncoder, FairseqDecoder],
    checkpoint: str,
    strict: bool = True,
):
    """
    Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
    provided `component` object. If state_dict fails to load, there may be a
    mismatch in the architecture of the corresponding `component` found in the
    `checkpoint` file.
    """
    if not PathManager.exists(checkpoint):
        raise IOError("Model file not found: {}".format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)
    if isinstance(component, FairseqEncoder):
        component_type = "encoder"
    elif isinstance(component, FairseqDecoder):
        component_type = "decoder"
    else:
        raise ValueError(
            "component to load must be either a FairseqEncoder or "
            "FairseqDecoder. Loading other component types is not supported."
        )
    component_state_dict = OrderedDict()
    for key in state["model"].keys():
        if key.startswith(component_type):
            # encoder.input_layers.0.0.weight --> input_layers.0.0.weight
            component_subkey = key[len(component_type) + 1 :]
            component_state_dict[component_subkey] = state["model"][key]
    component.load_state_dict(component_state_dict, strict=strict)
    return component


def verify_checkpoint_directory(save_dir: str) -> None:
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    temp_file_path = os.path.join(save_dir, "dummy")
    try:
        with open(temp_file_path, "w"):
            pass
    except OSError as e:
        logger.warning(
            "Unable to access checkpoint save directory: {}".format(save_dir)
        )
        raise e
    else:
        os.remove(temp_file_path)


def load_ema_from_checkpoint(fpath):
    """Loads exponential moving averaged (EMA) checkpoint from input and
    returns a model with ema weights.

    Args:
      fpath: A string path of checkpoint to load from.

    Returns:
      A dict of string keys mapping to various values. The 'model' key
      from the returned dict should correspond to an OrderedDict mapping
      string parameter names to torch Tensors.
    """
    params_dict = collections.OrderedDict()
    new_state = None

    with PathManager.open(fpath, "rb") as f:
        new_state = torch.load(
            f,
            map_location=(
                lambda s, _: torch.serialization.default_restore_location(s, "cpu")
            ),
        )

        # EMA model is stored in a separate "extra state"
        model_params = new_state["extra_state"]["ema"]

        for key in list(model_params.keys()):
            p = model_params[key]
            if isinstance(p, torch.HalfTensor):
                p = p.float()
            if key not in params_dict:
                params_dict[key] = p.clone()
                # NOTE: clone() is needed in case of p is a shared parameter
            else:
                raise ValueError("Key {} is repeated in EMA model params.".format(key))

        if len(params_dict) == 0:
            raise ValueError(
                f"Input checkpoint path '{fpath}' does not contain "
                "ema model weights, is this model trained with EMA?"
            )

    new_state["model"] = params_dict
    return new_state
34,264
37.543307
114
py
sign-topic
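The descending sort inside checkpoint_paths() is what lets save_checkpoint() trim "checkpoint.best_*" files from the worst end of the list. A self-contained sketch of the same pattern/group logic over a made-up file list (the "bleu" metric name and filenames are invented; no real directory or PathManager is needed):

import re

pattern = r"checkpoint\.best_bleu_(\d+\.?\d*)\.pt"
files = [
    "checkpoint.best_bleu_26.100.pt",
    "checkpoint.best_bleu_27.300.pt",
    "checkpoint_last.pt",  # does not match the pattern and is ignored
    "checkpoint.best_bleu_25.900.pt",
]

pt_regexp = re.compile(pattern)
entries = []
for i, f in enumerate(files):
    m = pt_regexp.fullmatch(f)
    if m is not None:
        # sort key is the first regex group (here, the metric value)
        idx = float(m.group(1)) if len(m.groups()) > 0 else i
        entries.append((idx, m.group(0)))

# highest metric value first, exactly as checkpoint_paths() returns them
print([name for _, name in sorted(entries, reverse=True)])
# ['checkpoint.best_bleu_27.300.pt', 'checkpoint.best_bleu_26.100.pt',
#  'checkpoint.best_bleu_25.900.pt']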
sign-topic-main/fairseq/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional, TYPE_CHECKING

import torch
import torch.nn.functional as F
from torch import Tensor
import collections

if TYPE_CHECKING:
    from fairseq.modules.multihead_attention import MultiheadAttention

try:
    from amp_C import multi_tensor_l2norm

    multi_tensor_l2norm_available = True
except ImportError:
    multi_tensor_l2norm_available = False

try:
    import torch_xla.core.xla_model as xm
except ImportError:
    xm = None


logger = logging.getLogger(__name__)


MANIFOLD_PATH_SEP = "|"


class FileContentsAction(argparse.Action):
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        from fairseq.file_io import PathManager

        if PathManager.isfile(values):
            with PathManager.open(values) as f:
                argument = f.read().strip()
        else:
            argument = values
        setattr(namespace, self.dest, argument)


def split_paths(paths: str, separator=os.pathsep) -> List[str]:
    return (
        paths.split(separator) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP)
    )


def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
    from fairseq import checkpoint_utils

    deprecation_warning(
        "utils.load_ensemble_for_inference is deprecated. "
        "Please use checkpoint_utils.load_model_ensemble instead."
    )
    return checkpoint_utils.load_model_ensemble(
        filenames, arg_overrides=model_arg_overrides, task=task
    )


def apply_to_sample(f, sample):
    if hasattr(sample, "__len__") and len(sample) == 0:
        return {}

    def _apply(x):
        if torch.is_tensor(x):
            return f(x)
        elif isinstance(x, collections.OrderedDict):
            # OrderedDict has attributes that need to be preserved
            od = collections.OrderedDict(
                (key, _apply(value)) for key, value in x.items()
            )
            od.__dict__ = x.__dict__
            return od
        elif isinstance(x, dict):
            return {key: _apply(value) for key, value in x.items()}
        elif isinstance(x, list):
            return [_apply(x) for x in x]
        elif isinstance(x, tuple):
            return tuple(_apply(x) for x in x)
        elif isinstance(x, set):
            return {_apply(x) for x in x}
        else:
            return x

    return _apply(sample)


def move_to_cuda(sample, device=None):
    device = device or torch.cuda.current_device()

    def _move_to_cuda(tensor):
        # non_blocking is ignored if tensor is not pinned, so we can always set
        # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
        return tensor.to(device=device, non_blocking=True)

    return apply_to_sample(_move_to_cuda, sample)


def move_to_cpu(sample):
    def _move_to_cpu(tensor):
        # PyTorch has poor support for half tensors (float16) on CPU.
        # Move any such tensors to float32.
        if tensor.dtype in {torch.bfloat16, torch.float16}:
            tensor = tensor.to(dtype=torch.float32)
        return tensor.cpu()

    return apply_to_sample(_move_to_cpu, sample)


def move_to_tpu(sample):
    import torch_xla.core.xla_model as xm

    device = xm.xla_device()

    def _move_to_tpu(tensor):
        return tensor.to(device)

    return apply_to_sample(_move_to_tpu, sample)


def get_incremental_state(
    module: "MultiheadAttention",
    incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
    """Helper for getting incremental state for an nn.Module."""
    return module.get_incremental_state(incremental_state, key)


def set_incremental_state(
    module: "MultiheadAttention",
    incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    key: str,
    value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
    """Helper for setting incremental state for an nn.Module."""
    if incremental_state is not None:
        result = module.set_incremental_state(incremental_state, key, value)
        if result is not None:
            incremental_state = result
    return incremental_state


def load_align_dict(replace_unk):
    if replace_unk is None:
        align_dict = None
    elif isinstance(replace_unk, str) and len(replace_unk) > 0:
        # Load alignment dictionary for unknown word replacement if it was passed as an argument.
        align_dict = {}
        with open(replace_unk, "r") as f:
            for line in f:
                cols = line.split()
                align_dict[cols[0]] = cols[1]
    else:
        # No alignment dictionary provided but we still want to perform unknown word replacement by copying the
        # original source word.
        align_dict = {}
    return align_dict


def print_embed_overlap(embed_dict, vocab_dict):
    embed_keys = set(embed_dict.keys())
    vocab_keys = set(vocab_dict.symbols)
    overlap = len(embed_keys & vocab_keys)
    logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))


def parse_embedding(embed_path):
    """Parse embedding text file into a dictionary of word and embedding tensors.

    The first line can have vocabulary size and dimension. The following lines
    should contain word and embedding separated by spaces.

    Example:
        2 5
        the -0.0230 -0.0264 0.0287 0.0171 0.1403
        at -0.0395 -0.1286 0.0275 0.0254 -0.0932
    """
    embed_dict = {}
    with open(embed_path) as f_embed:
        next(f_embed)  # skip header
        for line in f_embed:
            pieces = line.rstrip().split(" ")
            embed_dict[pieces[0]] = torch.Tensor(
                [float(weight) for weight in pieces[1:]]
            )
    return embed_dict


def load_embedding(embed_dict, vocab, embedding):
    for idx in range(len(vocab)):
        token = vocab[idx]
        if token in embed_dict:
            embedding.weight.data[idx] = embed_dict[token]
    return embedding


def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
    from fairseq import tokenizer

    # Tokens are strings here
    hypo_tokens = tokenizer.tokenize_line(hypo_str)
    # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
    src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
    for i, ht in enumerate(hypo_tokens):
        if ht == unk:
            src_token = src_tokens[alignment[i]]
            # Either take the corresponding value in the aligned dictionary or just copy the original value.
            hypo_tokens[i] = align_dict.get(src_token, src_token)
    return " ".join(hypo_tokens)


def post_process_prediction(
    hypo_tokens,
    src_str,
    alignment,
    align_dict,
    tgt_dict,
    remove_bpe=None,
    extra_symbols_to_ignore=None,
):
    hypo_str = tgt_dict.string(
        hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
    )
    if align_dict is not None:
        hypo_str = replace_unk(
            hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
        )
    if align_dict is not None or remove_bpe is not None:
        # Convert back to tokens for evaluating with unk replacement or without BPE
        # Note that the dictionary can be modified inside the method.
        hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
    return hypo_tokens, hypo_str, alignment


def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The series of casts and type-conversions here are carefully
    # balanced to both work with ONNX export and XLA. In particular XLA
    # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
    # how to handle the dtype kwarg in cumsum.
    mask = tensor.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx


def strip_pad(tensor, pad):
    return tensor[tensor.ne(pad)]


def buffered_arange(max):
    if not hasattr(buffered_arange, "buf"):
        buffered_arange.buf = torch.LongTensor()
    if max > buffered_arange.buf.numel():
        buffered_arange.buf.resize_(max)
        torch.arange(max, out=buffered_arange.buf)
    return buffered_arange.buf[:max]


def convert_padding_direction(
    src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
    assert right_to_left ^ left_to_right
    pad_mask = src_tokens.eq(padding_idx)
    if not pad_mask.any():
        # no padding, return early
        return src_tokens
    if left_to_right and not pad_mask[:, 0].any():
        # already right padded
        return src_tokens
    if right_to_left and not pad_mask[:, -1].any():
        # already left padded
        return src_tokens
    max_len = src_tokens.size(1)
    buffered = torch.empty(0).long()
    if max_len > 0:
        torch.arange(max_len, out=buffered)
    range = buffered.type_as(src_tokens).expand_as(src_tokens)
    num_pads = pad_mask.long().sum(dim=1, keepdim=True)
    if right_to_left:
        index = torch.remainder(range - num_pads, max_len)
    else:
        index = torch.remainder(range + num_pads, max_len)
    return src_tokens.gather(1, index)


def item(tensor):
    # tpu-comment: making this a no-op for xla devices.
if torch.is_tensor(tensor) and tensor.device.type == "xla": return tensor.detach() if hasattr(tensor, "item"): return tensor.item() if hasattr(tensor, "__getitem__"): return tensor[0] return tensor def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor: per_device_grads = {} norms = [] for grad in grads: device = grad.device cur_device_grads = per_device_grads.get(device) if cur_device_grads is None: cur_device_grads = [] per_device_grads[device] = cur_device_grads cur_device_grads.append(grad) for device in per_device_grads.keys(): cur_device_grads = per_device_grads[device] if device.type == "cuda": # TODO(msb) return has_inf has_inf = torch.zeros((1, 1), dtype=torch.int, device=device) with torch.cuda.device(device): norm = multi_tensor_l2norm( chunk_size, has_inf, [cur_device_grads], False ) norms.append(norm[0].to(torch.cuda.current_device())) else: norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads] total_norm = torch.norm(torch.stack(norms)) return total_norm @torch.no_grad() def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor: def grad_exists(p): return p is not None and getattr(p, "grad", None) is not None if isinstance(params, torch.Tensor): params = [params] params = list(params) grads = [ p.grad.detach() for p in params if grad_exists(p) and not hasattr(p, "expert") ] expert_grads = [ p.grad.detach() for p in params if grad_exists(p) and hasattr(p, "expert") ] if len(grads) == 0: if len(params) > 0: return params[0].new_tensor(0.0) else: return torch.tensor(0.0) if len(grads) == 1: total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) else: if multi_tensor_l2norm_available: total_norm = multi_tensor_total_norm(grads) else: if torch.cuda.is_available(): warnings.warn( "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; " "you may get better performance by installing NVIDIA's apex library" ) device = torch.cuda.current_device() elif grads[0].device.type == "xla": device = grads[0].device else: device = torch.device("cpu") total_norm = torch.norm( torch.stack( [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads] ) ) if aggregate_norm_fn is not None: total_norm = aggregate_norm_fn(total_norm) if max_norm > 0: max_norm = float(max_norm) clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1) for g in grads + expert_grads: g.mul_(clip_coef) return total_norm def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float("-inf")).type_as(t) def _match_types(arg1, arg2): """Convert the numerical argument to the same type as the other argument""" def upgrade(arg_number, arg_structure): if isinstance(arg_structure, tuple): return tuple([arg_number] * len(arg_structure)) elif isinstance(arg_structure, dict): arg = copy.deepcopy(arg_structure) for k in arg: arg[k] = upgrade(arg_number, arg_structure[k]) return arg else: return arg_number if isinstance(arg1, float) or isinstance(arg1, int): return upgrade(arg1, arg2), arg2 elif isinstance(arg2, float) or isinstance(arg2, int): return arg1, upgrade(arg2, arg1) return arg1, arg2 def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def map_value_update(d1, d2): updated_value = copy.deepcopy(d1) for key in d2: if key not in updated_value: updated_value[key] = d2[key] else: updated_value[key] = min(d1[key], d2[key]) return updated_value def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and item 
< minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: max_positions, arg = _match_types(max_positions, arg) if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) elif isinstance(arg, dict): max_positions = map_value_update(max_positions, arg) else: max_positions = tuple(map(nullsafe_min, zip(max_positions, arg))) return max_positions def import_user_module(args): module_path = getattr(args, "user_dir", None) if module_path is not None: module_path = os.path.abspath(args.user_dir) if not os.path.exists(module_path) and not os.path.isfile( os.path.dirname(module_path) ): fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: fairseq_rel_path = os.path.join( os.path.dirname(__file__), "..", args.user_dir ) if os.path.exists(fairseq_rel_path): module_path = fairseq_rel_path else: raise FileNotFoundError(module_path) # ensure that user modules are only imported once import_user_module.memo = getattr(import_user_module, "memo", set()) if module_path not in import_user_module.memo: import_user_module.memo.add(module_path) module_parent, module_name = os.path.split(module_path) if module_name not in sys.modules: sys.path.insert(0, module_parent) importlib.import_module(module_name) tasks_path = os.path.join(module_path, "tasks") if os.path.exists(tasks_path): from fairseq.tasks import import_tasks import_tasks(tasks_path, f"{module_name}.tasks") models_path = os.path.join(module_path, "models") if os.path.exists(models_path): from fairseq.models import import_models import_models(models_path, f"{module_name}.models") else: raise ImportError( "Failed to import --user-dir={} because the corresponding module name " "({}) is not globally unique. 
Please rename the directory to " "something unique and try again.".format(module_path, module_name) ) def softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.softmax(x.float(), dim=dim) else: return F.softmax(x, dim=dim, dtype=torch.float32) def log_softmax(x, dim: int, onnx_trace: bool = False): if onnx_trace: return F.log_softmax(x.float(), dim=dim) else: return F.log_softmax(x, dim=dim, dtype=torch.float32) def get_perplexity(loss, round=2, base=2): from fairseq.logging.meters import safe_round if loss is None: return 0.0 try: return safe_round(base ** loss, round) except OverflowError: return float("inf") def deprecation_warning(message, stacklevel=3): # don't use DeprecationWarning, since it's ignored by default warnings.warn(message, stacklevel=stacklevel) def relu_squared(x: torch.Tensor): return F.relu(x).pow(2) def get_activation_fn(activation: str) -> Callable: """Returns the activation function corresponding to `activation`""" from fairseq.modules import gelu, gelu_accurate if activation == "relu": return F.relu elif activation == "relu_squared": return relu_squared elif activation == "gelu": return gelu elif activation == "gelu_fast": deprecation_warning( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x elif activation == "swish": return torch.nn.SiLU() # instantiate the module so a callable activation is returned else: raise RuntimeError("--activation-fn {} not supported".format(activation)) def get_available_activation_fns() -> List: return [ "relu", "gelu", "gelu_fast", # deprecated "gelu_accurate", "tanh", "linear", ] @contextlib.contextmanager def model_eval(model): is_training = model.training model.eval() yield model.train(is_training) def has_parameters(module): try: next(module.parameters()) return True except StopIteration: return False def get_rng_state(): state = {"torch_rng_state": torch.get_rng_state()} if xm is not None: state["xla_rng_state"] = xm.get_rng_state() if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(state): torch.set_rng_state(state["torch_rng_state"]) if xm is not None: xm.set_rng_state(state["xla_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) class set_torch_seed(object): def __init__(self, seed): assert isinstance(seed, int) self.rng_state = get_rng_state() torch.manual_seed(seed) if xm is not None: xm.set_rng_state(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def __enter__(self): return self def __exit__(self, *exc): set_rng_state(self.rng_state) def parse_alignment(line): """ Parses a single line from the alignment file. Args: line (str): String containing the alignment of the format: <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> .. <src_idx_m>-<tgt_idx_m>. All indices are 0-indexed. Returns: torch.IntTensor: packed alignments of shape (2 * m).
""" alignments = line.strip().split() parsed_alignment = torch.IntTensor(2 * len(alignments)) for idx, alignment in enumerate(alignments): src_idx, tgt_idx = alignment.split("-") parsed_alignment[2 * idx] = int(src_idx) parsed_alignment[2 * idx + 1] = int(tgt_idx) return parsed_alignment def get_token_to_word_mapping(tokens, exclude_list): n = len(tokens) word_start = [int(token not in exclude_list) for token in tokens] word_idx = list(accumulate(word_start)) token_to_word = {i: word_idx[i] for i in range(n)} return token_to_word def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ( ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_invalid = ( ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1) ) src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad]) tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad]) alignment = [] if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent): attn_valid = attn[tgt_valid] attn_valid[:, src_invalid] = float("-inf") _, src_indices = attn_valid.max(dim=1) for tgt_idx, src_idx in zip(tgt_valid, src_indices): alignment.append( ( src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1, ) ) return alignment def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos): tgt_valid = ((tgt_sent != pad)).nonzero(as_tuple=False) src_valid = ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1) alignment = [] if len(tgt_valid) != 0 and len(src_valid) != 0: attn_valid = attn[tgt_valid, src_valid] alignment = [ ["{:.6f}".format(p) for p in src_probs.tolist()] for src_probs in attn_valid ] return alignment def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. 
""" if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def get_tpu_device(): return xm.xla_device() def tpu_data_loader(itr): import torch_xla.core.xla_model as xm import torch_xla.distributed.parallel_loader as pl from fairseq.data import iterators xm.rendezvous("tpu_data_loader") # wait for all workers xm.mark_step() device = xm.xla_device() return iterators.CountingIterator( pl.ParallelLoader(itr, [device]).per_device_loader(device), start=getattr(itr, "n", 0), total=len(itr), ) def is_xla_tensor(tensor): return torch.is_tensor(tensor) and tensor.device.type == "xla" def index_put(tensor, indices, value): if is_xla_tensor(tensor): for _ in range(indices.dim(), tensor.dim()): indices = indices.unsqueeze(-1) if indices.size(-1) < tensor.size(-1): indices = indices.expand_as(tensor) tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices) else: tensor[indices] = value return tensor def xla_device_to_cpu(dat): import torch_xla.core.xla_model as xm return xm._maybe_convert_to_cpu(dat) class CudaEnvironment(object): def __init__(self): cur_device = torch.cuda.current_device() prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device)) self.name = prop.name self.major = prop.major self.minor = prop.minor self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024 @staticmethod def pretty_print_cuda_env_list(cuda_env_list): """ Given a list of CudaEnviorments, pretty print them """ num_workers = len(cuda_env_list) center = "CUDA enviroments for all {} workers".format(num_workers) banner_len = 40 - len(center) // 2 first_line = "*" * banner_len + center + "*" * banner_len logger.info(first_line) for r, env in enumerate(cuda_env_list): logger.info( "rank {:3d}: ".format(r) + "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor) + "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB) + "name = {:40s}".format(env.name) ) logger.info(first_line) def csv_str_list(x): return x.split(",") def eval_str_list(x, type=float): if x is None: return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)] def eval_str_dict(x, type=dict): if x is None: return None if isinstance(x, str): x = eval(x) return x def eval_bool(x, default=False): if x is None: return default try: return bool(eval(x)) except TypeError: return default def reset_logging(): root = logging.getLogger() for handler in root.handlers: root.removeHandler(handler) root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper()) handler = logging.StreamHandler(sys.stdout) handler.setFormatter( logging.Formatter( fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) ) root.addHandler(handler) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def safe_hasattr(obj, k): """Returns True if the given key exists and is not None.""" return getattr(obj, k, None) is not None
26,723
30.814286
111
py
sign-topic
sign-topic-main/fairseq/hub_utils.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import os from typing import Any, Dict, Iterator, List import torch from omegaconf import open_dict from torch import nn from fairseq import utils from fairseq.data import encoders logger = logging.getLogger(__name__) def from_pretrained( model_name_or_path, checkpoint_file="model.pt", data_name_or_path=".", archive_map=None, **kwargs ): from fairseq import checkpoint_utils, file_utils if archive_map is not None: if model_name_or_path in archive_map: model_name_or_path = archive_map[model_name_or_path] if data_name_or_path is not None and data_name_or_path in archive_map: data_name_or_path = archive_map[data_name_or_path] # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe) # for each model if isinstance(model_name_or_path, dict): for k, v in model_name_or_path.items(): if k == "checkpoint_file": checkpoint_file = v elif ( k != "path" # only set kwargs that don't already have overrides and k not in kwargs ): kwargs[k] = v model_name_or_path = model_name_or_path["path"] model_path = file_utils.load_archive_file(model_name_or_path) # convenience hack for loading data and BPE codes from model archive if data_name_or_path.startswith("."): kwargs["data"] = os.path.abspath(os.path.join(model_path, data_name_or_path)) else: kwargs["data"] = file_utils.load_archive_file(data_name_or_path) for file, arg in { "code": "bpe_codes", "bpecodes": "bpe_codes", "sentencepiece.bpe.model": "sentencepiece_model", "merges.txt": "bpe_merges", "vocab.json": "bpe_vocab", }.items(): path = os.path.join(model_path, file) if os.path.exists(path): kwargs[arg] = path if "user_dir" in kwargs: utils.import_user_module(argparse.Namespace(user_dir=kwargs["user_dir"])) models, args, task = checkpoint_utils.load_model_ensemble_and_task( [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)], arg_overrides=kwargs, ) return { "args": args, "task": task, "models": models, } class GeneratorHubInterface(nn.Module): """ PyTorch Hub interface for generating sequences from a pre-trained translation or language model. 
""" def __init__(self, cfg, task, models): super().__init__() self.cfg = cfg self.task = task self.models = nn.ModuleList(models) self.src_dict = task.source_dictionary self.tgt_dict = task.target_dictionary # optimize model for generation for model in self.models: model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) self.align_dict = utils.load_align_dict(cfg.generation.replace_unk) self.tokenizer = encoders.build_tokenizer(cfg.tokenizer) self.bpe = encoders.build_bpe(cfg.bpe) self.max_positions = utils.resolve_max_positions( self.task.max_positions(), *[model.max_positions() for model in models] ) # this is useful for determining the device self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) @property def device(self): return self._float_tensor.device def translate( self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs ) -> List[str]: return self.sample(sentences, beam, verbose, **kwargs) def sample( self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs ) -> List[str]: if isinstance(sentences, str): return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0] tokenized_sentences = [self.encode(sentence) for sentence in sentences] batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs) return [self.decode(hypos[0]["tokens"]) for hypos in batched_hypos] def score( self, sentences: List[str], replace_newline_with_eos: bool = False, **kwargs ): if isinstance(sentences, str): return self.score( [sentences], replace_newline_with_eos=replace_newline_with_eos, **kwargs )[0] def encode(sentence): if replace_newline_with_eos: return torch.cat([self.encode(line) for line in sentence.splitlines()]) else: return self.encode(sentence) # NOTE: this doesn't support translation tasks currently tokenized_sentences = [encode(sentence) for sentence in sentences] return [ hypos[0] for hypos in self.generate( tokenized_sentences, score_reference=True, **kwargs ) ] def generate( self, tokenized_sentences: List[torch.LongTensor], beam: int = 5, verbose: bool = False, skip_invalid_size_inputs=False, inference_step_args=None, prefix_allowed_tokens_fn=None, **kwargs ) -> List[List[Dict[str, torch.Tensor]]]: if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1: return self.generate( tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs )[0] # build generator using current args as well as any kwargs gen_args = copy.deepcopy(self.cfg.generation) with open_dict(gen_args): gen_args.beam = beam for k, v in kwargs.items(): setattr(gen_args, k, v) generator = self.task.build_generator( self.models, gen_args, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, ) inference_step_args = inference_step_args or {} results = [] for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs): batch = utils.apply_to_sample(lambda t: t.to(self.device), batch) translations = self.task.inference_step( generator, self.models, batch, **inference_step_args ) for id, hypos in zip(batch["id"].tolist(), translations): results.append((id, hypos)) # sort output to match input order outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])] if verbose: def getarg(name, default): return getattr(gen_args, name, getattr(self.cfg, name, default)) for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs): src_str_with_unk = self.string(source_tokens) 
logger.info("S\t{}".format(src_str_with_unk)) for hypo in target_hypotheses: hypo_str = self.decode(hypo["tokens"]) logger.info("H\t{}\t{}".format(hypo["score"], hypo_str)) logger.info( "P\t{}".format( " ".join( map( lambda x: "{:.4f}".format(x), hypo["positional_scores"].tolist(), ) ) ) ) if hypo["alignment"] is not None and getarg( "print_alignment", False ): logger.info( "A\t{}".format( " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo["alignment"] ] ) ) ) return outputs def encode(self, sentence: str) -> torch.LongTensor: sentence = self.tokenize(sentence) sentence = self.apply_bpe(sentence) return self.binarize(sentence) def decode(self, tokens: torch.LongTensor) -> str: sentence = self.string(tokens) sentence = self.remove_bpe(sentence) return self.detokenize(sentence) def tokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.encode(sentence) return sentence def detokenize(self, sentence: str) -> str: if self.tokenizer is not None: sentence = self.tokenizer.decode(sentence) return sentence def apply_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.encode(sentence) return sentence def remove_bpe(self, sentence: str) -> str: if self.bpe is not None: sentence = self.bpe.decode(sentence) return sentence def binarize(self, sentence: str) -> torch.LongTensor: return self.src_dict.encode_line(sentence, add_if_not_exist=False).long() def string(self, tokens: torch.LongTensor) -> str: return self.tgt_dict.string(tokens) def _build_batches( self, tokens: List[List[int]], skip_invalid_size_inputs: bool ) -> Iterator[Dict[str, Any]]: lengths = torch.LongTensor([t.numel() for t in tokens]) batch_iterator = self.task.get_batch_iterator( dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs, disable_iterator_cache=True, ).next_epoch_itr(shuffle=False) return batch_iterator class BPEHubInterface(object): """PyTorch Hub interface for Byte-Pair Encoding (BPE).""" def __init__(self, bpe, **kwargs): super().__init__() args = argparse.Namespace(bpe=bpe, **kwargs) self.bpe = encoders.build_bpe(args) assert self.bpe is not None def encode(self, sentence: str) -> str: return self.bpe.encode(sentence) def decode(self, sentence: str) -> str: return self.bpe.decode(sentence) class TokenizerHubInterface(object): """PyTorch Hub interface for tokenization.""" def __init__(self, tokenizer, **kwargs): super().__init__() args = argparse.Namespace(tokenizer=tokenizer, **kwargs) self.tokenizer = encoders.build_tokenizer(args) assert self.tokenizer is not None def encode(self, sentence: str) -> str: return self.tokenizer.encode(sentence) def decode(self, sentence: str) -> str: return self.tokenizer.decode(sentence)
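# A hedged usage sketch (added for illustration; not part of the original
# module). "transformer.wmt14.en-fr", the Moses tokenizer, and subword-nmt
# BPE are assumed/hypothetical choices here; any archive reachable through a
# matching archive_map entry would be driven the same way, and bundle["args"]
# is assumed to be a config object accepted by GeneratorHubInterface.
def _demo_hub_translate(sentences=("Hello world!",)):
    bundle = from_pretrained(
        "transformer.wmt14.en-fr", tokenizer="moses", bpe="subword_nmt"
    )
    hub = GeneratorHubInterface(bundle["args"], bundle["task"], bundle["models"])
    # translate() encodes, beam-searches, and decodes back to plain strings.
    return hub.translate(list(sentences), beam=5)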
11,350
35.034921
88
py
sign-topic
sign-topic-main/fairseq/sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import torch from fairseq import utils class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__( self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None, symbols_to_strip_from_output=None, ): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() if eos is None else eos self.softmax_batch = softmax_batch or sys.maxsize assert self.softmax_batch > 0 self.compute_alignment = compute_alignment self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample["target"] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get("attn", None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample["target"] = tgt curr_prob = model.get_normalized_probs( bd, log_probs=len(models) == 1, sample=sample ).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs( curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt ) probs[idx:end] = tgt_probs.view(-1) idx = end sample["target"] = orig_target probs = probs.view(sample["target"].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if torch.is_tensor(attn): attn = attn.data else: attn = attn[0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = ( utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad) if sample["target"] is not None else None ) tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample["net_input"]["src_tokens"][i], sample["target"][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append( [ { "tokens": ref, "score": score_i, "attention": avg_attn_i, "alignment": alignment, "positional_scores": 
avg_probs_i, } ] ) return hypos
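# A minimal, hedged sketch of how SequenceScorer is typically driven (added
# for illustration). `task`, `models`, and `sample` are assumed to come from
# a loaded fairseq checkpoint and one batch of its data iterator.
def _demo_score_batch(task, models, sample):
    scorer = SequenceScorer(task.target_dictionary)
    hypos = scorer.generate(models, sample)
    # hypos[i][0]["positional_scores"] holds per-token log-probabilities and
    # hypos[i][0]["score"] is their average over the target length.
    return hypos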
5,450
34.396104
101
py
sign-topic
sign-topic-main/fairseq/binarizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import typing as tp from abc import ABC, abstractmethod from collections import Counter from dataclasses import dataclass from multiprocessing import Pool import torch from fairseq.data import Dictionary, indexed_dataset from fairseq.file_chunker_utils import Chunker, find_offsets from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line logger = logging.getLogger("binarizer") @dataclass class BinarizeSummary: """ Keep track of what's going on in the binarizer """ num_seq: int = 0 replaced: tp.Optional[Counter] = None num_tok: int = 0 @property def num_replaced(self) -> int: if self.replaced is None: return 0 return sum(self.replaced.values()) @property def replaced_percent(self) -> float: return 100 * self.num_replaced / self.num_tok def __str__(self) -> str: base = f"{self.num_seq} sents, {self.num_tok} tokens" if self.replaced is None: return base return f"{base}, {self.replaced_percent:.3}% replaced" def merge(self, other: "BinarizeSummary"): replaced = None if self.replaced is not None: replaced = self.replaced if other.replaced is not None: if replaced is None: replaced = other.replaced else: replaced += other.replaced self.replaced = replaced self.num_seq += other.num_seq self.num_tok += other.num_tok class Binarizer(ABC): """ A binarizer describes how to take a string and build a tensor out of it """ @abstractmethod def binarize_line( self, line: str, summary: BinarizeSummary, ) -> torch.IntTensor: ... def _worker_prefix(output_prefix: str, worker_id: int): return f"{output_prefix}.pt{worker_id}" class FileBinarizer: """ A file binarizer can take a file, tokenize it, and binarize each line to a tensor """ @classmethod def multiprocess_dataset( cls, input_file: str, dataset_impl: str, binarizer: Binarizer, output_prefix: str, vocab_size=None, num_workers=1, ) -> BinarizeSummary: final_summary = BinarizeSummary() offsets = find_offsets(input_file, num_workers) # find_offsets returns a list of positions [pos1, pos2, pos3, pos4] but we want pairs: # [(pos1, pos2), (pos2, pos3), (pos3, pos4)] to process the chunks with start/end info # we zip the list with itself shifted by one to get all the pairs.
(first_chunk, *more_chunks) = zip(offsets, offsets[1:]) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) worker_results = [ pool.apply_async( cls._binarize_chunk_and_finalize, args=( binarizer, input_file, start_offset, end_offset, _worker_prefix( output_prefix, worker_id, ), dataset_impl, ), kwds={ "vocab_size": vocab_size, } if vocab_size is not None else {}, ) for worker_id, (start_offset, end_offset) in enumerate( more_chunks, start=1 ) ] pool.close() pool.join() for r in worker_results: summ = r.get() final_summary.merge(summ) # do not close the bin file yet, as we still need to merge the worker results into it final_ds, summ = cls._binarize_file_chunk( binarizer, input_file, offset_start=first_chunk[0], offset_end=first_chunk[1], output_prefix=output_prefix, dataset_impl=dataset_impl, vocab_size=vocab_size if vocab_size is not None else None, ) final_summary.merge(summ) if num_workers > 1: for worker_id in range(1, num_workers): # merge the worker outputs worker_output_prefix = _worker_prefix( output_prefix, worker_id, ) final_ds.merge_file_(worker_output_prefix) try: os.remove(indexed_dataset.data_file_path(worker_output_prefix)) os.remove(indexed_dataset.index_file_path(worker_output_prefix)) except Exception as e: logger.error( f"couldn't remove {worker_output_prefix}.*", exc_info=e ) # now we can close the file idx_file = indexed_dataset.index_file_path(output_prefix) final_ds.finalize(idx_file) return final_summary @staticmethod def _binarize_file_chunk( binarizer: Binarizer, filename: str, offset_start: int, offset_end: int, output_prefix: str, dataset_impl: str, vocab_size=None, ) -> tp.Tuple[tp.Any, BinarizeSummary]: # (dataset builder, BinarizeSummary) """ creates a dataset builder and appends binarized items to it. This function does not finalize the builder; this is useful if you want to do other things with your bin file like appending/merging other files """ bin_file = indexed_dataset.data_file_path(output_prefix) ds = indexed_dataset.make_builder( bin_file, impl=dataset_impl, vocab_size=vocab_size, ) summary = BinarizeSummary() with Chunker( PathManager.get_local_path(filename), offset_start, offset_end ) as line_iterator: for line in line_iterator: ds.add_item(binarizer.binarize_line(line, summary)) return ds, summary @classmethod def _binarize_chunk_and_finalize( cls, binarizer: Binarizer, filename: str, offset_start: int, offset_end: int, output_prefix: str, dataset_impl: str, vocab_size=None, ): """ same as above, but also finalizes the builder """ ds, summ = cls._binarize_file_chunk( binarizer, filename, offset_start, offset_end, output_prefix, dataset_impl, vocab_size=vocab_size, ) idx_file = indexed_dataset.index_file_path(output_prefix) ds.finalize(idx_file) return summ class VocabularyDatasetBinarizer(Binarizer): """ Takes a Dictionary/Vocabulary, assigns ids to each token using the dictionary encode_line function.
""" def __init__( self, dict: Dictionary, tokenize: tp.Callable[[str], tp.List[str]] = tokenize_line, append_eos: bool = True, reverse_order: bool = False, already_numberized: bool = False, ) -> None: self.dict = dict self.tokenize = tokenize self.append_eos = append_eos self.reverse_order = reverse_order self.already_numberized = already_numberized super().__init__() def binarize_line( self, line: str, summary: BinarizeSummary, ): if summary.replaced is None: summary.replaced = Counter() def replaced_consumer(word, idx): if idx == self.dict.unk_index and word != self.dict.unk_word: summary.replaced.update([word]) if self.already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if self.reverse_order: id_list.reverse() if self.append_eos: id_list.append(self.dict.eos()) ids = torch.IntTensor(id_list) else: ids = self.dict.encode_line( line=line, line_tokenizer=self.tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=self.append_eos, reverse_order=self.reverse_order, ) summary.num_seq += 1 summary.num_tok += len(ids) return ids class AlignmentDatasetBinarizer(Binarizer): """ binarize by parsing a set of alignments and packing them in a tensor (see utils.parse_alignment) """ def __init__( self, alignment_parser: tp.Callable[[str], torch.IntTensor], ) -> None: super().__init__() self.alignment_parser = alignment_parser def binarize_line( self, line: str, summary: BinarizeSummary, ): ids = self.alignment_parser(line) summary.num_seq += 1 summary.num_tok += len(ids) return ids class LegacyBinarizer: @classmethod def binarize( cls, filename: str, dico: Dictionary, consumer: tp.Callable[[torch.IntTensor], None], tokenize: tp.Callable[[str], tp.List[str]] = tokenize_line, append_eos: bool = True, reverse_order: bool = False, offset: int = 0, end: int = -1, already_numberized: bool = False, ) -> tp.Dict[str, int]: binarizer = VocabularyDatasetBinarizer( dict=dico, tokenize=tokenize, append_eos=append_eos, reverse_order=reverse_order, already_numberized=already_numberized, ) return cls._consume_file( filename, binarizer, consumer, offset_start=offset, offset_end=end, ) @classmethod def binarize_alignments( cls, filename: str, alignment_parser: tp.Callable[[str], torch.IntTensor], consumer: tp.Callable[[torch.IntTensor], None], offset: int = 0, end: int = -1, ) -> tp.Dict[str, int]: binarizer = AlignmentDatasetBinarizer(alignment_parser) return cls._consume_file( filename, binarizer, consumer, offset_start=offset, offset_end=end, ) @staticmethod def _consume_file( filename: str, binarizer: Binarizer, consumer: tp.Callable[[torch.IntTensor], None], offset_start: int, offset_end: int, ) -> tp.Dict[str, int]: summary = BinarizeSummary() with Chunker( PathManager.get_local_path(filename), offset_start, offset_end ) as line_iterator: for line in line_iterator: consumer(binarizer.binarize_line(line, summary)) return { "nseq": summary.num_seq, "nunk": summary.num_replaced, "ntok": summary.num_tok, "replaced": summary.replaced, }
11,397
28.837696
99
py
sign-topic
sign-topic-main/fairseq/sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional import sys import torch import torch.nn as nn from fairseq import search, utils from fairseq.data import data_utils from fairseq.models import FairseqIncrementalDecoder from torch import Tensor from fairseq.ngram_repeat_block import NGramRepeatBlock class SequenceGenerator(nn.Module): def __init__( self, models, tgt_dict, beam_size=1, max_len_a=0, max_len_b=200, max_len=0, min_len=1, normalize_scores=True, len_penalty=1.0, unk_penalty=0.0, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, search_strategy=None, eos=None, symbols_to_strip_from_output=None, lm_model=None, lm_weight=1.0, ): """Generates translations of a given source sentence. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models, currently supports fairseq.models.TransformerModel for scripting beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length max_len (int, optional): the maximum length of the generated output (not including end-of-sentence) min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) """ super().__init__() if isinstance(models, EnsembleModel): self.model = models else: self.model = EnsembleModel(models) self.tgt_dict = tgt_dict self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() if eos is None else eos self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.max_len = max_len or self.model.max_decoder_positions() self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.temperature = temperature self.match_source_len = match_source_len if no_repeat_ngram_size > 0: self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size) else: self.repeat_ngram_blocker = None assert temperature > 0, "--temperature must be greater than 0" self.search = ( search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy ) # We only need to set src_lengths in LengthConstrainedBeamSearch. # As a module attribute, setting it would break in multithread # settings when the model is shared.
self.should_set_src_lengths = ( hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths ) self.model.eval() self.lm_model = lm_model self.lm_weight = lm_weight if self.lm_model is not None: self.lm_model.eval() def cuda(self): self.model.cuda() return self @torch.no_grad() def forward( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, bos_token: Optional[int] = None, ): """Generate a batch of translations. Args: sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, prefix_tokens, bos_token=bos_token) # TODO(myleott): unused, deprecate after pytorch-translate migration def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None): """Iterate over a batched dataset and yield individual translations. Args: cuda (bool, optional): use GPU for generation timer (StopwatchMeter, optional): time generations """ for sample in data_itr: s = utils.move_to_cuda(sample) if cuda else sample if "net_input" not in s: continue input = s["net_input"] # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in input.items() if k != "prev_output_tokens" } if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate(encoder_input) if timer is not None: timer.stop(sum(len(h[0]["tokens"]) for h in hypos)) for i, id in enumerate(s["id"].data): # remove padding src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad) ref = ( utils.strip_pad(s["target"].data[i, :], self.pad) if s["target"] is not None else None ) yield id, src, ref, hypos[i] @torch.no_grad() def generate( self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs ) -> List[List[Dict[str, Tensor]]]: """Generate translations. Match the api of other fairseq generators. 
Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens constraints (torch.LongTensor, optional): force decoder to include the list of constraints bos_token (int, optional): beginning of sentence token (default: self.eos) """ return self._generate(sample, **kwargs) def _generate( self, sample: Dict[str, Dict[str, Tensor]], prefix_tokens: Optional[Tensor] = None, constraints: Optional[Tensor] = None, bos_token: Optional[int] = None, ): incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(self.model.models_size) ], ) net_input = sample["net_input"] if "src_tokens" in net_input: src_tokens = net_input["src_tokens"] # length of the source text being the character length except EndOfSentence and pad src_lengths = ( (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) ) elif "source" in net_input: src_tokens = net_input["source"] src_lengths = ( net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1) if net_input["padding_mask"] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) elif "features" in net_input: src_tokens = net_input["features"] src_lengths = ( net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1) if net_input["padding_mask"] is not None else torch.tensor(src_tokens.size(-1)).to(src_tokens) ) else: raise Exception( "expected src_tokens or source in net input. input keys: " + str(net_input.keys()) ) # bsz: total number of sentences in beam # Note that src_tokens may have more than 2 dimensions (i.e. audio features) bsz, src_len = src_tokens.size()[:2] beam_size = self.beam_size if constraints is not None and not self.search.supports_constraints: raise NotImplementedError( "Target-side constraints were provided, but search method doesn't support them" ) # Initialize constraints, when active self.search.init_constraints(constraints, beam_size) max_len: int = -1 if self.match_source_len: max_len = src_lengths.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), self.max_len - 1, ) assert ( self.min_len <= max_len ), "min_len cannot be larger than max_len, please adjust these!" # compute the encoder output for each beam with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"): encoder_outs = self.model.forward_encoder(net_input) # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order) # ensure encoder_outs is a List. assert encoder_outs is not None # initialize buffers scores = ( torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float() ) # +1 for eos; pad is never chosen for scoring tokens = ( torch.zeros(bsz * beam_size, max_len + 2) .to(src_tokens) .long() .fill_(self.pad) ) # +2 for eos and pad tokens[:, 0] = self.eos if bos_token is None else bos_token attn: Optional[Tensor] = None # A list that indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. 
cands_to_ignore = ( torch.zeros(bsz, beam_size).to(src_tokens).eq(-1) ) # forward and backward-compatible False mask # list of completed sentences finalized = torch.jit.annotate( List[List[Dict[str, Tensor]]], [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)], ) # contains lists of dictionaries of information about the hypothesis being finalized at each step # a boolean array indicating if the sentence at the index is finished or not finished = [False for i in range(bsz)] num_remaining_sent = bsz # number of sentences remaining # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = ( (torch.arange(0, bsz) * beam_size) .unsqueeze(1) .type_as(tokens) .to(src_tokens.device) ) cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device) reorder_state: Optional[Tensor] = None batch_idxs: Optional[Tensor] = None original_batch_idxs: Optional[Tensor] = None if "id" in sample and isinstance(sample["id"], Tensor): original_batch_idxs = sample["id"] else: original_batch_idxs = torch.arange(0, bsz).type_as(tokens) for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as( batch_idxs ) reorder_state.view(-1, beam_size).add_( corr.unsqueeze(-1) * beam_size ) original_batch_idxs = original_batch_idxs[batch_idxs] self.model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = self.model.reorder_encoder_out( encoder_outs, reorder_state ) with torch.autograd.profiler.record_function( "EnsembleModel: forward_decoder" ): lprobs, avg_attn_scores = self.model.forward_decoder( tokens[:, : step + 1], encoder_outs, incremental_states, self.temperature, ) if self.lm_model is not None: lm_out = self.lm_model(tokens[:, : step + 1]) probs = self.lm_model.get_normalized_probs( lm_out, log_probs=True, sample=None ) probs = probs[:, -1, :] * self.lm_weight lprobs += probs lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs) lprobs[:, self.pad] = -math.inf # never select pad lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty # handle max length constraint if step >= max_len: lprobs[:, : self.eos] = -math.inf lprobs[:, self.eos + 1 :] = -math.inf # handle prefix tokens (possibly with different lengths) if ( prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len ): lprobs, tokens, scores = self._prefix_tokens( step, lprobs, scores, tokens, prefix_tokens, beam_size ) elif step < self.min_len: # minimum length constraint (does not apply if using prefix_tokens) lprobs[:, self.eos] = -math.inf # Record attention scores; only supported when avg_attn_scores is a Tensor if avg_attn_scores is not None: if attn is None: attn = torch.empty( bsz * beam_size, avg_attn_scores.size(1), max_len + 2 ).to(scores) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(lprobs) eos_bbsz_idx = torch.empty(0).to( tokens ) # indices of hypothesis ending with eos (finished sentences) eos_scores = torch.empty(0).to( scores ) # scores of hypothesis ending with eos (finished sentences) if self.should_set_src_lengths: self.search.set_src_lengths(src_lengths) if self.repeat_ngram_blocker is not None: lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step) #
Shape: (batch, cand_size) cand_scores, cand_indices, cand_beams = self.search.step( step, lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], tokens[:, : step + 1], original_batch_idxs, ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos # Shape of eos_mask: (batch size, beam size) eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf) eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask) # only consider eos when it's among the top beam_size indices # Now we know what beam item(s) to finish # Shape: 1d list of absolute-numbered eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents: List[int] = [] if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = self.finalize_hypos( step, eos_bbsz_idx, eos_scores, tokens, scores, finalized, finished, beam_size, attn, src_lengths, max_len, ) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if self.search.stop_on_max_len and step >= max_len: break assert step < max_len, f"{step} < {max_len}" # Remove finalized sentences (ones for which {beam_size} # finished hypotheses have been generated) from the batch. if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = torch.ones( bsz, dtype=torch.bool, device=cand_indices.device ) batch_mask[finalized_sents] = False # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it batch_idxs = torch.arange( bsz, device=cand_indices.device ).masked_select(batch_mask) # Choose the subset of the hypothesized constraints that will continue self.search.prune_sentences(batch_idxs) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) cand_scores = cand_scores[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths = src_lengths[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view( new_bsz * beam_size, attn.size(1), -1 ) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos hypos # and values < cand_size indicate candidate active hypos. # After, the min values per row are the top candidate active hypos # Rewrite the operator since the element wise or is not supported in torchscript. eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size])) active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just # the hypos with the smallest values in active_mask. # {active_hypos} indicates which {beam_size} hypotheses # from the list of {2 * beam_size} candidates were # selected.
Shapes: (batch size, beam size) new_cands_to_ignore, active_hypos = torch.topk( active_mask, k=beam_size, dim=1, largest=False ) # update cands_to_ignore to ignore any finalized hypos. cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] # Make sure there is at least one active item for each sentence in the batch. assert (~cands_to_ignore).any(dim=1).all() # update cands_to_ignore to ignore any finalized hypos # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam # can be selected more than once). active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos) active_scores = torch.gather(cand_scores, dim=1, index=active_hypos) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses # Set the tokens for each beam (can select the same row more than once) tokens[:, : step + 1] = torch.index_select( tokens[:, : step + 1], dim=0, index=active_bbsz_idx ) # Select the next token for each of them tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather( cand_indices, dim=1, index=active_hypos ) if step > 0: scores[:, :step] = torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx ) scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather( cand_scores, dim=1, index=active_hypos ) # Update constraints based on which candidates were selected for the next beam self.search.update_constraints(active_hypos) # copy attention for active hypotheses if attn is not None: attn[:, :, : step + 2] = torch.index_select( attn[:, :, : step + 2], dim=0, index=active_bbsz_idx ) # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): scores = torch.tensor( [float(elem["score"].item()) for elem in finalized[sent]] ) _, sorted_scores_indices = torch.sort(scores, descending=True) finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices] finalized[sent] = torch.jit.annotate( List[Dict[str, Tensor]], finalized[sent] ) return finalized def _prefix_tokens( self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int ): """Handle prefix tokens""" prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1)) prefix_mask = prefix_toks.ne(self.pad) lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs) lprobs[prefix_mask] = lprobs[prefix_mask].scatter( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask] ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[ :, 0, 1 : step + 1 ] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() # copy tokens, scores and lprobs from the first beam to all beams tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size) scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size) lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size) return lprobs, tokens, scores def replicate_first_beam(self, tensor, mask, beam_size: int): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) def finalize_hypos( self, 
step: int, bbsz_idx, eos_scores, tokens, scores, finalized: List[List[Dict[str, Tensor]]], finished: List[bool], beam_size: int, attn: Optional[Tensor], src_lengths, max_len: int, ): """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly. A sentence is finalized when {beam_size} finished items have been collected for it. Returns number of sentences (not beam items) being finalized. These will be removed from the batch and not processed further. Args: bbsz_idx (Tensor): """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors. # tokens is (batch * beam, max_len). So the index_select # gets the newly EOS rows, then selects cols 1..{step + 2} tokens_clone = tokens.index_select(0, bbsz_idx)[ :, 1 : step + 2 ] # skip the first index, which is EOS tokens_clone[:, step] = self.eos attn_clone = ( attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2] if attn is not None else None ) # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: eos_scores /= (step + 1) ** self.len_penalty # cum_unfin records which sentences in the batch are finished. # It helps match indexing between (a) the original sentences # in the batch and (b) the current, possibly-reduced set of # sentences. cum_unfin: List[int] = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx) unfin_idx = bbsz_idx // beam_size sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx) # Create a set of "{sent}{unfin_idx}", where # "unfin_idx" is the index in the current (possibly reduced) # list of sentences, and "sent" is the index in the original, # unreduced batch # For every finished beam item # sentence index in the current (possibly reduced) batch seen = (sent << 32) + unfin_idx unique_seen: List[int] = torch.unique(seen).tolist() if self.match_source_len: condition = step > torch.index_select(src_lengths, 0, unfin_idx) eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores) sent_list: List[int] = sent.tolist() for i in range(bbsz_idx.size()[0]): # An input sentence (among those in a batch) is finished when # beam_size hypotheses have been collected for it if len(finalized[sent_list[i]]) < beam_size: if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i] else: hypo_attn = torch.empty(0) finalized[sent_list[i]].append( { "tokens": tokens_clone[i], "score": eos_scores[i], "attention": hypo_attn, # src_len x tgt_len "alignment": torch.empty(0), "positional_scores": pos_scores[i], } ) newly_finished: List[int] = [] for unique_s in unique_seen: # check termination conditions for this sentence unique_sent: int = unique_s >> 32 unique_unfin_idx: int = unique_s - (unique_sent << 32) if not finished[unique_sent] and self.is_finished( step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size ): finished[unique_sent] = True newly_finished.append(unique_unfin_idx) return newly_finished def is_finished( self, step: int, unfin_idx: int, max_len: int, finalized_sent_len: int, beam_size: int, ): """ Check whether decoding for a sentence is finished, which occurs when the list of finalized sentences has reached the beam size, or when we reach the maximum length. 
""" assert finalized_sent_len <= beam_size if finalized_sent_len == beam_size or step == max_len: return True return False class EnsembleModel(nn.Module): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__() self.models_size = len(models) # method '__len__' is not supported in ModuleList for torch script self.single_model = models[0] self.models = nn.ModuleList(models) self.has_incremental: bool = False if all( hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models ): self.has_incremental = True def forward(self): pass def has_encoder(self): return hasattr(self.single_model, "encoder") def has_incremental_states(self): return self.has_incremental def max_decoder_positions(self): return min( [ m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions") ] + [sys.maxsize] ) @torch.jit.export def forward_encoder(self, net_input: Dict[str, Tensor]): if not self.has_encoder(): return None return [model.encoder.forward_torchscript(net_input) for model in self.models] @torch.jit.export def forward_decoder( self, tokens, encoder_outs: List[Dict[str, List[Tensor]]], incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], temperature: float = 1.0, ): log_probs = [] avg_attn: Optional[Tensor] = None encoder_out: Optional[Dict[str, List[Tensor]]] = None for i, model in enumerate(self.models): if self.has_encoder(): encoder_out = encoder_outs[i] # decode each model if self.has_incremental_states(): decoder_out = model.decoder.forward( tokens, encoder_out=encoder_out, incremental_state=incremental_states[i], ) else: if hasattr(model, "decoder"): decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out) else: decoder_out = model.forward(tokens) attn: Optional[Tensor] = None decoder_len = len(decoder_out) if decoder_len > 1 and decoder_out[1] is not None: if isinstance(decoder_out[1], Tensor): attn = decoder_out[1] else: attn_holder = decoder_out[1]["attn"] if isinstance(attn_holder, Tensor): attn = attn_holder elif attn_holder is not None: attn = attn_holder[0] if attn is not None: attn = attn[:, -1, :] decoder_out_tuple = ( decoder_out[0][:, -1:, :].div_(temperature), None if decoder_len <= 1 else decoder_out[1], ) probs = model.get_normalized_probs( decoder_out_tuple, log_probs=True, sample=None ) probs = probs[:, -1, :] if self.models_size == 1: return probs, attn log_probs.append(probs) if attn is not None: if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log( self.models_size ) if avg_attn is not None: avg_attn.div_(self.models_size) return avg_probs, avg_attn @torch.jit.export def reorder_encoder_out( self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order ): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ new_outs: List[Dict[str, List[Tensor]]] = [] if not self.has_encoder(): return new_outs for i, model in enumerate(self.models): assert encoder_outs is not None new_outs.append( model.encoder.reorder_encoder_out(encoder_outs[i], new_order) ) return new_outs @torch.jit.export def reorder_incremental_state( self, incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): if not self.has_incremental_states(): return for i, model in enumerate(self.models): model.decoder.reorder_incremental_state_scripting( incremental_states[i], new_order ) class SequenceGeneratorWithAlignment(SequenceGenerator): def __init__( self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs ): """Generates translations of a given source sentence. Produces alignments following "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: left_pad_target (bool, optional): Whether or not the hypothesis should be left padded or not when they are teacher forced for generating alignments. """ super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs) self.left_pad_target = left_pad_target if print_alignment == "hard": self.extract_alignment = utils.extract_hard_alignment elif print_alignment == "soft": self.extract_alignment = utils.extract_soft_alignment @torch.no_grad() def generate(self, models, sample, **kwargs): finalized = super()._generate(sample, **kwargs) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] beam_size = self.beam_size ( src_tokens, src_lengths, prev_output_tokens, tgt_tokens, ) = self._prepare_batch_for_alignment(sample, finalized) if any(getattr(m, "full_context_alignment", False) for m in self.model.models): attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens) else: attn = [ finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0) for i in range(bsz * beam_size) ] if src_tokens.device != "cpu": src_tokens = src_tokens.to("cpu") tgt_tokens = tgt_tokens.to("cpu") attn = [i.to("cpu") for i in attn] # Process the attn matrix to extract hard alignments. 
for i in range(bsz * beam_size): alignment = self.extract_alignment( attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos ) finalized[i // beam_size][i % beam_size]["alignment"] = alignment return finalized def _prepare_batch_for_alignment(self, sample, hypothesis): src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.shape[0] src_tokens = ( src_tokens[:, None, :] .expand(-1, self.beam_size, -1) .contiguous() .view(bsz * self.beam_size, -1) ) src_lengths = sample["net_input"]["src_lengths"] src_lengths = ( src_lengths[:, None] .expand(-1, self.beam_size) .contiguous() .view(bsz * self.beam_size) ) prev_output_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True, ) tgt_tokens = data_utils.collate_tokens( [beam["tokens"] for example in hypothesis for beam in example], self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False, ) return src_tokens, src_lengths, prev_output_tokens, tgt_tokens class EnsembleModelWithAlignment(EnsembleModel): """A wrapper around an ensemble of models.""" def __init__(self, models): super().__init__(models) def forward_align(self, src_tokens, src_lengths, prev_output_tokens): avg_attn = None for model in self.models: decoder_out = model(src_tokens, src_lengths, prev_output_tokens) attn = decoder_out[1]["attn"][0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(self.models) > 1: avg_attn.div_(len(self.models)) return avg_attn
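Illustrative only (not part of the repository file above): a minimal driver sketch for SequenceGenerator, assuming a fairseq ensemble has already been loaded elsewhere; `models`, `task`, and `sample` are placeholders for objects produced by the usual fairseq loading utilities.

# Hedged usage sketch; `models`, `task`, and `sample` are assumed to exist.
import torch
from fairseq.sequence_generator import SequenceGenerator

generator = SequenceGenerator(
    models,                   # list of trained fairseq models (an ensemble)
    task.target_dictionary,   # supplies the pad/unk/eos indices used above
    beam_size=5,
    max_len_b=200,            # max_len = max_len_a * src_len + max_len_b
)
with torch.no_grad():
    # finalized[i][j] is the j-th best hypothesis for sentence i,
    # already sorted by score (see the sort at the end of _generate)
    finalized = generator(sample)
best = finalized[0][0]
print(best["tokens"], best["score"], best["positional_scores"])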
39,404
38.843276
110
py
sign-topic
sign-topic-main/fairseq/ngram_repeat_block.py
# Originally from Microsoft Corporation. # Licensed under the MIT License. """ Wrapper for ngram_repeat_block cuda extension """ import math import warnings from typing import Dict, List, Optional import torch from torch import nn try: from fairseq import ngram_repeat_block_cuda EXTENSION_BUILT = True except ImportError: EXTENSION_BUILT = False def is_cuda_extension_usable() -> bool: """Check whether ngram_repeat_block_cuda is built properly""" if not EXTENSION_BUILT or not torch.cuda.is_available(): return False bsz = 2 tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda") lprobs = torch.rand((8, 12), device="cuda") try: outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3) outputs = outputs + 4 # This line breaks if the extension is built incorrectly. return True except RuntimeError: warnings.warn( "NGramRepeatBlock extension must be rebuilt. " 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace' ) return False class NGramRepeatBlock(nn.Module): """Wrapper class for calling ngram_repeat_block cuda extension""" def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True): super().__init__() self.use_extension = is_cuda_extension_usable() if use_extension else False self.no_repeat_ngram_size = no_repeat_ngram_size def reset_parameters(self): pass @torch.jit.unused def call_cuda_extension( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): return ngram_repeat_block_cuda.forward( tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size ) def forward( self, tokens, lprobs, bsz: int, beam_size: int, step: int, ): """ Args: tokens(Tensor): input tokens (bsz*beam, seq_len) lprobs(Tensor): likelihood probability, expected to be updated in place (bsz*beam, vocab_size) bsz(int): batch size beam_size(int): beam size step(int): current step """ msg = f"expected {bsz * beam_size} got" assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}" assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}" if self.use_extension: return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step) else: return self._no_repeat_ngram( tokens, lprobs, bsz, beam_size, step, ) def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int): """For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf""" gen_ngrams: List[Dict[str, List[int]]] = [ torch.jit.annotate(Dict[str, List[int]], {}) for bbsz_idx in range(bsz * beam_size) ] cpu_tokens = tokens.cpu() for bbsz_idx in range(bsz * beam_size): gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist() for ngram in self.transpose_list( [gen_tokens[i:] for i in range(self.no_repeat_ngram_size)] ): key = ",".join([str(x) for x in ngram[:-1]]) gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get( key, torch.jit.annotate(List[int], []) ) + [ngram[-1]] if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [ self.calculate_banned_tokens( tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx ) for bbsz_idx in range(bsz * beam_size) ] else: banned_tokens = [ torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size) ] for bbsz_idx in range(bsz * beam_size): lprobs[bbsz_idx][ torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64) ] = torch.tensor(-math.inf).to(lprobs) return lprobs @staticmethod def calculate_banned_tokens( tokens, step: int, gen_ngrams: List[Dict[str,
List[int]]], no_repeat_ngram_size: int, bbsz_idx: int, ): tokens_list: List[int] = tokens[ bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1 ].tolist() # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = ",".join([str(x) for x in tokens_list]) return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], [])) @staticmethod def transpose_list(l: List[List[int]]): # GeneratorExp aren't supported in TS so ignoring the lint min_len = min([len(x) for x in l]) # noqa l2 = [[row[i] for row in l] for i in range(min_len)] return l2
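A small, self-contained sketch (not in the file above) exercising the pure-PyTorch fallback path of NGramRepeatBlock; the toy shapes follow the forward() contract: tokens is (bsz*beam, seq_len) and lprobs is (bsz*beam, vocab_size).

import torch
from fairseq.ngram_repeat_block import NGramRepeatBlock

# use_extension=False forces the _no_repeat_ngram fallback, so this runs on CPU
blocker = NGramRepeatBlock(no_repeat_ngram_size=2, use_extension=False)
bsz, beam, vocab = 1, 2, 6
# Both beams produced the bigram (4, 5) and then 4 again, so emitting 5 next
# would repeat the bigram and must be banned.
tokens = torch.tensor([[4, 5, 4], [4, 5, 4]], dtype=torch.long)
lprobs = torch.zeros(bsz * beam, vocab)
step = tokens.size(1) - 1  # index of the current decoding step
out = blocker(tokens, lprobs, bsz, beam, step)
assert torch.isinf(out[:, 5]).all()  # the banned continuation is now -inf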
5,286
34.013245
102
py
sign-topic
sign-topic-main/fairseq/options.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse from pathlib import Path from typing import Callable, List, Optional, Union import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, EMAConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass # this import is for backward compatibility from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa def get_preprocessing_parser(default_task="translation"): parser = get_parser("Preprocessing", default_task) add_preprocess_args(parser) return parser def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) add_ema_args(parser) return parser def get_generation_parser(interactive=False, default_task="translation"): parser = get_parser("Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_generation_args(parser) add_checkpoint_args(parser) if interactive: add_interactive_args(parser) return parser def get_speech_generation_parser(default_task="text_to_speech"): parser = get_parser("Speech Generation", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_speech_generation_args(parser) return parser def get_interactive_generation_parser(default_task="translation"): return get_generation_parser(interactive=True, default_task=default_task) def get_eval_lm_parser(default_task="language_modeling"): parser = get_parser("Evaluate Language Model", default_task) add_dataset_args(parser, gen=True) add_distributed_training_args(parser, default_world_size=1) add_eval_lm_args(parser) return parser def get_validation_parser(default_task=None): parser = get_parser("Validation", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser, default_world_size=1) group = parser.add_argument_group("Evaluation") gen_parser_from_dataclass(group, CommonEvalConfig()) return parser def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. 
args = parse_args_and_arch( parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False, ) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace( **{k: v for k, v in vars(args).items() if v is not None} ) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args(input_args) utils.import_user_module(usr_args) if modify_parser is not None: modify_parser(parser) # The parser doesn't know about model/criterion/optimizer-specific args, so # we parse twice. First we parse the model/criterion/optimizer, then we # parse a second time after adding the *-specific arguments. # If input_args is given, we will parse those args instead of sys.argv. args, _ = parser.parse_known_args(input_args) # Add model-specific args to parser. if hasattr(args, "arch"): model_specific_group = parser.add_argument_group( "Model-specific configuration", # Only include attributes which are explicitly given as command-line # arguments or which have default values. argument_default=argparse.SUPPRESS, ) if args.arch in ARCH_MODEL_REGISTRY: ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) elif args.arch in MODEL_REGISTRY: MODEL_REGISTRY[args.arch].add_args(model_specific_group) else: raise RuntimeError() if hasattr(args, "task"): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, "use_bmuf", False): # hack to support extra args for block distributed data parallelism from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) # Add *-specific args to parser. from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) elif hasattr(cls, "__dataclass"): gen_parser_from_dataclass(parser, cls.__dataclass()) # Modify the parser a second time, since defaults may have been reset if modify_parser is not None: modify_parser(parser) # Parse a second time. if parse_known: args, extra = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None # Post-process args. 
if ( hasattr(args, "batch_size_valid") and args.batch_size_valid is None ) or not hasattr(args, "batch_size_valid"): args.batch_size_valid = args.batch_size if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: args.max_tokens_valid = args.max_tokens if getattr(args, "memory_efficient_fp16", False): args.fp16 = True if getattr(args, "memory_efficient_bf16", False): args.bf16 = True args.tpu = getattr(args, "tpu", False) args.bf16 = getattr(args, "bf16", False) if args.bf16: args.tpu = True if args.tpu and args.fp16: raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") if getattr(args, "seed", None) is None: args.seed = 1 # default seed for training args.no_seed_provided = True else: args.no_seed_provided = False if getattr(args, "update_epoch_batch_itr", None) is None: if hasattr(args, "grouped_shuffling"): args.update_epoch_batch_itr = args.grouped_shuffling else: args.grouped_shuffling = False args.update_epoch_batch_itr = False # Apply architecture configuration. if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return args, extra else: return args def get_parser(desc, default_task="translation"): # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) gen_parser_from_dataclass(parser, CommonConfig()) from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): parser.add_argument( "--" + registry_name.replace("_", "-"), default=REGISTRY["default"], choices=REGISTRY["registry"].keys(), ) # Task definitions can be found under fairseq/tasks/ from fairseq.tasks import TASK_REGISTRY parser.add_argument( "--task", metavar="TASK", default=default_task, choices=TASK_REGISTRY.keys(), help="task", ) # fmt: on return parser def add_preprocess_args(parser): group = parser.add_argument_group("Preprocessing") # fmt: off group.add_argument("-s", "--source-lang", default=None, metavar="SRC", help="source language") group.add_argument("-t", "--target-lang", default=None, metavar="TARGET", help="target language") group.add_argument("--trainpref", metavar="FP", default=None, help="train file prefix (also used to build dictionaries)") group.add_argument("--validpref", metavar="FP", default=None, help="comma separated, valid file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--testpref", metavar="FP", default=None, help="comma separated, test file prefixes " "(words missing from train set are replaced with <unk>)") group.add_argument("--align-suffix", metavar="FP", default=None, help="alignment file suffix") group.add_argument("--destdir", metavar="DIR", default="data-bin", help="destination dir") group.add_argument("--thresholdtgt", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--thresholdsrc", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown") group.add_argument("--tgtdict", metavar="FP", help="reuse given target dictionary") group.add_argument("--srcdict", metavar="FP", help="reuse given source dictionary") group.add_argument("--nwordstgt", metavar="N", default=-1, 
type=int, help="number of target words to retain") group.add_argument("--nwordssrc", metavar="N", default=-1, type=int, help="number of source words to retain") group.add_argument("--alignfile", metavar="ALIGN", default=None, help="an alignment file (optional)") parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument("--joined-dictionary", action="store_true", help="Generate joined dictionary") group.add_argument("--only-source", action="store_true", help="Only process the source language") group.add_argument("--padding-factor", metavar="N", default=8, type=int, help="Pad dictionary size to be multiple of N") group.add_argument("--workers", metavar="N", default=1, type=int, help="number of parallel workers") group.add_argument("--dict-only", action='store_true', help="if true, only builds a dictionary and then exits") # fmt: on return parser def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group("dataset_data_loading") gen_parser_from_dataclass(group, DatasetConfig()) # fmt: on return group def add_distributed_training_args(parser, default_world_size=None): group = parser.add_argument_group("distributed_training") if default_world_size is None: default_world_size = max(1, torch.cuda.device_count()) gen_parser_from_dataclass( group, DistributedTrainingConfig(distributed_world_size=default_world_size) ) return group def add_optimization_args(parser): group = parser.add_argument_group("optimization") # fmt: off gen_parser_from_dataclass(group, OptimizationConfig()) # fmt: on return group def add_checkpoint_args(parser): group = parser.add_argument_group("checkpoint") # fmt: off gen_parser_from_dataclass(group, CheckpointConfig()) # fmt: on return group def add_common_eval_args(group): gen_parser_from_dataclass(group, CommonEvalConfig()) def add_eval_lm_args(parser): group = parser.add_argument_group("LM Evaluation") add_common_eval_args(group) gen_parser_from_dataclass(group, EvalLMConfig()) def add_generation_args(parser): group = parser.add_argument_group("Generation") add_common_eval_args(group) gen_parser_from_dataclass(group, GenerationConfig()) return group def add_speech_generation_args(parser): group = parser.add_argument_group("Speech Generation") add_common_eval_args(group) # NOTE: remove_bpe is not needed # fmt: off group.add_argument('--eos_prob_threshold', default=0.5, type=float, help='terminate when eos probability exceeds this') # fmt: on return group def add_interactive_args(parser): group = parser.add_argument_group("Interactive") gen_parser_from_dataclass(group, InteractiveConfig()) def add_model_args(parser): group = parser.add_argument_group("Model configuration") # fmt: off # Model definitions can be found under fairseq/models/ # # The model architecture can be specified in several ways. 
# In increasing order of priority: # 1) model defaults (lowest priority) # 2) --arch argument # 3) --encoder/decoder-* arguments (highest priority) from fairseq.models import ARCH_MODEL_REGISTRY group.add_argument('--arch', '-a', metavar='ARCH', choices=ARCH_MODEL_REGISTRY.keys(), help='model architecture') # fmt: on return group def get_args( data: Union[str, Path], task: str = "translation", arch: str = "transformer", **overrides ): parser = get_training_parser(task) args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch]) for k, v in overrides.items(): setattr(args, k, v) return args def add_ema_args(parser): group = parser.add_argument_group("EMA configuration") gen_parser_from_dataclass(group, EMAConfig())
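For illustration (not part of options.py): get_args() above wires these helpers together, parsing a synthetic command line and then applying keyword overrides via setattr. "data-bin/toy" is a placeholder directory; nothing is read from disk at parse time.

from fairseq.options import get_args

args = get_args(
    "data-bin/toy",        # placeholder positional data directory
    task="translation",
    arch="transformer",
    max_tokens=4096,       # extra kwargs become namespace attributes
)
print(args.task, args.arch, args.max_tokens)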
15,440
36.297101
88
py
sign-topic
sign-topic-main/fairseq/token_generation_constraints.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Implements tracking of constraints for a beam item. A list of constraints is given as a list of one or more token sequences, each of length at least one token. For example, for an input sentence > Die maschinelle Übersetzung ist schwer zu kontrollieren. We could have the constraints: * to influence * hard There are two implementations: * OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints. * UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints. The difference is that in the first, the constraints are assumed to be in order; the algorithm will permit zero or more tokens between them. In the second, the constraints are not ordered, so many orderings will be explored. The same sequence can be present any number of times, and will appear that many times in the output. """ from collections import Counter from typing import List, Optional, Set, Tuple import torch class ConstraintState: def __init__(self): pass def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor: """Takes a list of list of constraints in tensor form (a list of tensor constraints for each sentence) and transforms it into a packed Tensor. For example, here is a batch of size 3 with 3, 0, and 1 constraints: [ [ [3 1 2], [3], [4 5 6 7], ] [], [ [1 8 9 10 1 4 11 12], ] ] Its corresponding packed structure is: [ [ 3 3 1 2 0 3 0 4 5 6 7 0], [ 0 0 0 0 0 0 0 0 0 0 0 0], [ 1 1 8 9 10 1 4 11 12 0 0 0] ] The packed tensor has shape (batch size, maxlen), where maxlen is defined below. Each row contains concatenated constraint tokens for that sentence, with 0 appended after each constraint. The first item in each row is the number of constraints for that sentence. So maxlen is the maximum of (number of constraints) + (sum length of constraints) + 1, across all sentences in the batch. """ # The maximum word length of concatenated constraints for any sentence max_constraints_len = 1 for sentence_constraints in batch_constraints: if len(sentence_constraints): # number of constraints, plus sum of constraint lengths, plus a zero after each constraints_len = ( 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints) ) max_constraints_len = max(max_constraints_len, constraints_len) batch_size = len(batch_constraints) constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long() for i, sentence_constraints in enumerate(batch_constraints): constraints_tensor[i, 0] = len(sentence_constraints) offset = 1 for j, constraint in enumerate(sentence_constraints): this_len = constraint.size(0) constraints_tensor[i, offset : offset + this_len] = constraint offset += this_len + 1 return constraints_tensor.long() def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]: """ Transforms *one row* of a packed constraint tensor (e.g., for one sentence in the batch) into a list of constraint tensors. """ constraint_list = [] num_constraints = constraint_tensor[0] constraints = constraint_tensor.tolist() offset = 1 for i in range(num_constraints): where = constraints.index(0, offset) constraint_list.append(constraint_tensor[offset:where]) offset = where + 1 return constraint_list class ConstraintNode: """ Represents a node in a trie managing unordered constraints.
""" def __init__(self, token: int = None, parent=None): # The token associate with this node (None for the root) self.token = int(token) if token is not None else None # The parent (None at the root) self.parent = parent # Whether this node is a completed constraint self.terminal = 0 # List of child nodes self.children = {} # The cumulative number of constraints from this point in the # trie forward self.num_constraints = 0 @property def id(self): return self.token def __str__(self): term = self.terminal != 0 return f"[{self.token}].{term}#{self.num_constraints}" def __getitem__(self, key: int): return self.children.get(key, None) def next_tokens(self) -> Set[int]: """The set of child labels.""" return set(self.children.keys()) @staticmethod def create(constraints: List[List[int]]): root = ConstraintNode() for sequence in constraints: root.add_sequence(sequence) return root @staticmethod def print_graph(node: "ConstraintNode"): if len(node.children) == 0: return str(node) else: s = f"({node}" for child in node.children.values(): s += " " + ConstraintNode.print_graph(child) s += ")" return s def token_counts(self) -> Counter: """Returns a counter of the number of times each token is used in a constraint. """ token_counts = Counter() kids = list(self.children.values()) while len(kids) > 0: kid = kids.pop() token_counts[kid.id] += kid.num_constraints kids += list(kid.children.values()) return token_counts def tokens(self) -> Set[int]: """Returns the set of tokens in constraints.""" return set(self.token_counts().keys()) def add_sequence(self, sequence: List[int]): """Adds a constraint, represented as a list of integers, to the trie.""" assert len(sequence) > 0 token = int(sequence[0]) if token not in self.children: self.children[token] = ConstraintNode(token, parent=self) node = self.children[token] if len(sequence) == 1: node.terminal += 1 node.num_constraints += 1 parent = node.parent while parent is not None: parent.num_constraints += 1 parent = parent.parent else: node.add_sequence(sequence[1:]) class UnorderedConstraintState(ConstraintState): """ Records progress through the set of constraints for each item in the beam using a trie. """ def __init__(self, node: ConstraintNode, copy_from: "ConstraintState" = None): self.node = node if copy_from is None: # The root node self.root = node # The set of states in the graph that have been completed self.completed = Counter() # The... 
self.generated = Counter() # The list of tokens we need to generate self.needed_tokens = self.root.tokens() else: self.completed = Counter(copy_from.completed) self.generated = Counter(copy_from.generated) self.root = copy_from.root # Mark the node as generated if self.node != self.root: self.generated[node] += 1 @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) constraint_trie_root = ConstraintNode.create(constraint_list) return UnorderedConstraintState(constraint_trie_root) def __str__(self): gen_str = ",".join([str(node) for node in self.generated]) return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}" def __copy__(self): copied_state = UnorderedConstraintState(self.node, copy_from=self) return copied_state def copy(self): return self.__copy__() @property def name(self): if self.node.id is None: return "ROOT" else: return str(self.node.id) @property def is_root(self): return self.node == self.root @property def bank(self): return sum(self.generated.values()) @property def num_completed(self): """The number of constraints (not constraint tokens) that are completed. In addition to the already-completed states, we need to account for the current state, which might get marked as completed when another token is generated. """ in_final = self.node.terminal and self.completed[self.node] < self.node.terminal return sum(self.completed.values()) + in_final @property def finished(self): return self.root.num_constraints - self.num_completed == 0 @property def token_counts(self): return self.root.token_counts() @property def tokens(self): return self.root.tokens() @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" if self.node != self.root: return self.root.next_tokens().union(self.node.next_tokens()) else: return self.root.next_tokens() def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) next_state = None child = self.node[token] if child is not None and self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) def rewind(): """If we're mid-trie and an "illegal" token is chosen next, we need to reset our state to the root state. However, along the way, we need to check whether a prefix of the current trie state represents a state we could mark as completed. 
""" node = self.node while node != self.root: if node.terminal and self.completed[node] < node.terminal: next_state.completed[node] += 1 return next_state.generated[node] -= 1 node = node.parent # Fall off the graph, check the root if next_state is None and token in self.root.next_tokens(): child = self.root[token] # We can only traverse this edge if it's not saturated if self.generated[child] < child.num_constraints: next_state = UnorderedConstraintState(child, copy_from=self) else: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() elif next_state is None: next_state = UnorderedConstraintState(self.root, copy_from=self) # Rewind rewind() return next_state class ConstraintSequence: def __init__(self, sequences: List[List[int]]): """Represents a set of possibly multitoken constraints by concatenating them and internally recording the end points. """ self.sequences = [] self.endpoints = [] self.num_tokens = 0 self.tokens = set() for sequence in sequences: for token in sequence: self.tokens.add(token) self.num_tokens += len(sequence) self.endpoints += [False for x in range(len(sequence) - 1)] + [True] self.sequences += sequence def __getitem__(self, key: int): return self.sequences[key] def __len__(self): return len(self.sequences) def __str__(self): return str(self.sequences) class OrderedConstraintState(ConstraintState): """ Records progress through the set of linear nonbranching constraints with gaps. """ def __init__(self, sequence: ConstraintSequence, state: int = -1): self.sequence = sequence self.state = state @staticmethod def create(constraint_tensor: torch.Tensor): constraint_list = unpack_constraints(constraint_tensor) return OrderedConstraintState(ConstraintSequence(constraint_list), -1) def __str__(self): return f"{self.state}/{self.bank}x{self.num_completed}" def __copy__(self): return OrderedConstraintState(self.sequence, self.state) def copy(self): return self.__copy__() @property def num_completed(self): if self.state == -1: return 0 count = len( list(filter(lambda x: x, self.sequence.endpoints[0 : self.state + 1])) ) return count @property def is_root(self): return self.state == -1 @property def name(self): if self.state == -1: return "ROOT" else: return str(self.sequence[self.state]) @property def bank(self) -> int: return self.state + 1 @property def finished(self): return self.state + 1 == len(self.sequence) @property def token_counts(self): return self.sequence.token_counts() @property def tokens(self): return self.sequence.tokens @property def num_constraint_tokens(self): return sum(self.token_counts.values()) def next_tokens(self) -> Set[int]: """Returns the list of tokens that could come next. These are (a) all tokens extending the root state and, for non-root states, additionally all tokens extending the current state.""" tokens = set() if self.state > 0: tokens.add(self.sequence[0]) if not self.finished: tokens.add(self.sequence[self.state + 1]) return tokens def advance(self, token: int): """Reads in a token and advances the state. Here's how it works. We can advance to the next state if: - there is a matching child - its path isn't blocked A path is blocked when all constraints that are descendants of that node have already been generated, in the current state. If we are not able to advance from the current state, we "fall off the graph" and return to the root state. There, we again try to advance, checking the same criteria. In any case, when falling off the graph, we need to do some bookkeeping. 
We: - check whether any constraints were met (all prefixes of current state) - if one is found, mark it as completed - adjust visited nodes accordingly """ token = int(token) # print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="") if self.finished: # Accept anything next_state = self.copy() elif self.sequence[self.state + 1] == token: # Advance to the next token next_state = OrderedConstraintState(self.sequence, self.state + 1) elif self.sequence.endpoints[self.state]: # Accept anything between constraints (*) next_state = self.copy() elif token == self.sequence[0]: # Start over having generated the first token next_state = OrderedConstraintState(self.sequence, 0) else: # Start over from the root next_state = OrderedConstraintState(self.sequence, -1) return next_state
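A small sketch (not in the file above) round-tripping constraints through pack_constraints()/unpack_constraints() and then driving an OrderedConstraintState across a token stream that satisfies both constraints in order.

import torch
from fairseq.token_generation_constraints import (
    OrderedConstraintState,
    pack_constraints,
    unpack_constraints,
)

# One sentence with two constraints; the packed row is [2, 3, 1, 2, 0, 4, 0]
batch = [[torch.tensor([3, 1, 2]), torch.tensor([4])]]
packed = pack_constraints(batch)
assert [t.tolist() for t in unpack_constraints(packed[0])] == [[3, 1, 2], [4]]

state = OrderedConstraintState.create(packed[0])
for tok in [3, 1, 2, 4]:   # generate both constraints, in order
    state = state.advance(tok)
assert state.finished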
16,555
31.654832
96
py
sign-topic
sign-topic-main/fairseq/file_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Utilities for working with the local dataset cache. This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_. and `huggingface <https://github.com/huggingface>`_. """ import fnmatch import json import logging import os import shutil import tarfile import tempfile from functools import partial, wraps from hashlib import sha256 from io import open try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv( "TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch") ) ) default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq") try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)) except (AttributeError, ImportError): PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path) CONFIG_NAME = "config.json" WEIGHTS_NAME = "pytorch_model.bin" logger = logging.getLogger(__name__) # pylint: disable=invalid-name def load_archive_file(archive_file): # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=None) except EnvironmentError: logger.info( "Archive name '{}' was not found in archive name list. " "We assumed '{}' was a path or URL but couldn't find any file " "associated to this path or URL.".format( archive_file, archive_file, ) ) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info( "loading archive file {} from cache at {}".format( archive_file, resolved_archive_file ) ) # Extract archive to temp dir and replace .tar.bz2 if necessary tempdir = None if not os.path.isdir(resolved_archive_file): tempdir = tempfile.mkdtemp() logger.info( "extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir ) ) ext = os.path.splitext(archive_file)[1][1:] with tarfile.open(resolved_archive_file, "r:" + ext) as archive: top_dir = os.path.commonprefix(archive.getnames()) archive.extractall(tempdir) os.remove(resolved_archive_file) shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file) shutil.rmtree(tempdir) return resolved_archive_file def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the URL's, delimited by a period. """ url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def cached_path_from_pm(url_or_filename): """ Tries to cache the specified URL using PathManager class. Returns the cached path if success otherwise failure. """ try: from fairseq.file_io import PathManager local_path = PathManager.get_local_path(url_or_filename) return local_path except Exception: return None def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ("http", "https", "s3"): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: cached_path = cached_path_from_pm(url_or_filename) if cached_path: return cached_path # Something unknown raise ValueError( "unable to parse {} as a URL or as a local path".format(url_or_filename) ) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): from botocore.exceptions import ClientError try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url): """Check ETag on S3 object.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file): """Pull a file directly from S3.""" import boto3 s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def request_wrap_timeout(func, url): import requests for attempt, timeout in enumerate([10, 20, 40, 60, 60]): try: return func(timeout=timeout) except requests.exceptions.Timeout as e: logger.warning( "Request for %s timed-out (attempt %d). 
Retrying with a timeout of %d secs", url, attempt, timeout, exc_info=e, ) continue raise RuntimeError(f"Unable to fetch file {url}") def http_get(url, temp_file): import requests from tqdm import tqdm req = request_wrap_timeout(partial(requests.get, url, stream=True), url) content_length = req.headers.get("Content-Length") total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_FAIRSEQ_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: try: import requests response = request_wrap_timeout( partial(requests.head, url, allow_redirects=True), url ) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") except RuntimeError: etag = None filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*") matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files)) if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, "wb") as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: output_string = json.dumps(meta) meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename): """ Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. """ collection = set() with open(filename, "r", encoding="utf-8") as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path, dot=True, lower=True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
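A deterministic sketch (not in the file above) of the cache-naming scheme implemented by url_to_filename(): the same URL and optional ETag always hash to the same local filename, which is how get_from_cache() deduplicates downloads.

from fairseq.file_utils import url_to_filename

name = url_to_filename("https://example.com/model.tar.gz", etag='"abc123"')
same = url_to_filename("https://example.com/model.tar.gz", etag='"abc123"')
assert name == same and "." in name  # sha256(url) + "." + sha256(etag)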
11,718
30.587601
92
py
sign-topic
sign-topic-main/fairseq/incremental_decoding_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import uuid from typing import Dict, Optional from torch import Tensor class FairseqIncrementalState(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.init_incremental_state() def init_incremental_state(self): self._incremental_state_id = str(uuid.uuid4()) def _get_full_incremental_state_key(self, key: str) -> str: return "{}.{}".format(self._incremental_state_id, key) def get_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, ) -> Optional[Dict[str, Optional[Tensor]]]: """Helper for getting incremental state for an nn.Module.""" full_key = self._get_full_incremental_state_key(key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], key: str, value: Dict[str, Optional[Tensor]], ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]: """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = self._get_full_incremental_state_key(key) incremental_state[full_key] = value return incremental_state def with_incremental_state(cls): cls.__bases__ = (FairseqIncrementalState,) + tuple( b for b in cls.__bases__ if b != FairseqIncrementalState ) return cls
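An illustrative sketch (not in the file above) of the decorator in use: @with_incremental_state splices FairseqIncrementalState into a module's bases, giving it UUID-scoped get/set helpers. TinyDecoderLayer is a hypothetical module used only for this demo.

import torch
from torch import nn
from fairseq.incremental_decoding_utils import with_incremental_state

@with_incremental_state
class TinyDecoderLayer(nn.Module):
    def forward(self, x, incremental_state=None):
        if incremental_state is not None:
            saved = self.get_incremental_state(incremental_state, "prev") or {}
            prev = saved.get("x")
            if prev is not None:
                x = torch.cat([prev, x], dim=0)  # append the cached prefix
            self.set_incremental_state(incremental_state, "prev", {"x": x})
        return x

layer = TinyDecoderLayer()
state = {}
layer(torch.ones(1, 4), incremental_state=state)
out = layer(torch.ones(1, 4), incremental_state=state)
assert out.shape[0] == 2  # one cached step plus the new one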
1,773
33.115385
76
py
sign-topic
sign-topic-main/fairseq/search.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import List, Optional import torch import torch.nn as nn from fairseq.token_generation_constraints import ( ConstraintState, OrderedConstraintState, UnorderedConstraintState, ) from torch import Tensor class Search(nn.Module): def __init__(self, tgt_dict): super().__init__() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.src_lengths = torch.tensor(-1) self.supports_constraints = False self.stop_on_max_len = False def step( self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None ): """Take a single search step. Args: step: the current search step, starting at 0 lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step scores: (bsz x input_beam_size x step) the historical model scores of each hypothesis up to this point prev_output_tokens: (bsz x step) the previously generated output tokens original_batch_idxs: (bsz) the tensor with the batch indices, in the range [0, bsz); this is useful in case a re-ordering has been applied and we need to know the original indices Return: A tuple of (scores, indices, beams) where: scores: (bsz x output_beam_size) the scores of the chosen elements; output_beam_size can be larger than input_beam_size, e.g., we may return 2*input_beam_size to account for EOS indices: (bsz x output_beam_size) the indices of the chosen elements beams: (bsz x output_beam_size) the hypothesis ids of the chosen elements, in the range [0, input_beam_size) """ raise NotImplementedError @torch.jit.export def set_src_lengths(self, src_lengths): self.src_lengths = src_lengths @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): """Initialize constraint states for constrained decoding (if supported). Args: batch_constraints: (torch.Tensor, optional) the list of constraints, in packed form beam_size: (int) the beam size """ pass def prune_sentences(self, batch_idxs: Tensor): """ Removes constraint states for completed sentences (if supported). This is called from sequence_generator._generate() when sentences are deleted from the batch. Args: batch_idxs: Indices of *sentences* whose constraint state should be *kept*. """ pass def update_constraints(self, active_hypos: Tensor): """ Updates the constraint states by selecting the beam items that are retained. This is called at each time step of sequence_generator._generate() when the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size. Args: active_hypos: (batch size, beam size) list of integers denoting, for each sentence, which beam candidate items should be kept.
""" pass class BeamSearch(Search): def __init__(self, tgt_dict): super().__init__(tgt_dict) self.constraint_states = None @torch.jit.export def step( self, step: int, lprobs, scores: Optional[Tensor], prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # At this point, beams_buf and indices_buf are single-dim and contain relative indices return scores_buf, indices_buf, beams_buf class PrefixConstrainedBeamSearch(Search): def __init__(self, tgt_dict, prefix_allowed_tokens_fn): super().__init__(tgt_dict) self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn self.stop_on_max_len = True @torch.jit.export def apply_mask(self, x, prev_output_tokens, original_batch_idxs): beam_size = x.shape[0] // original_batch_idxs.shape[0] original_batch_idxs = ( original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist() ) mask = torch.full_like(x, -math.inf) for sent_i, (sent, batch_i) in enumerate( zip(prev_output_tokens, original_batch_idxs) ): mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0 return mask @torch.jit.export def step( self, step: int, lprobs: Tensor, scores: Tensor, prev_output_tokens: Tensor, original_batch_idxs: Tensor, ): bsz, beam_size, vocab_size = lprobs.size() lprobs += self.apply_mask( lprobs.view(bsz * beam_size, 1, vocab_size), prev_output_tokens, original_batch_idxs, ).view(bsz, beam_size, vocab_size) if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(bsz, -1), k=min( # Take the best beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), ) scores_buf = top_prediction[0] indices_buf = top_prediction[1] beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) return scores_buf, indices_buf, beams_buf class LexicallyConstrainedBeamSearch(Search): """Implements lexically constrained beam search as described in Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation. Post & Vilar, NAACL 2018. https://www.aclweb.org/anthology/N18-1119/ and Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting. Hu et al, NAACL 2019. 
https://www.aclweb.org/anthology/N19-1090/ This is accomplished by maintaining, for each beam hypothesis, a ConstraintState object (see constraints.py) that tracks which constraints have been generated and using this information to shape the beam for each input sentence. """ def __init__(self, tgt_dict, representation): super().__init__(tgt_dict) self.representation = representation self.vocab_size = len(tgt_dict) self.num_cands = 0 self.supports_constraints = True @torch.jit.export def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): self.constraint_states = [] for constraint_tensor in batch_constraints: if self.representation == "ordered": constraint_state = OrderedConstraintState.create(constraint_tensor) elif self.representation == "unordered": constraint_state = UnorderedConstraintState.create(constraint_tensor) self.constraint_states.append([constraint_state for i in range(beam_size)]) @torch.jit.export def prune_sentences(self, batch_idxs: Tensor): self.constraint_states = [ self.constraint_states[i] for i in batch_idxs.tolist() ] @torch.jit.export def update_constraints(self, active_hypos: Tensor): if self.constraint_states: batch_size = active_hypos.size(0) for sentid in range(batch_size): self.constraint_states[sentid] = [ self.constraint_states[sentid][i] for i in active_hypos[sentid] ] @torch.jit.export def step( self, step: int, lprobs: Tensor, scores: Optional[Tensor], prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): """ A constrained step builds a large candidate list from the following: - the top 2 * {beam_size} items over the whole beam - for each item in the beam - the top {each_k} (default 1) - all next constraints We then compute the constrained state of each beam item, and assign stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so on. We then sort by (stripe, score), and truncate the list at 2 * beam size. Args: step: the decoder step lprobs: (batch size, beam size, target vocab) the target-vocab distributions for each item in the beam. Return: A tuple of (scores, indices, beams, constraints) where: scores: (batch, output beam size) the scores of the chosen elements indices: (batch, output beam size) the target vocab indices of the chosen elements beams: (batch, output beam size) the 0-indexed hypothesis ids of the chosen elements constraints: (batch, output beam size) the new constraint states """ each_k = 1 device = lprobs.device batch_size, beam_size, vocab_size = lprobs.size() self.num_cands = min( # Just take the k-best. We'll get another k from the 1-best from each # row, plus more from the constraints beam_size * 2, lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad ) # STEP 0: Preliminary.
Prevent EOS for unfinished hyps across all batch items constraint_states = self.constraint_states if constraint_states and step > 0: not_finished_indices = [] for sentno, sent_constraints in enumerate(constraint_states): for beamno, state in enumerate(sent_constraints): index = sentno * beam_size + beamno if not state.finished: not_finished_indices.append(index) not_finished_indices = torch.tensor(not_finished_indices) if not_finished_indices.numel() > 0: lprobs.view(batch_size * beam_size, -1)[ not_finished_indices, self.eos ] = -math.inf if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam entry for each batch item lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis assert scores is not None lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) top_prediction = torch.topk( lprobs.view(batch_size, -1), self.num_cands, ) scores_buf, indices_buf = top_prediction # Project back into relative indices and beams beams_buf = indices_buf // vocab_size indices_buf = indices_buf.fmod(vocab_size) # Short circuit if there are no constraints in this batch if not constraint_states: return scores_buf, indices_buf, beams_buf # STEP 1: get top-1 from each hypothesis across all sentences in the batch if step > 0: top_scores, top_indices = torch.topk( lprobs.view(batch_size * beam_size, -1), k=each_k, dim=1, ) top_scores = top_scores.view(batch_size, -1) top_indices = top_indices.view(batch_size, -1) scores_buf = torch.cat((scores_buf, top_scores), dim=1) indices_buf = torch.cat((indices_buf, top_indices), dim=1) new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1) beams_buf = torch.cat((beams_buf, new_beams), dim=1) # Now, process sentences in the batch one by one. new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device) new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() for sentno, states in enumerate(constraint_states): scores, indices, beams, new_states = self.step_sentence( step, sentno, lprobs[sentno], constraint_states[sentno], beams_buf[sentno].clone(), indices_buf[sentno].clone(), scores_buf[sentno].clone(), ) new_scores_buf[sentno] = scores new_indices_buf[sentno] = indices new_beams_buf[sentno] = beams self.constraint_states[sentno] = new_states return new_scores_buf, new_indices_buf, new_beams_buf @torch.jit.export def step_sentence( self, step: int, sentno: int, lprobs: Tensor, constraint_states: List[List[ConstraintState]], beams_buf: Tensor, indices_buf: Tensor, scores_buf: Tensor, ): """Does per-sentence processing. Adds all constraints for each hypothesis to the list of candidates; then removes duplicates, sorts, and dynamically stripes across the banks. All tensor inputs are collapsed to those pertaining to a single input sentence. 
""" device = lprobs.device # STEP 2: Add all constraints for each beam item for beamno, state in enumerate(constraint_states): next_tokens = torch.tensor(list(state.next_tokens()), device=device).long() if next_tokens.numel() != 0: indices_buf = torch.cat((indices_buf, next_tokens)) next_beams = ( torch.tensor(beamno, device=device) .repeat(next_tokens.size(0)) .long() ) beams_buf = torch.cat((beams_buf, next_beams)) next_values = lprobs[beamno].take(next_tokens.view(-1)) scores_buf = torch.cat((scores_buf, next_values)) # At the 0th time step, there is just one beam item if step == 0: break # STEP 3: Compute the "bank" for each candidate. This is the # number of constraints it's generated. We need this so that # we can do round-robin allocation of the beam across these # banks. If C is the number of constraints, we select the best # item in bank C, then the best in bank C-1, etc, followed by # the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so # on, until the maximum beam size. We accomplish this by # creating a sort key and striping across the banks. # Compute the new states for all candidates cands_size = indices_buf.size(0) constraint_states = [ constraint_states[beams_buf[i]].advance(indices_buf[i]) for i in range(cands_size) ] banks = torch.tensor([state.bank for state in constraint_states], device=device) # STEP 4: Sort num_constraint_tokens = len(state.tokens) # Sort by keys (bank, score) (i.e., sort banks together, and scores # within banks). AFAIK pytorch doesn't support either stable sort or # multi-key sorting, so we have to hack this. MAX_SCORE = -100 sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf sort_values, sort_indices = sort_key.sort(dim=0, descending=True) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] banks = banks[sort_indices] # Sort the constraints to follow suit constraint_states = [constraint_states[i] for i in sort_indices] # STEP 5: Remove duplicates. The topk calls (overall and # per-row) plus the per-row generation of constraints will # produce duplicates. Here we remove them. def roll(t): """Rolls a 1d tensor left by 1. [0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3] """ return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0) # We map candidates (beam, token_id) to a single dimension. # This is then shifted by 1. We can then easily identify # duplicates and create a mask that identifies unique # extensions. uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf uniques_mask = roll(uniques_mask) != uniques_mask # Use the mask to pare down the data structures scores_buf = torch.masked_select(scores_buf, uniques_mask) indices_buf = torch.masked_select(indices_buf, uniques_mask) beams_buf = torch.masked_select(beams_buf, uniques_mask) banks = torch.masked_select(banks, uniques_mask) i = 1 for mask in uniques_mask[1:]: if not mask: constraint_states.pop(i) i += mask # STEP 6: Assign IDs round-robin across banks, sort, and # truncate. Now that the candidates are sorted by (bank, # score) and uniqed, we dynamically allocate the {beam_size} # beam by striping across the candidates. These stripes will # be used as sort keys to do round-robin selection. This is # accomplished in a single pass with offsets. Sorting by # highest-banks (furthest-along hypotheses) first ensures # progress through the constraints. 
# # e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0 # OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1 # NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7 # = 0 5 10 1 6 11 13 2 7 12 3 8 # # Sorting by this then gives the following banks: # # 3 2 1 0 3 2 1 0 3 2 1 2 # # We'll take the top {beam_size} of these. stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)] stripes = torch.zeros_like(banks) cur_bank_count = -1 cur_bank = banks[0] for i, bank in enumerate(banks): if bank != cur_bank: cur_bank_count = 0 cur_bank = bank else: cur_bank_count += 1 stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count] # STEP 7: Sort by the stripes values sort_values, sort_indices = stripes.sort(dim=0) scores_buf = scores_buf[sort_indices] indices_buf = indices_buf[sort_indices] beams_buf = beams_buf[sort_indices] constraint_states = [constraint_states[i] for i in sort_indices] # STEP 8: Truncate to the candidates size! scores_buf = scores_buf[: self.num_cands] indices_buf = indices_buf[: self.num_cands] beams_buf = beams_buf[: self.num_cands] return scores_buf, indices_buf, beams_buf, constraint_states class LengthConstrainedBeamSearch(Search): def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b): super().__init__(tgt_dict) self.min_len_a = min_len_a self.min_len_b = min_len_b self.max_len_a = max_len_a self.max_len_b = max_len_b self.beam = BeamSearch(tgt_dict) self.needs_src_lengths = True def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): min_lens = self.min_len_a * self.src_lengths + self.min_len_b max_lens = self.max_len_a * self.src_lengths + self.max_len_b lprobs[step < min_lens, :, self.eos] = -math.inf lprobs[step >= max_lens, :, self.eos] = 0 return self.beam.step(step, lprobs, scores) class DiverseBeamSearch(Search): """Diverse Beam Search. See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models" for details. We only implement the Hamming Diversity penalty here, which performed best in the original paper. 
""" def __init__(self, tgt_dict, num_groups, diversity_strength): super().__init__(tgt_dict) self.num_groups = num_groups self.diversity_strength = -diversity_strength self.beam = BeamSearch(tgt_dict) @torch.jit.export def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if beam_size % self.num_groups != 0: raise ValueError( "DiverseBeamSearch requires --beam to be divisible by the number of groups" ) # initialize diversity penalty diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs) scores_G, indices_G, beams_G = [], [], [] for g in range(self.num_groups): lprobs_g = lprobs[:, g :: self.num_groups, :] scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None # apply diversity penalty if g > 0: lprobs_g = torch.add( lprobs_g, other=diversity_buf.unsqueeze(1), alpha=self.diversity_strength, ) else: lprobs_g = lprobs_g.contiguous() scores_buf, indices_buf, beams_buf = self.beam.step( step, lprobs_g, scores_g ) beams_buf.mul_(self.num_groups).add_(g) scores_G.append(scores_buf.clone()) indices_G.append(indices_buf.clone()) beams_G.append(beams_buf.clone()) # update diversity penalty diversity_buf.scatter_add_( 1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf) ) # interleave results from different groups scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1) indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1) beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1) return scores_buf, indices_buf, beams_buf class Sampling(Search): sampling_topk: int sampling_topp: float def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0): super().__init__(tgt_dict) self.sampling_topk = sampling_topk self.sampling_topp = sampling_topp def _sample_topp(self, lprobs): """Sample among the smallest set of elements whose cumulative probability mass exceeds p. See `"The Curious Case of Neural Text Degeneration" (Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_. Args: lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step Return: A tuple of (trimed_probs, truncated_indices) where: trimed_probs: (bsz x input_beam_size x ?) the model's probabilities over the elements selected to sample from. The width of the third dimension is determined by top-P. truncated_indices: (bsz x input_beam_size x ?) the indices of the chosen elements. """ probs = lprobs.exp_() # sort the last dimension (vocab dimension) in descending order sorted_probs, sorted_indices = probs.sort(descending=True) # compute a mask to indicate the words to be included in the top-P set. cumsum_probs = sorted_probs.cumsum(dim=2) mask = cumsum_probs.lt(self.sampling_topp) # note that mask was computed by 'lt'. One more word needs to be included # so that the cumulative probability mass can exceed p. cumsum_mask = mask.cumsum(dim=2) last_included = cumsum_mask[:, :, -1:] last_included.clamp_(0, mask.size()[2] - 1) mask = mask.scatter_(2, last_included, 1) # truncate unnecessary dims. max_dim = last_included.max() truncated_mask = mask[:, :, : max_dim + 1] truncated_probs = sorted_probs[:, :, : max_dim + 1] truncated_indices = sorted_indices[:, :, : max_dim + 1] # trim the words that are not in top-P by setting their probabilities # to 0, so that they would not be sampled later. 
trim_mask = ~truncated_mask trimmed_probs = truncated_probs.masked_fill_(trim_mask, 0) return trimmed_probs, truncated_indices @torch.jit.export def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() if self.sampling_topp > 0: # only sample from the smallest set of words whose cumulative probability mass exceeds p probs, top_indices = self._sample_topp(lprobs) elif self.sampling_topk > 0: # only sample from top-k candidates lprobs, top_indices = lprobs.topk(self.sampling_topk) probs = lprobs.exp_() else: probs = lprobs.exp_() # dummy data to be consistent with true branch for type check top_indices = torch.empty(0).to(probs) # sample if step == 0: indices_buf = torch.multinomial( probs.view(bsz, -1), beam_size, replacement=True, ).view(bsz, beam_size) else: indices_buf = torch.multinomial( probs.view(bsz * beam_size, -1), 1, replacement=True, ).view(bsz, beam_size) if step == 0: # expand to beam size probs = probs.expand(bsz, beam_size, -1) # gather scores scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1)) scores_buf = scores_buf.log_().view(bsz, -1) # remap indices if using top-k or top-P sampling if self.sampling_topk > 0 or self.sampling_topp > 0: indices_buf = torch.gather( top_indices.expand(bsz, beam_size, -1), dim=2, index=indices_buf.unsqueeze(-1), ).squeeze(2) if step == 0: beams_buf = indices_buf.new_zeros(bsz, beam_size) else: beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1) # make scores cumulative scores_buf.add_( torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf) ) return scores_buf, indices_buf, beams_buf class DiverseSiblingsSearch(Search): """ Beam search with diverse siblings. See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details. https://arxiv.org/abs/1611.08562 1/ Calculate hypotheses for each beam 2/ Intra-sibling ordering 3/ Rewrite scores 4/ Choose top K hypotheses if diversity_rate == 0, this is equivalent to BeamSearch """ def __init__(self, tgt_dict, diversity_rate): super().__init__(tgt_dict) self.diversity_rate = diversity_rate self.beam = BeamSearch(tgt_dict) def step( self, step: int, lprobs, scores, prev_output_tokens: Optional[Tensor] = None, original_batch_idxs: Optional[Tensor] = None, ): bsz, beam_size, vocab_size = lprobs.size() k = min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with.
beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ) s_list: List[Tensor] i_list: List[Tensor] s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)] i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)] sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate if step == 0: return self.beam.step(step, lprobs, scores) lprobs.add_(scores[:, :, step - 1].unsqueeze(-1)) # 1/ Calculate hypotheses for each beam for i in range(beam_size): torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i])) i_list[i].fmod_(vocab_size) # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores s_list[i].sub_(sibling_score) # 4/ Choose top K hypotheses indices = torch.stack(i_list, dim=1).view(bsz, -1) final_scores = torch.empty(0).to(lprobs) final_indices = torch.LongTensor().to(device=lprobs.device) final_beams = torch.LongTensor().to(device=lprobs.device) (final_scores, final_indices) = torch.topk( torch.stack(s_list, dim=1).view(bsz, -1), k, ) final_beams = final_indices // k for i in range(bsz): final_indices[i] = indices[i][final_indices[i]] return final_scores, final_indices, final_beams
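# --- Illustrative usage sketch (added; not part of the original fairseq file) ---
# A minimal, hedged example of driving BeamSearch.step for step 0. The
# DummyDict stub below is hypothetical; real callers pass a
# fairseq.data.Dictionary. At step 0 only beam 0 is scored, and the search
# returns the top 2 * beam_size candidates per sentence.
if __name__ == "__main__":
    class DummyDict:
        def pad(self) -> int: return 1
        def unk(self) -> int: return 3
        def eos(self) -> int: return 2
        def __len__(self) -> int: return 10

    search = BeamSearch(DummyDict())
    bsz, beam_size, vocab = 2, 4, 10
    lprobs = torch.log_softmax(torch.randn(bsz, beam_size, vocab), dim=-1)
    out_scores, out_indices, out_beams = search.step(0, lprobs, scores=None)
    # each output has shape (bsz, 2 * beam_size); all beam ids are 0 at step 0
    print(out_scores.shape, out_indices.shape, out_beams.shape)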
31,337
37.451534
100
py
sign-topic
sign-topic-main/fairseq/nan_detector.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch logger = logging.getLogger(__name__) class NanDetector: """ Detects the first NaN or Inf in forward and/or backward pass and logs it, together with the module name """ def __init__(self, model, forward=True, backward=True): self.bhooks = [] self.fhooks = [] self.forward = forward self.backward = backward self.named_parameters = list(model.named_parameters()) self.reset() for name, mod in model.named_modules(): mod.__module_name = name self.add_hooks(mod) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): # Dump out all model gnorms to enable better debugging norm = {} gradients = {} for name, param in self.named_parameters: if param.grad is not None: grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32) norm[name] = grad_norm.item() if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any(): gradients[name] = param.grad.data if len(gradients) > 0: logger.info("Detected nan/inf grad norm, dumping norms...") logger.info(f"norms: {norm}") logger.info(f"gradients: {gradients}") self.close() def add_hooks(self, module): if self.forward: self.fhooks.append(module.register_forward_hook(self.fhook_fn)) if self.backward: self.bhooks.append(module.register_backward_hook(self.bhook_fn)) def reset(self): self.has_printed_f = False self.has_printed_b = False def _detect(self, tensor, name, backward): err = None if ( torch.is_floating_point(tensor) # single value tensors (like the loss) will not provide much info and tensor.numel() >= 2 ): with torch.no_grad(): if torch.isnan(tensor).any(): err = "NaN" elif torch.isinf(tensor).any(): err = "Inf" if err is not None: err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}" return err def _apply(self, module, inp, x, backward): if torch.is_tensor(x): if isinstance(inp, tuple) and len(inp) > 0: inp = inp[0] err = self._detect(x, module.__module_name, backward) if err is not None: if torch.is_tensor(inp) and not backward: err += ( f" input max: {inp.max().item()}, input min: {inp.min().item()}" ) has_printed_attr = "has_printed_b" if backward else "has_printed_f" logger.warning(err) setattr(self, has_printed_attr, True) elif isinstance(x, dict): for v in x.values(): self._apply(module, inp, v, backward) elif isinstance(x, list) or isinstance(x, tuple): for v in x: self._apply(module, inp, v, backward) def fhook_fn(self, module, inp, output): if not self.has_printed_f: self._apply(module, inp, output, backward=False) def bhook_fn(self, module, inp, output): if not self.has_printed_b: self._apply(module, inp, output, backward=True) def close(self): for hook in self.fhooks + self.bhooks: hook.remove()
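# --- Illustrative usage sketch (added; not part of the original fairseq file) ---
# A minimal, hedged example: wrap a forward pass in NanDetector so the first
# non-finite activation is logged together with the offending module's name.
# The toy model and the injected Inf below are purely illustrative.
if __name__ == "__main__":
    import torch.nn as nn

    logging.basicConfig(level=logging.WARNING)
    toy_model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 1))
    x = torch.randn(8, 4)
    x[0, 0] = float("inf")  # force a non-finite value through the network
    with NanDetector(toy_model, forward=True, backward=False):
        toy_model(x)  # a NaN/Inf warning should be logged by the forward hook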
3,755
33.458716
119
py
sign-topic
sign-topic-main/fairseq/speech_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import numpy as np from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig class SpeechGenerator(object): def __init__(self, model, vocoder, data_cfg: S2TDataConfig): self.model = model self.vocoder = vocoder stats_npz_path = data_cfg.global_cmvn_stats_npz self.gcmvn_stats = None if stats_npz_path is not None: self.gcmvn_stats = np.load(stats_npz_path) def gcmvn_denormalize(self, x): # x: B x T x C if self.gcmvn_stats is None: return x mean = torch.from_numpy(self.gcmvn_stats["mean"]).to(x) std = torch.from_numpy(self.gcmvn_stats["std"]).to(x) assert len(x.shape) == 3 and mean.shape[0] == std.shape[0] == x.shape[2] x = x * std.view(1, 1, -1).expand_as(x) return x + mean.view(1, 1, -1).expand_as(x) def get_waveform(self, feat): # T x C -> T return None if self.vocoder is None else self.vocoder(feat).squeeze(0) class AutoRegressiveSpeechGenerator(SpeechGenerator): def __init__( self, model, vocoder, data_cfg, max_iter: int = 6000, eos_prob_threshold: float = 0.5, ): super().__init__(model, vocoder, data_cfg) self.max_iter = max_iter self.eos_prob_threshold = eos_prob_threshold @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size()[:2] n_frames_per_step = model.decoder.n_frames_per_step out_dim = model.decoder.out_dim raw_dim = out_dim // n_frames_per_step # initialize encoder_out = model.forward_encoder( src_tokens, src_lengths, speaker=sample["speaker"] ) incremental_state = {} feat, attn, eos_prob = [], [], [] finished = src_tokens.new_zeros((bsz,)).bool() out_lens = src_lengths.new_zeros((bsz,)).long().fill_(self.max_iter) prev_feat_out = encoder_out["encoder_out"][0].new_zeros(bsz, 1, out_dim) for step in range(self.max_iter): cur_out_lens = out_lens.clone() cur_out_lens.masked_fill_(cur_out_lens.eq(self.max_iter), step + 1) _, cur_eos_out, cur_extra = model.forward_decoder( prev_feat_out, encoder_out=encoder_out, incremental_state=incremental_state, target_lengths=cur_out_lens, speaker=sample["speaker"], **kwargs ) cur_eos_prob = torch.sigmoid(cur_eos_out).squeeze(2) feat.append(cur_extra["feature_out"]) attn.append(cur_extra["attn"]) eos_prob.append(cur_eos_prob) cur_finished = cur_eos_prob.squeeze(1) > self.eos_prob_threshold out_lens.masked_fill_((~finished) & cur_finished, step + 1) finished = finished | cur_finished if finished.sum().item() == bsz: break prev_feat_out = cur_extra["feature_out"] feat = torch.cat(feat, dim=1) feat = model.decoder.postnet(feat) + feat eos_prob = torch.cat(eos_prob, dim=1) attn = torch.cat(attn, dim=2) alignment = attn.max(dim=1)[1] feat = feat.reshape(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1) attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) out_lens = out_lens * n_frames_per_step finalized = [ { "feature": feat[b, :out_len], "eos_prob": eos_prob[b, :out_len], "attn": attn[b, :, :out_len], "alignment": alignment[b, :out_len], "waveform": self.get_waveform(feat[b, :out_len]), } for b, out_len in zip(range(bsz), out_lens) ] if has_targ: assert sample["target"].size(-1) == out_dim tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = 
self.gcmvn_denormalize(tgt_feats) tgt_lens = sample["target_lengths"] * n_frames_per_step for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized class NonAutoregressiveSpeechGenerator(SpeechGenerator): @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() bsz, max_src_len = sample["net_input"]["src_tokens"].size() n_frames_per_step = model.encoder.n_frames_per_step out_dim = model.encoder.out_dim raw_dim = out_dim // n_frames_per_step feat, feat_post, out_lens, log_dur_out, _, _ = model( src_tokens=sample["net_input"]["src_tokens"], src_lengths=sample["net_input"]["src_lengths"], prev_output_tokens=sample["net_input"]["prev_output_tokens"], incremental_state=None, target_lengths=sample["target_lengths"], speaker=sample["speaker"], ) if feat_post is not None: feat = feat_post feat = feat.view(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) dur_out = torch.clamp(torch.round(torch.exp(log_dur_out) - 1).long(), min=0) def get_dur_plot_data(d): r = [] for i, dd in enumerate(d): r += [i + 1] * dd.item() return r out_lens = out_lens * n_frames_per_step finalized = [ { "feature": feat[b, :l] if l > 0 else feat.new_zeros([1, raw_dim]), "waveform": self.get_waveform( feat[b, :l] if l > 0 else feat.new_zeros([1, raw_dim]) ), "attn": feat.new_tensor(get_dur_plot_data(dur_out[b])), } for b, l in zip(range(bsz), out_lens) ] if has_targ: tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = self.gcmvn_denormalize(tgt_feats) tgt_lens = sample["target_lengths"] * n_frames_per_step for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized class TeacherForcingAutoRegressiveSpeechGenerator(AutoRegressiveSpeechGenerator): @torch.no_grad() def generate(self, model, sample, has_targ=False, **kwargs): model.eval() src_tokens = sample["net_input"]["src_tokens"] src_lens = sample["net_input"]["src_lengths"] prev_out_tokens = sample["net_input"]["prev_output_tokens"] tgt_lens = sample["target_lengths"] n_frames_per_step = model.decoder.n_frames_per_step raw_dim = model.decoder.out_dim // n_frames_per_step bsz = src_tokens.shape[0] feat, eos_prob, extra = model( src_tokens, src_lens, prev_out_tokens, incremental_state=None, target_lengths=tgt_lens, speaker=sample["speaker"], ) attn = extra["attn"] # B x T_s x T_t alignment = attn.max(dim=1)[1] feat = feat.reshape(bsz, -1, raw_dim) feat = self.gcmvn_denormalize(feat) eos_prob = eos_prob.repeat_interleave(n_frames_per_step, dim=1) attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) tgt_lens = sample["target_lengths"] * n_frames_per_step finalized = [ { "feature": feat[b, :tgt_len], "eos_prob": eos_prob[b, :tgt_len], "attn": attn[b, :, :tgt_len], "alignment": alignment[b, :tgt_len], "waveform": self.get_waveform(feat[b, :tgt_len]), } for b, tgt_len in zip(range(bsz), tgt_lens) ] if has_targ: tgt_feats = sample["target"].view(bsz, -1, raw_dim) tgt_feats = self.gcmvn_denormalize(tgt_feats) for b, (f, l) in enumerate(zip(tgt_feats, tgt_lens)): finalized[b]["targ_feature"] = f[:l] finalized[b]["targ_waveform"] = self.get_waveform(f[:l]) return finalized
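# --- Illustrative sketch (added; not part of the original fairseq file) ---
# A minimal, hedged demonstration of the global CMVN denormalization that
# gcmvn_denormalize applies, i.e. x * std + mean broadcast over (B, T, C).
# The generator is created via __new__ here purely to exercise the method
# with synthetic stats; real usage builds it from an S2TDataConfig.
if __name__ == "__main__":
    gen = SpeechGenerator.__new__(SpeechGenerator)  # skip model/vocoder setup
    gen.gcmvn_stats = {
        "mean": np.full(80, 2.0, dtype=np.float32),
        "std": np.full(80, 0.5, dtype=np.float32),
    }
    feats = torch.zeros(1, 10, 80)  # B x T x C of normalized features
    denorm = gen.gcmvn_denormalize(feats)
    assert torch.allclose(denorm, torch.full_like(denorm, 2.0))  # 0 * 0.5 + 2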
8,840
37.107759
84
py
sign-topic
sign-topic-main/fairseq/iterative_refinement_generator.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import namedtuple import numpy as np import torch from fairseq import utils DecoderOut = namedtuple( "IterativeRefinementDecoderOut", ["output_tokens", "output_scores", "attn", "step", "max_step", "history"], ) class IterativeRefinementGenerator(object): def __init__( self, tgt_dict, models=None, eos_penalty=0.0, max_iter=10, max_ratio=2, beam_size=1, decoding_format=None, retain_dropout=False, adaptive=True, retain_history=False, reranking=False, ): """ Generates translations based on iterative refinement. Args: tgt_dict: target dictionary eos_penalty: if > 0.0, it penalizes early stopping in decoding max_iter: maximum number of refinement iterations max_ratio: generate sequences of maximum length ax, where x is the source length decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} retain_dropout: retain dropout during inference adaptive: decoding with early stopping """ self.bos = tgt_dict.bos() self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.eos_penalty = eos_penalty self.max_iter = max_iter self.max_ratio = max_ratio self.beam_size = beam_size self.reranking = reranking self.decoding_format = decoding_format self.retain_dropout = retain_dropout self.retain_history = retain_history self.adaptive = adaptive self.models = models def generate_batched_itr( self, data_itr, maxlen_a=None, maxlen_b=None, cuda=False, timer=None, prefix_size=0, ): """Iterate over a batched dataset and yield individual translations. Args: maxlen_a/b: generate sequences of maximum length ax + b, where x is the source sentence length. cuda: use GPU for generation timer: StopwatchMeter for timing generations. """ for sample in data_itr: if "net_input" not in sample: continue if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate( self.models, sample, prefix_tokens=sample["target"][:, :prefix_size] if prefix_size > 0 else None, ) if timer is not None: timer.stop(sample["ntokens"]) for i, id in enumerate(sample["id"]): # remove padding src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) ref = utils.strip_pad(sample["target"][i, :], self.pad) yield id, src, ref, hypos[i] @torch.no_grad() def generate(self, models, sample, prefix_tokens=None, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the IterativeRefinementGenerator is not supported" ) # TODO: iterative refinement generator does not support ensemble for now. if not self.retain_dropout: for model in models: model.eval() model, reranker = models[0], None if self.reranking: assert len(models) > 1, "Assuming the last checkpoint is the reranker" assert ( self.beam_size > 1 ), "Reranking requires multiple translations for each example" reranker = models[-1] models = models[:-1] if len(models) > 1 and hasattr(model, "enable_ensemble"): assert model.allow_ensemble, "{} does not support ensembling".format( model.__class__.__name__ ) model.enable_ensemble(models) # TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len = src_tokens.size() # initialize encoder_out = model.forward_encoder([src_tokens, src_lengths]) prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) if self.beam_size > 1: assert ( model.allow_length_beam ), "{} does not support decoding with length beam.".format( model.__class__.__name__ ) # regenerate data based on length-beam length_beam_order = ( utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, length_beam_order ) prev_decoder_out = model.regenerate_length_beam( prev_decoder_out, self.beam_size ) bsz = bsz * self.beam_size sent_idxs = torch.arange(bsz) prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.retain_history: prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) finalized = [[] for _ in range(bsz)] def is_a_loop(x, y, s, a): b, l_x, l_y = x.size(0), x.size(1), y.size(1) if l_x > l_y: y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) if a is not None: a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) elif l_x < l_y: x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) return (x == y).all(1), y, s, a def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): cutoff = prev_out_token.ne(self.pad) tokens = prev_out_token[cutoff] if prev_out_score is None: scores, score = None, None else: scores = prev_out_score[cutoff] score = scores.mean() if prev_out_attn is None: hypo_attn, alignment = None, None else: hypo_attn = prev_out_attn[cutoff] alignment = hypo_attn.max(dim=1)[1] return { "steps": step, "tokens": tokens, "positional_scores": scores, "score": score, "hypo_attn": hypo_attn, "alignment": alignment, } for step in range(self.max_iter + 1): decoder_options = { "eos_penalty": self.eos_penalty, "max_ratio": self.max_ratio, "decoding_format": self.decoding_format, } prev_decoder_out = prev_decoder_out._replace( step=step, max_step=self.max_iter + 1, ) decoder_out = model.forward_decoder( prev_decoder_out, encoder_out, **decoder_options ) if self.adaptive: # terminate if there is a loop terminated, out_tokens, out_scores, out_attn = is_a_loop( prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn, ) decoder_out = decoder_out._replace( output_tokens=out_tokens, output_scores=out_scores, attn=out_attn, ) else: terminated = decoder_out.output_tokens.new_zeros( decoder_out.output_tokens.size(0) ).bool() if step == self.max_iter: # reach last iteration, terminate terminated.fill_(1) # collect finalized sentences finalized_idxs = sent_idxs[terminated] finalized_tokens = decoder_out.output_tokens[terminated] finalized_scores = decoder_out.output_scores[terminated] finalized_attn = ( None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated] ) if self.retain_history: finalized_history_tokens = [h[terminated] for h in decoder_out.history] for i in range(finalized_idxs.size(0)): finalized[finalized_idxs[i]] = [ finalized_hypos( step, finalized_tokens[i], finalized_scores[i], None if finalized_attn is None else finalized_attn[i], ) ] if self.retain_history: finalized[finalized_idxs[i]][0]["history"] = [] for j in range(len(finalized_history_tokens)): finalized[finalized_idxs[i]][0]["history"].append( finalized_hypos( step, finalized_history_tokens[j][i], None, None ) ) 
# check if all terminated if terminated.sum() == terminated.size(0): break # for next step not_terminated = ~terminated prev_decoder_out = decoder_out._replace( output_tokens=decoder_out.output_tokens[not_terminated], output_scores=decoder_out.output_scores[not_terminated], attn=decoder_out.attn[not_terminated] if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) else None, history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None, ) encoder_out = model.encoder.reorder_encoder_out( encoder_out, not_terminated.nonzero(as_tuple=False).squeeze() ) sent_idxs = sent_idxs[not_terminated] prev_output_tokens = prev_decoder_out.output_tokens.clone() if self.beam_size > 1: if reranker is not None: finalized = self.rerank( reranker, finalized, [src_tokens, src_lengths], self.beam_size ) # aggregate information from length beam finalized = [ finalized[ np.argmax( [ finalized[self.beam_size * i + j][0]["score"] for j in range(self.beam_size) ] ) + self.beam_size * i ] for i in range(len(finalized) // self.beam_size) ] return finalized def rerank(self, reranker, finalized, encoder_input, beam_size): def rebuild_batch(finalized): finalized_tokens = [f[0]["tokens"] for f in finalized] finalized_maxlen = max(f.size(0) for f in finalized_tokens) final_output_tokens = ( finalized_tokens[0] .new_zeros(len(finalized_tokens), finalized_maxlen) .fill_(self.pad) ) for i, f in enumerate(finalized_tokens): final_output_tokens[i, : f.size(0)] = f return final_output_tokens final_output_tokens = rebuild_batch(finalized) final_output_tokens[ :, 0 ] = self.eos # autoregressive model assumes starting with EOS reranker_encoder_out = reranker.encoder(*encoder_input) length_beam_order = ( utils.new_arange( final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1) ) .t() .reshape(-1) ) reranker_encoder_out = reranker.encoder.reorder_encoder_out( reranker_encoder_out, length_beam_order ) reranking_scores = reranker.get_normalized_probs( reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None, ) reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) reranking_masks = final_output_tokens[:, 1:].ne(self.pad) reranking_scores = ( reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) ) reranking_scores = reranking_scores / reranking_masks.sum(1).type_as( reranking_scores ) for i in range(len(finalized)): finalized[i][0]["score"] = reranking_scores[i] return finalized
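# --- Illustrative sketch (added; not part of the original fairseq file) ---
# A minimal, hedged re-implementation of the token comparison inside the
# nested is_a_loop() helper above: the previous and current decodings are
# right-padded to a common length, and a hypothesis counts as terminated when
# refinement has reached a fixed point (the original also pads the score and
# attention tensors, omitted here). pad_idx stands in for self.pad.
if __name__ == "__main__":
    pad_idx = 1

    def reached_fixed_point(prev_tokens, cur_tokens):
        b = prev_tokens.size(0)
        l_p, l_c = prev_tokens.size(1), cur_tokens.size(1)
        if l_p > l_c:
            cur_tokens = torch.cat(
                [cur_tokens, cur_tokens.new_full((b, l_p - l_c), pad_idx)], 1
            )
        elif l_p < l_c:
            prev_tokens = torch.cat(
                [prev_tokens, prev_tokens.new_full((b, l_c - l_p), pad_idx)], 1
            )
        return (prev_tokens == cur_tokens).all(1)

    prev = torch.tensor([[5, 6, 7], [5, 6, 7]])
    cur = torch.tensor([[5, 6, 7, 1], [5, 6, 8, 9]])
    print(reached_fixed_point(prev, cur))  # tensor([ True, False])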
13,238
35.775
93
py
sign-topic
sign-topic-main/fairseq/trainer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a network across multiple GPUs. """ import contextlib import logging import sys import time from argparse import Namespace from itertools import chain from typing import Any, Dict, List import torch from fairseq import checkpoint_utils, models, optim, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics from fairseq.models.ema import build_ema from fairseq.nan_detector import NanDetector from fairseq.optim import lr_scheduler from fairseq.utils import safe_hasattr from omegaconf import OmegaConf logger = logging.getLogger(__name__) class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") if self.is_fsdp: import fairscale if self.cfg.common.bf16: raise ValueError( "FullyShardedDataParallel is not compatible with --bf16 or " "--memory-efficient-bf16" ) if self.cfg.distributed_training.zero_sharding != "none": raise ValueError( "FullyShardedDataParallel is not compatible with --zero-sharding " "option (it's already built in)" ) if ( max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0" ): raise RuntimeError( "Please update to fairscale 0.4.0 or newer when combining " "--update-freq with FullyShardedDataParallel" ) else: if ( hasattr(self.cfg.distributed_training, "cpu_offload") and self.cfg.distributed_training.cpu_offload ): raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if not self.is_fsdp: if cfg.common.fp16: assert not cfg.common.amp, "Cannot use fp16 and AMP together" self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) elif cfg.common.amp: self._amp_retries = 0 if ( not cfg.distributed_training.pipeline_model_parallel # the DistributedFairseqModel wrapper will handle moving to device, # so only handle cases which don't use the wrapper and not self.use_distributed_wrapper ): self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel self.last_device = None if 
self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None self._ema = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None @property def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() @property def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() @property def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() @property def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 @property def use_distributed_wrapper(self) -> bool: return ( self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ) or (self.is_fsdp and self.cfg.distributed_training.cpu_offload) @property def should_save_checkpoint_on_current_rank(self) -> bool: """Indicates whether to save checkpoints on the current DDP rank.""" if ( self.is_fsdp and self.cfg.distributed_training.use_sharded_state ) or getattr(self.cfg.model, "base_layers", 0) > 0: return True else: return self.is_data_parallel_master @property def always_call_state_dict_during_save_checkpoint(self) -> bool: if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: # FSDP calls communication collective when consolidating checkpoints return True else: return False @property def checkpoint_suffix(self) -> str: """Suffix to add to the checkpoint file name.""" if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( self.data_parallel_rank ) else: return self.cfg.checkpoint.checkpoint_suffix or "" @property def criterion(self): if 
self._wrapped_criterion is None: if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: self._wrapped_criterion = models.DistributedFairseqModel( self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if self._wrapped_model is None: if self.use_distributed_wrapper: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_model = self._model return self._wrapped_model @property def ema(self): if self._ema is None: self._build_ema() return self._ema def _build_ema(self): if self.cfg.ema.store_ema: self._ema = build_ema(self._model, self.cfg.ema, self.device) logger.info("Exponential Moving Average Shadow Model is initialized.") @property def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.is_fsdp and self.cfg.common.fp16: # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, # mostly for the grad scaling. But if we don't have the # --memory-efficient-fp16 flag set, then we're effectively doing # regular --fp16 and can allow the use of optimizers that would # otherwise be unsupported by MemoryEfficientFP16Optimizer. allow_unsupported = not self.cfg.common.memory_efficient_fp16 self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params, allow_unsupported=allow_unsupported ) elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16 or --amp, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) elif self.cfg.common.amp: self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info( "NOTE: your device may support faster training with --fp16 or --amp" ) self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.is_fsdp: assert ( not self.cfg.optimization.use_bmuf ), "--ddp-backend=fully_sharded is not compatible with BMUF" assert self._optimizer.supports_flat_params, ( "--ddp-backend=fully_sharded is only compatible with pointwise " "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). 
" "However, the sharding will result in slightly different results when " "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" ) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. " "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) @property def is_fsdp(self): return self.cfg.distributed_training.ddp_backend == "fully_sharded" def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if self.cfg.checkpoint.no_save_optimizer_state: return self._gathered_optim_state = None if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() elif self.is_fsdp and not self.model.use_sharded_state: st = self.model.gather_full_optim_state_dict( self.optimizer ) # only returns on rank 0 self._gathered_optim_state = st def state_dict(self): state_dict = { "args": None, # legacy "cfg": ( OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg ), "model": self.model.state_dict(), "criterion": ( self.criterion.state_dict() if utils.has_parameters(self.criterion) else None ), "optimizer_history": (self._optim_history or []) + [ { "criterion_name": self.get_criterion().__class__.__name__, "optimizer_name": self.optimizer.__class__.__name__, "lr_scheduler_state": self.lr_scheduler.state_dict(), "num_updates": self.get_num_updates(), } ], "task_state": self.task.state_dict() if self.task is not None else {}, "extra_state": { "metrics": metrics.state_dict(), "previous_training_time": self.cumulative_training_time(), }, } if self.cfg.ema.store_ema: # Save EMA model state as extra state state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() if self.cfg.ema.ema_fp32: # Save EMA params in fp32 state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params if not self.cfg.checkpoint.no_save_optimizer_state: if self._gathered_optim_state is not None: state_dict["last_optimizer_state"] = self._gathered_optim_state self._gathered_optim_state = None else: state_dict["last_optimizer_state"] = self.optimizer.state_dict() if self.is_fsdp: # save meta data for recombining checkpoint upon loading state_dict["fsdp_metadata"] = self.model.local_metadata_dict() return state_dict def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" logger.info(f"Saving checkpoint to {filename}") # call state_dict on all ranks in case it needs internal communication state_dict = utils.move_to_cpu(self.state_dict()) state_dict["extra_state"].update(extra_state) if self.should_save_checkpoint_on_current_rank: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, ) logger.info(f"Finished saving checkpoint to {filename}") def load_checkpoint( self, filename, reset_optimizer=False, 
reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. """ extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu # FSDP requires loading checkpoint shards on all ranks or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or getattr(self.cfg.model, "base_layers", 0) > 0 ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: # this is the code related to AdaPrune # In short, it removes redundant heads in multi-head attention module based on heads importance provided # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI # The idea of prune in mha can be summarized as # Fine tune model (e.g. roberta encoder) on certain datasets with regularization # After the model is trained, the user can use get_reserve_head_index and _adaptive_prune_heads functions to get the top X heads with the most importance. # Then user uses the rank to prune a new roberta encoder and save the pruned ckpt manually. # User will fine tune the new roberta encoder via the ckpt saved above # To get rid of registering different pruned versions of Roberta, I use the argument --mha-heads-to-keep to prune the Roberta model into a pruned version which matches the pruned ckpt. if ( safe_hasattr(self.model, "args") and safe_hasattr(self.model.args, "mha_heads_to_keep") and self.model.args.mha_heads_to_keep != -1 ): logger.info(f"Prune model: keep {self.model.args.mha_heads_to_keep} heads for each multihead attention module") for layer in self.model.encoder.sentence_encoder.layers: reserve_head_index = layer.self_attn._get_reserve_head_index(num_heads_to_keep=self.model.args.mha_heads_to_keep) layer.self_attn._adaptive_prune_heads(reserve_head_index=reserve_head_index) layer.self_attn._set_skip_embed_dim_check() logger.info(self.model) # this is the code related to AdaPrune # In short, it removes redundant units in feedforward layer in each transformer layer based on importance # For more info, please refer to the paper: https://openreview.net/forum?id=_CMSV7FTzGI # The idea of prune in ffn can be summarized as # Fine tune model (e.g. roberta encoder) on certain datasets with regularization # After the model is trained,
the user can use _get_fc_rank and _prune_fc_layer functions to get the top X units with the most importance. # Then user uses the rank to prune a new roberta encoder and save the pruned ckpt manually. # User will fine tune the new roberta encoder via the ckpt saved above # To get rid of registering different pruned versions of Roberta, I use the argument --ffn-blocks-to-remove to prune the Roberta model into a pruned version which matches the pruned ckpt. if ( safe_hasattr(self.model, "args") and safe_hasattr(self.model.args, "ffn_blocks_to_remove") and self.model.args.ffn_blocks_to_remove != -1 ): logger.info(f"Prune model: remove {self.model.args.ffn_blocks_to_remove} ffn blocks for each transformer layer") for layer in self.model.encoder.sentence_encoder.layers: remove_index = layer._get_fc_rank(remove_num=self.model.args.ffn_blocks_to_remove) layer._prune_fc_layer(remove_index=remove_index) logger.info(self.model) self.model.load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) # save memory for later steps del state["model"] if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) del state["criterion"] except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if self.is_fsdp and not self.model.use_sharded_state: # if use_sharded_state, the last_optim_state is already sharded, skip this last_optim_state = self.model.get_shard_from_optim_state_dict( last_optim_state ) elif not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: itr_state = extra_state["train_iterator"] epoch = itr_state["epoch"] if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if ( itr_state.get("version", 1) >= 2 and itr_state["iterations_in_epoch"] == 0 ): # reset meters at start of epoch reset_meters = True if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() if self.cfg.ema.store_ema: if "ema" not in extra_state: logger.warn( "EMA not found in checkpoint. But store_ema is True. " "EMA is re-initialized from checkpoint."
) self.ema.restore( state["model"], build_fp32_params=self.cfg.ema.ema_fp32 ) else: logger.info("Loading EMA from checkpoint") self.ema.restore(extra_state["ema"], build_fp32_params=False) if self.cfg.ema.ema_fp32: if "ema_fp32_params" in extra_state: logger.info("Loading EMA fp32 params from checkpoint") self.ema.build_fp32_params(extra_state["ema_fp32_params"]) else: logger.info( "Building EMA fp32 params from EMA model in checkpoint" ) self.ema.build_fp32_params() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=(self.cfg.common.seed + epoch) if self.cfg.dataset.update_ordered_indices_seed else self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, skip_remainder_batch=self.cfg.optimization.skip_remainder_batch, grouped_shuffling=self.cfg.dataset.grouped_shuffling, update_epoch_batch_itr=self.cfg.dataset.update_epoch_batch_itr, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, # always pass a fixed "epoch" to keep validation data consistent # across training epochs epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, skip_remainder_batch=False, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model 
as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch @metrics.aggregate("train") def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 # The no_sync context manager results in increased memory # usage with FSDP, since full-size gradients will be # accumulated on each GPU. It's typically a better tradeoff # to do the extra communication with FSDP. and not self.is_fsdp ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. 
# To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass self._xla_markstep_and_send_to_cpu() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() ( logging_outputs, ( sample_size, ooms, total_train_time, ), ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. # In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. 
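                # Worked example (illustrative numbers only): with 8 data-parallel
                # workers and sample_size = 1024 target tokens, DDP leaves each
                # worker holding sum_of_gradients / 8, so multiplying by
                # numer / sample_size = 8 / 1024 yields sum_of_gradients / 1024,
                # i.e. gradients normalized per target token.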
with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.ddp_backend != "slowmo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # in case of AMP, if gradients are Nan/Inf then # optimizer step is still required if self.cfg.common.amp: overflow = True else: # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) if self.cfg.common.amp and overflow: if self._amp_retries == self.cfg.common.amp_batch_retries: logger.info("AMP: skipping this batch.") self._amp_retries = 0 else: self._amp_retries += 1 return self.train_step( samples, raise_oom ) # recursion to feed in same batch except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs, ) raise except OverflowError as e: overflow = True logger.info( f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" ) grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer # after the step if hasattr(self.model, "perform_slowmo"): self.model.perform_slowmo( self.optimizer.optimizer, getattr(self.optimizer, "fp32_params", None) ) logging_output = None if not overflow or self.cfg.distributed_training.ddp_backend == "slowmo": self.set_num_updates(self.get_num_updates() + 1) if self.cfg.ema.store_ema: # Step EMA forward with new model. 
self.ema.step( self.get_model(), self.get_num_updates(), ) metrics.log_scalar( "ema_decay", self.ema.get_decay(), priority=10000, round=5, weight=0, ) if self.tpu: import torch_xla.core.xla_model as xm # mark step on TPUs self._xla_markstep_and_send_to_cpu() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0 ) logging_outputs = self._xla_markstep_and_send_to_cpu( logging_outputs ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: if self.cuda and self.cuda_env is not None: # log minimum free memory over the iteration gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 torch.cuda.reset_peak_memory_stats() gb_free = self.cuda_env.total_memory_in_GB - gb_used metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16 or self.cfg.common.amp: metrics.log_scalar( "loss_scale", ( self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale() ), priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output @metrics.aggregate("valid") def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. 
extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion, **extra_kwargs ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats if self.tpu: logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_output def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): def agg_norm_fn(total_norm): total_norm = total_norm.cuda().float() ** 2 total_norm = distributed_utils.all_reduce( total_norm, group=self.data_parallel_process_group ) return total_norm ** 0.5 should_agg_norm = self.is_fsdp and ( self.data_parallel_process_group is not None or torch.distributed.is_initialized() ) return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None ) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _fp_convert_sample(self, sample): def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth # it makes sense to do the format conversion on the CPU and then transfer # a smaller buffer to the device. This also saves GPU memory capacity. 
if self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self.cuda: if self.pipeline_model_parallel: if "target" in sample: sample["target"] = utils.move_to_cuda( sample["target"], device=self.last_device ) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) if not self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( ( torch.isfinite(tensor).all() and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all() ) or (self.cfg.common.amp and not torch.isfinite(tensor).all()) # in case of amp non-finite grads are fine ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=legacy_ddp. " "Or are you mixing up different generation of GPUs in training?" 
+ "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _xla_markstep_and_send_to_cpu(self, data=None): import torch_xla.core.xla_model as xm xm.mark_step() if data is not None: from fairseq.utils import xla_device_to_cpu return xla_device_to_cpu(data) def _catalog_shared_params(module, memo=None, prefix=""): if memo is None: first_call = True memo = {} else: first_call = False for name, param in module._parameters.items(): param_prefix = prefix + ("." if prefix else "") + name if param not in memo: memo[param] = [] memo[param].append(param_prefix) for name, m in module._modules.items(): if m is None: continue submodule_prefix = prefix + ("." if prefix else "") + name _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if len(x) > 1] def _get_module_by_path(module, path): path = path.split(".") for name in path: module = getattr(module, name) return module def _set_module_by_path(module, path, value): path = path.split(".") for name in path[:-1]: module = getattr(module, name) setattr(module, path[-1], value)
64,688
40.547206
202
py
sign-topic
sign-topic-main/fairseq/modules/transformer_sentence_encoder_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise class TransformerSentenceEncoderLayer(nn.Module): """ Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained models. """ def __init__( self, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = "relu", export: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, init_fn: Callable = None, ) -> None: super().__init__() if init_fn is not None: init_fn() # Initialize parameters self.embedding_dim = embedding_dim self.num_attention_heads = num_attention_heads self.attention_dropout = attention_dropout self.q_noise = q_noise self.qn_block_size = qn_block_size self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.activation_dropout_module = FairseqDropout( activation_dropout, module_name=self.__class__.__name__ ) # Initialize blocks self.activation_fn = utils.get_activation_fn(activation_fn) self.self_attn = self.build_self_attention( self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, q_noise=q_noise, qn_block_size=qn_block_size, ) # layer norm associated with the self attention layer self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export) self.fc1 = self.build_fc1( self.embedding_dim, ffn_embedding_dim, q_noise=q_noise, qn_block_size=qn_block_size, ) self.fc2 = self.build_fc2( ffn_embedding_dim, self.embedding_dim, q_noise=q_noise, qn_block_size=qn_block_size, ) # layer norm associated with the position wise feed-forward NN self.final_layer_norm = LayerNorm(self.embedding_dim, export=export) def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention( self, embed_dim, num_attention_heads, dropout, self_attention, q_noise, qn_block_size, ): return MultiheadAttention( embed_dim, num_attention_heads, dropout=dropout, self_attention=True, q_noise=q_noise, qn_block_size=qn_block_size, ) def forward( self, x: torch.Tensor, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, ): """ LayerNorm is applied either before or after the self-attention/ffn modules similar to the original Transformer implementation. """ residual = x x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask, ) x = self.dropout_module(x) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = residual + x x = self.final_layer_norm(x) return x, attn
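

# --- Minimal usage sketch (illustrative only; the demo function below is
# not part of the module). Inputs follow MultiheadAttention's
# Time x Batch x Channel convention, and the layer applies post-LayerNorm
# residual blocks as in BERT/XLM style pre-training.
def _demo_transformer_sentence_encoder_layer():
    import torch

    layer = TransformerSentenceEncoderLayer(
        embedding_dim=16, ffn_embedding_dim=32, num_attention_heads=4
    )
    x = torch.randn(5, 2, 16)  # (seq_len, batch, embedding_dim)
    out, attn = layer(x)  # attn is None because need_weights=False
    assert out.shape == (5, 2, 16)
    return out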
4,326
29.907143
84
py
sign-topic
sign-topic-main/fairseq/modules/learned_positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Optional import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from torch import Tensor class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): super().__init__(num_embeddings, embedding_dim, padding_idx) self.onnx_trace = False if self.padding_idx is not None: self.max_positions = self.num_embeddings - self.padding_idx - 1 else: self.max_positions = self.num_embeddings def forward( self, input: Tensor, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, positions: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" assert (positions is None) or ( self.padding_idx is None ), "If positions is pre-computed then padding_idx should not be set." if positions is None: if incremental_state is not None: # positions is the same for every token when decoding a single step # Without the int() cast, it doesn't work in some cases when exporting to ONNX positions = torch.zeros( (1, 1), device=input.device, dtype=input.dtype ).fill_(int(self.padding_idx + input.size(1))) else: positions = utils.make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, )
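

# --- Minimal usage sketch (illustrative only; the demo function below is
# not part of the module). With padding_idx set, real tokens receive
# positions starting at padding_idx + 1 while pads keep padding_idx, so
# max_positions = num_embeddings - padding_idx - 1.
def _demo_learned_positional_embedding():
    import torch

    pad = 1
    emb = LearnedPositionalEmbedding(num_embeddings=10, embedding_dim=4, padding_idx=pad)
    tokens = torch.tensor([[5, 6, 7, pad]])  # one right-padded sequence
    out = emb(tokens)  # underlying positions: [[2, 3, 4, 1]]
    assert out.shape == (1, 4, 4)
    return out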
2,259
35.451613
94
py
sign-topic
sign-topic-main/fairseq/modules/sparse_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch from .multihead_attention import MultiheadAttention class SparseMultiheadAttention(MultiheadAttention): """Sparse Multi-Headed Attention. "Generating Long Sequences with Sparse Transformers". Implements fixed factorized self attention, where l=stride and c=expressivity. A(1) includes all words in the stride window and A(2) takes a summary of c words from the end of each stride window. If is_bidirectional=False, we do not include any words past the current word, as in the paper. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True, ): super().__init__( embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention, ) self.is_bidirectional = is_bidirectional self.stride = stride self.expressivity = expressivity assert self.stride > 0 and self.stride >= self.expressivity # Used for Ai(2) calculations - beginning of [l-c, l] range def compute_checkpoint(self, word_index): if word_index % self.stride == 0 and word_index != 0: checkpoint_index = word_index - self.expressivity else: checkpoint_index = ( math.floor(word_index / self.stride) * self.stride + self.stride - self.expressivity ) return checkpoint_index # Computes Ai(2) def compute_subset_summaries(self, absolute_max): checkpoint_index = self.compute_checkpoint(0) subset_two = set() while checkpoint_index <= absolute_max - 1: summary = set( range( checkpoint_index, min(checkpoint_index + self.expressivity + 1, absolute_max), ) ) subset_two = subset_two.union(summary) checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride) return subset_two # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf def compute_fixed_attention_subset(self, word_index, tgt_len): # +1s account for range function; [min, max) -> [min, max] if not self.is_bidirectional: absolute_max = word_index + 1 else: absolute_max = tgt_len # Subset 1 - whole window rounded_index = ( math.floor((word_index + self.stride) / self.stride) * self.stride ) if word_index % self.stride == 0 and word_index != 0: subset_one = set( range(word_index - self.stride, min(absolute_max, word_index + 1)) ) else: subset_one = set( range( max(0, rounded_index - self.stride), min(absolute_max, rounded_index + 1), ) ) # Subset 2 - summary per window # If bidirectional, subset 2 is the same for every index subset_two = set() if not self.is_bidirectional: subset_two = self.compute_subset_summaries(absolute_max) return subset_one.union(subset_two) # Compute sparse mask - if bidirectional, can pre-compute and store def buffered_sparse_mask(self, tensor, tgt_len, src_len): assert tgt_len > self.stride sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf")) # If bidirectional, subset 2 is the same for every index subset_summaries = set() if self.is_bidirectional: subset_summaries = self.compute_subset_summaries(tgt_len) for i in range(tgt_len): fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) fixed_attention_subset = fixed_attention_subset.union(subset_summaries) included_word_indices = torch.LongTensor(list(fixed_attention_subset)) sparse_mask[i].index_fill_(0, 
included_word_indices, 0) return sparse_mask.type_as(tensor) def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) sparse_mask = sparse_mask.unsqueeze(0).expand( bsz * self.num_heads, tgt_len, src_len ) attn_weights += sparse_mask
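

# --- Minimal usage sketch (illustrative only; the demo function below is
# not part of the module). Allowed positions get 0 in the mask and all
# other positions get -inf, combining the local stride window (subset 1)
# with the per-window summary positions (subset 2).
def _demo_sparse_mask():
    import torch

    attn = SparseMultiheadAttention(
        embed_dim=8, num_heads=2, stride=4, expressivity=2, is_bidirectional=True
    )
    mask = attn.buffered_sparse_mask(torch.empty(1), tgt_len=8, src_len=8)
    assert mask.shape == (8, 8)
    return mask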
4,931
33.978723
86
py
sign-topic
sign-topic-main/fairseq/modules/multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import Tensor, nn from torch.nn import Parameter @with_incremental_state class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False self.skip_embed_dim_check = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def _get_reserve_head_index(self, num_heads_to_keep: int): k_proj_heads_norm = [] q_proj_heads_norm = [] v_proj_heads_norm = [] for i in range(self.num_heads): start_idx = i * self.head_dim end_idx = (i + 1) * self.head_dim k_proj_heads_norm.append( torch.sum( torch.abs( self.k_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.k_proj.bias[start_idx:end_idx])).tolist() ) q_proj_heads_norm.append( torch.sum( torch.abs( 
self.q_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.q_proj.bias[start_idx:end_idx])).tolist() ) v_proj_heads_norm.append( torch.sum( torch.abs( self.v_proj.weight[ start_idx:end_idx, ] ) ).tolist() + torch.sum(torch.abs(self.v_proj.bias[start_idx:end_idx])).tolist() ) heads_norm = [] for i in range(self.num_heads): heads_norm.append( k_proj_heads_norm[i] + q_proj_heads_norm[i] + v_proj_heads_norm[i] ) sorted_head_index = sorted( range(self.num_heads), key=lambda k: heads_norm[k], reverse=True ) reserve_head_index = [] for i in range(num_heads_to_keep): start = sorted_head_index[i] * self.head_dim end = (sorted_head_index[i] + 1) * self.head_dim reserve_head_index.append((start, end)) return reserve_head_index def _adaptive_prune_heads(self, reserve_head_index: List[Tuple[int, int]]): new_q_weight = [] new_q_bias = [] new_k_weight = [] new_k_bias = [] new_v_weight = [] new_v_bias = [] new_out_proj_weight = [] for ele in reserve_head_index: start_idx, end_idx = ele new_q_weight.append( self.q_proj.weight[ start_idx:end_idx, ] ) new_q_bias.append(self.q_proj.bias[start_idx:end_idx]) new_k_weight.append( self.k_proj.weight[ start_idx:end_idx, ] ) new_k_bias.append(self.k_proj.bias[start_idx:end_idx]) new_v_weight.append( self.v_proj.weight[ start_idx:end_idx, ] ) new_v_bias.append(self.v_proj.bias[start_idx:end_idx]) new_out_proj_weight.append(self.out_proj.weight[:, start_idx:end_idx]) new_q_weight = torch.cat(new_q_weight).detach() new_k_weight = torch.cat(new_k_weight).detach() new_v_weight = torch.cat(new_v_weight).detach() new_out_proj_weight = torch.cat(new_out_proj_weight, dim=-1).detach() new_q_weight.requires_grad = True new_k_weight.requires_grad = True new_v_weight.requires_grad = True new_out_proj_weight.requires_grad = True new_q_bias = torch.cat(new_q_bias).detach() new_q_bias.requires_grad = True new_k_bias = torch.cat(new_k_bias).detach() new_k_bias.requires_grad = True new_v_bias = torch.cat(new_v_bias).detach() new_v_bias.requires_grad = True self.q_proj.weight = torch.nn.Parameter(new_q_weight) self.q_proj.bias = torch.nn.Parameter(new_q_bias) self.k_proj.weight = torch.nn.Parameter(new_k_weight) self.k_proj.bias = torch.nn.Parameter(new_k_bias) self.v_proj.weight = torch.nn.Parameter(new_v_weight) self.v_proj.bias = torch.nn.Parameter(new_v_bias) self.out_proj.weight = torch.nn.Parameter(new_out_proj_weight) self.num_heads = len(reserve_head_index) self.embed_dim = self.head_dim * self.num_heads self.q_proj.out_features = self.embed_dim self.k_proj.out_features = self.embed_dim self.v_proj.out_features = self.embed_dim def _set_skip_embed_dim_check(self): self.skip_embed_dim_check = True def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). 
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        is_tpu = query.device.type == "xla"

        tgt_len, bsz, embed_dim = query.size()
        src_len = tgt_len
        if not self.skip_embed_dim_check:
            assert (
                embed_dim == self.embed_dim
            ), f"query dim {embed_dim} != {self.embed_dim}"
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        if key is not None:
            src_len, key_bsz, _ = key.size()
            if not torch.jit.is_scripting():
                assert key_bsz == bsz
                assert value is not None
                assert (src_len, bsz) == value.shape[:2]

        if (
            not self.onnx_trace
            and not is_tpu  # don't use PyTorch version on TPUs
            and incremental_state is None
            and not static_kv
            # A workaround for quantization to work. Otherwise JIT compilation
            # treats bias in linear module as method.
            and not torch.jit.is_scripting()
            # The Multihead attention implemented in pytorch forces strong dimension check
            # for input embedding dimension and K,Q,V projection dimension.
            # Since pruning will break the dimension check and it is not easy to modify the pytorch API,
            # it is preferred to bypass the pytorch MHA when we need to skip embed_dim_check
            and not self.skip_embed_dim_check
        ):
            assert key is not None and value is not None
            return F.multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                torch.empty([0]),
                torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout_module.p,
                self.out_proj.weight,
                self.out_proj.bias,
                self.training or self.dropout_module.apply_during_inference,
                key_padding_mask,
                need_weights,
                attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj.weight,
                k_proj_weight=self.k_proj.weight,
                v_proj_weight=self.v_proj.weight,
            )

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            q = self.q_proj(query)
            k = self.k_proj(query)
            v = self.v_proj(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)

        q *= self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )

        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = 
attn.contiguous().view(tgt_len, bsz, self.embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
24,959
38.122257
104
py
sign-topic
sign-topic-main/fairseq/modules/transpose_last.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ transpose last 2 dimensions of the input """ import torch.nn as nn class TransposeLast(nn.Module): def __init__(self, deconstruct_idx=None): super().__init__() self.deconstruct_idx = deconstruct_idx def forward(self, x): if self.deconstruct_idx is not None: x = x[self.deconstruct_idx] return x.transpose(-2, -1)
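

# --- Minimal usage sketch (illustrative only; the demo function below is
# not part of the module). Handy between channels-first ops (e.g. Conv1d)
# and channels-last ops (e.g. LayerNorm).
def _demo_transpose_last():
    import torch

    x = torch.randn(2, 5, 8)  # (batch, time, channels)
    y = TransposeLast()(x)
    assert y.shape == (2, 8, 5)
    return y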
550
25.238095
65
py
sign-topic
sign-topic-main/fairseq/modules/same_pad.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch import nn class SamePad(nn.Module): def __init__(self, kernel_size, causal=False): super().__init__() if causal: self.remove = kernel_size - 1 else: self.remove = 1 if kernel_size % 2 == 0 else 0 def forward(self, x): if self.remove > 0: x = x[:, :, : -self.remove] return x
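

# --- Minimal usage sketch (illustrative only; the demo function below is
# not part of the module). With an even kernel and padding=kernel_size//2,
# Conv1d emits one extra timestep; SamePad trims it so the output length
# matches the input length.
def _demo_same_pad():
    import torch
    from torch import nn

    conv = nn.Conv1d(4, 4, kernel_size=2, padding=1)
    trim = SamePad(kernel_size=2)
    x = torch.randn(1, 4, 10)
    y = trim(conv(x))  # conv(x) has length 11; trimmed back to 10
    assert y.shape == x.shape
    return y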
552
24.136364
65
py
sign-topic
sign-topic-main/fairseq/modules/linearized_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from .conv_tbc import ConvTBC from typing import Dict, Optional from torch import Tensor @with_incremental_state class LinearizedConvolution(ConvTBC): """An optimized version of nn.Conv1d. At training time, this module uses ConvTBC, which is an optimized version of Conv1d. At inference time, it optimizes incremental generation (i.e., one time step at a time) by replacing the convolutions with linear layers. Note that the input order changes from training to inference. """ def __init__(self, in_channels, out_channels, kernel_size, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self._linearized_weight = None self.register_backward_hook(self._clear_linearized_weight) def state_dict(self, destination=None, prefix="", keep_vars=False): state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars) # don't store redundant _linearized_weight in checkpoints if prefix + "_linearized_weight" in state: del state[prefix + "_linearized_weight"] return state def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" if prefix + "_linearized_weight" in state_dict: del state_dict[prefix + "_linearized_weight"] @torch.jit.export def forward( self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, ): """ Args: incremental_state: Used to buffer signal; if not None, then input is expected to contain a single frame. If the input order changes between time steps, call reorder_incremental_state. 
Input: Time x Batch x Channel during training Batch x Time x Channel during inference """ if incremental_state is None: output = self.conv_tbc(input) if self.kernel_size[0] > 1 and self.padding[0] > 0: # remove future timesteps added by padding output = output[: -self.padding[0], :, :] return output # reshape weight weight = self._get_linearized_weight() kw = self.kernel_size[0] bsz = input.size(0) # input: bsz x len x dim if kw > 1: input = input.data input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = input.new(bsz, kw, input.size(2)).zero_() self._set_input_buffer(incremental_state, input_buffer) else: # shift buffer input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone() # append next input input_buffer[:, -1, :] = input[:, -1, :] input = input_buffer with torch.no_grad(): output = F.linear(input.view(bsz, -1), weight, self.bias) return output.view(bsz, 1, -1) @torch.jit.unused def reorder_incremental_state( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order, ): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer) @torch.jit.unused def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ): return utils.get_incremental_state(self, incremental_state, "input_buffer") @torch.jit.unused def _set_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer, ): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) @torch.jit.unused def _get_linearized_weight(self): if self._linearized_weight is None: kw = self.kernel_size[0] weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous() assert weight.size() == (self.out_channels, kw, self.in_channels) return weight.view(self.out_channels, -1) return self._linearized_weight @torch.jit.unused def _clear_linearized_weight(self, *args): self._linearized_weight = None
4,744
36.65873
83
py
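A standalone sketch of the incremental-decoding trick this module implements (toy shapes; plain nn.Conv1d layout rather than fairseq's ConvTBC path): keep the last kw inputs in a rolling buffer and apply the flattened kernel with F.linear, one step at a time. The stepwise outputs match a full causal convolution over the sequence.

import torch
import torch.nn.functional as F

kw, c_in, c_out, bsz, T = 3, 4, 5, 2, 6
weight = torch.randn(c_out, c_in, kw)                    # nn.Conv1d layout
linear_w = weight.permute(0, 2, 1).reshape(c_out, -1)    # C_out x (kw * C_in)

xs = torch.randn(bsz, c_in, T)
buf = torch.zeros(bsz, kw, c_in)
steps = []
for t in range(T):
    # shift the buffer left and append the newest frame
    buf = torch.cat([buf[:, 1:], xs[:, :, t].unsqueeze(1)], dim=1)
    steps.append(F.linear(buf.reshape(bsz, -1), linear_w))
incremental = torch.stack(steps, dim=-1)                 # B x C_out x T

full = F.conv1d(F.pad(xs, (kw - 1, 0)), weight)          # causal conv reference
assert torch.allclose(incremental, full, atol=1e-5)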
sign-topic
sign-topic-main/fairseq/modules/downsampled_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.scalar_bias import scalar_bias class SingleHeadAttention(nn.Module): """ Single-head attention that supports Gating and Downsampling """ def __init__( self, out_channels, embed_dim, head_dim, head_index, dropout=0.0, bias=True, project_input=True, gated=False, downsample=False, num_heads=1, ): super().__init__() self.embed_dim = embed_dim self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_index = head_index self.head_dim = head_dim self.project_input = project_input self.gated = gated self.downsample = downsample self.num_heads = num_heads self.projection = None k_layers = [] v_layers = [] if self.downsample: k_layers.append(Downsample(self.head_index)) v_layers.append(Downsample(self.head_index)) out_proj_size = self.head_dim else: out_proj_size = self.head_dim * self.num_heads if self.gated: k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias)) else: k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias) v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias)) self.in_proj_k = nn.Sequential(*k_layers) self.in_proj_v = nn.Sequential(*v_layers) if self.downsample: self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias) else: self.out_proj = Linear(out_proj_size, out_channels, bias=bias) self.scaling = self.head_dim ** -0.5 def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" src_len, bsz, out_channels = key.size() tgt_len = query.size(0) assert list(query.size()) == [tgt_len, bsz, out_channels] assert key.size() == value.size() if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.downsample: size = bsz else: size = bsz * self.num_heads k = key v = value q = query if self.project_input: q = self.in_proj_q(q) k = self.in_proj_k(k) v = self.in_proj_v(v) src_len = k.size()[0] q *= self.scaling if not self.downsample: q = q.view(tgt_len, size, self.head_dim) k = k.view(src_len, size, self.head_dim) v = v.view(src_len, size, self.head_dim) q = q.transpose(0, 1) k = k.transpose(0, 1) v = v.transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) if mask_future_timesteps: assert ( query.size() == key.size() ), "mask_future_timesteps only applies to self-attention" attn_weights *= torch.tril( attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(), diagonal=-1, )[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0) attn_weights += torch.triu( attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(), diagonal=0, )[:, :: self.head_index + 1 if self.downsample else 1].unsqueeze(0) tgt_size = tgt_len if use_scalar_bias: attn_weights = scalar_bias(attn_weights, 2) v = scalar_bias(v, 1) tgt_size += 1 if key_padding_mask is not None: # don't attend to padding symbols if key_padding_mask.max() > 0: if self.downsample: attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len) else: attn_weights = attn_weights.view( size, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), -math.inf, ) attn_weights = attn_weights.view(size, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_weights = self.dropout_module(attn_weights) attn = torch.bmm(attn_weights, v) if self.downsample: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) attn = self.out_proj(attn) return attn, attn_weights class DownsampledMultiHeadAttention(nn.ModuleList): """ Multi-headed attention with Gating and Downsampling """ def __init__( self, out_channels, embed_dim, num_heads, dropout=0.0, bias=True, project_input=True, gated=False, downsample=False, ): self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.downsample = downsample self.gated = gated self.project_input = project_input assert self.head_dim * num_heads == embed_dim if self.downsample: attention_heads = [] for index in range(self.num_heads): attention_heads.append( SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, index, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) ) super().__init__(modules=attention_heads) self.out_proj = Linear(embed_dim, out_channels, bias=bias) else: # either we have a list of attention heads, or just one attention head # if not being downsampled, we can do the heads with one linear layer instead of separate ones super().__init__() self.attention_module = SingleHeadAttention( out_channels, self.embed_dim, self.head_dim, 1, dropout, bias, self.project_input, self.gated, self.downsample, self.num_heads, ) def forward( self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, use_scalar_bias=False, ): src_len, bsz, embed_dim = key.size() tgt_len = query.size(0) assert embed_dim == self.embed_dim assert list(query.size()) == 
[tgt_len, bsz, embed_dim] assert key.size() == value.size() tgt_size = tgt_len if use_scalar_bias: tgt_size += 1 attn = [] attn_weights = [] if self.downsample: for attention_head_number in range(self.num_heads): # call the forward of each attention head _attn, _attn_weight = self[attention_head_number]( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn = self.out_proj(full_attn) return full_attn, attn_weights[0].clone() else: _attn, _attn_weight = self.attention_module( query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias, ) attn.append(_attn) attn_weights.append(_attn_weight) full_attn = torch.cat(attn, dim=2) full_attn_weights = torch.cat(attn_weights) full_attn_weights = full_attn_weights.view( bsz, self.num_heads, tgt_size, src_len ) full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads return full_attn, full_attn_weights class Downsample(nn.Module): """ Selects every nth element, where n is the index """ def __init__(self, index): super().__init__() self.index = index def forward(self, x): return x[:: self.index + 1] def Linear(in_features, out_features, dropout=0.0, bias=True): """Weight-normalized Linear layer (input: B x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features)) m.bias.data.zero_() return nn.utils.weight_norm(m) def GatedLinear(in_features, out_features, dropout=0.0, bias=True): """Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units""" return nn.Sequential( Linear(in_features, out_features * 4, dropout, bias), nn.GLU(), Linear(out_features * 2, out_features * 2, dropout, bias), nn.GLU(), Linear(out_features, out_features, dropout, bias), )
10,673
32.671924
106
py
sign-topic
sign-topic-main/fairseq/modules/base_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch import sys from fairseq import utils from fairseq.distributed import utils as distributed_utils from fairseq.modules.layer_norm import LayerNorm class BaseLayer(nn.Module): def __init__(self, args): super().__init__() self.num_workers = distributed_utils.get_data_parallel_world_size() expert_centroids = torch.empty(self.num_workers, args.decoder_embed_dim) torch.nn.init.orthogonal_(expert_centroids, gain=0.1) self.register_parameter( "expert_centroids", torch.nn.Parameter(expert_centroids) ) self.expert_network = nn.Sequential( *([BaseSublayer(args) for _ in range(args.base_sublayers)]) ) self.expert_id = distributed_utils.get_data_parallel_rank() self.shuffle = args.base_shuffle self.cpp = self.load_assignment() # Add a special attribute to the expert parameters, so we know not to sync their gradients for param in self.expert_network.parameters(): param.expert = True def forward(self, input_features, *args, **kwargs): features = input_features.reshape(-1, input_features.size(-1)) is_training = input_features.requires_grad if self.shuffle and is_training: # Send each token to a random worker, to break correlations within the batch shuffle_sort = torch.randperm(features.size(0), device=features.device) features = All2All.apply(features[shuffle_sort]) with torch.no_grad(): # Compute similarity of each token to each expert, for routing token_expert_affinities = features.matmul( self.expert_centroids.transpose(0, 1) ) # Compute which token goes to which expert sort_by_expert, input_splits, output_splits = ( self.balanced_assignment(token_expert_affinities) if is_training else self.greedy_assignment(token_expert_affinities) ) # Swap these tokens for the right ones for our expert routed_features = All2All.apply( features[sort_by_expert], output_splits, input_splits ) if routed_features.size(0) > 0: # Mix in the expert network based on how appropriate it is for these tokens alpha = torch.sigmoid( routed_features.mv(self.expert_centroids[self.expert_id]) ).unsqueeze(1) routed_features = ( alpha * self.expert_network(routed_features) + (1 - alpha) * routed_features ) # Return to original worker and ordering result = All2All.apply(routed_features, input_splits, output_splits)[ self.inverse_sort(sort_by_expert) ] if self.shuffle and is_training: # Undo shuffling result = All2All.apply(result)[self.inverse_sort(shuffle_sort)] # Return additional Nones for compatibility with TransformerDecoderLayer return result.view(input_features.size()), None, None def inverse_sort(self, order): # Creates an index that undoes a sort: xs==xs[order][inverse_sort(order)] return torch.empty_like(order).scatter_( 0, order, torch.arange(0, order.size(0), device=order.device) ) def balanced_assignment(self, scores): ok = scores.isfinite() if not ok.all(): # NaNs here can break the assignment algorithm scores[~ok] = scores[ok].min() return self.cpp.balanced_assignment(scores), None, None # Assigns each token to the top k experts def greedy_assignment(self, scores, k=1): token_to_workers = torch.topk(scores, dim=1, k=k, largest=True).indices.view(-1) token_to_workers, sort_ordering = torch.sort(token_to_workers) worker2token = sort_ordering // k # Find how many tokens we're sending to each other worker (being careful for sending 0 tokens to some workers) output_splits = torch.zeros( (self.num_workers,), 
dtype=torch.long, device=scores.device ) workers, counts = torch.unique_consecutive(token_to_workers, return_counts=True) output_splits[workers] = counts # Tell other workers how many tokens to expect from us input_splits = All2All.apply(output_splits) return worker2token, input_splits.tolist(), output_splits.tolist() def load_assignment(self): try: from fairseq import libbase return libbase except ImportError as e: sys.stderr.write( "ERROR: missing libbase. run `python setup.py build_ext --inplace`\n" ) raise e class BaseSublayer(nn.Module): def __init__(self, args): super().__init__() self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") or "relu" ) self.norm = LayerNorm(args.decoder_embed_dim, export=False) self.ff1 = torch.nn.Linear(args.decoder_embed_dim, args.decoder_ffn_embed_dim) self.ff2 = torch.nn.Linear(args.decoder_ffn_embed_dim, args.decoder_embed_dim) self.ff2.weight.data.zero_() def forward(self, xs): return xs + self.ff2(self.activation_fn(self.ff1(self.norm(xs)))) # Wraps torch.distributed.all_to_all_single as a function that supports autograd class All2All(torch.autograd.Function): @staticmethod def forward(ctx, xs, input_splits=None, output_splits=None): ctx.input_splits = input_splits ctx.output_splits = output_splits ys = ( torch.empty_like(xs) if output_splits is None else xs.new_empty(size=[sum(output_splits)] + list(xs.size()[1:])) ) torch.distributed.all_to_all_single( ys, xs, output_split_sizes=output_splits, input_split_sizes=input_splits ) return ys @staticmethod def backward(ctx, grad_output): result = ( torch.empty_like(grad_output) if ctx.input_splits is None else grad_output.new_empty( size=[sum(ctx.input_splits)] + list(grad_output.size()[1:]) ) ) torch.distributed.all_to_all_single( result, grad_output, output_split_sizes=ctx.input_splits, input_split_sizes=ctx.output_splits, ) return result, None, None
6,693
38.146199
118
py
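The inverse-permutation trick used by BaseLayer.inverse_sort, shown standalone: for any permutation `order`, scattering an arange through it yields the index that undoes the sort.

import torch

xs = torch.randn(7)
order = torch.randperm(7)
inverse = torch.empty_like(order).scatter_(
    0, order, torch.arange(7, device=order.device)
)
assert torch.equal(xs, xs[order][inverse])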
sign-topic
sign-topic-main/fairseq/modules/kmeans_attention.py
import torch import torch.nn as nn import torch.nn.functional as F import math from inspect import isfunction from operator import mul from functools import reduce, wraps from aml.multimodal_video.utils.einops.lib import rearrange, repeat from aml.multimodal_video.utils.einops.lib.layers.torch import Rearrange from fairseq.modules.local_attention import LocalAttention # constants TOKEN_SELF_ATTN_VALUE = -5e4 KMEAN_INIT_ITERS = 10 # helper functions def exists(val): return val is not None def identity(x, *args, **kwargs): return x def default(x, d): if not exists(x): return d if not isfunction(d) else d() return x def cast_tuple(x): return x if isinstance(x, tuple) else (x,) def cache_fn(f): cache = None @wraps(f) def cached_fn(*args, **kwargs): nonlocal cache if exists(cache): return cache cache = f(*args, **kwargs) return cache return cached_fn def to(t): return {"device": t.device, "dtype": t.dtype} def find_modules(nn_module, type): return [module for module in nn_module.modules() if isinstance(module, type)] def is_empty(t): return t.nelement() == 0 def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def batched_index_select(values, indices): last_dim = values.shape[-1] return values.gather(2, expand_dim(indices, -1, last_dim)) def merge_dims(ind_from, ind_to, tensor): shape = list(tensor.shape) arr_slice = slice(ind_from, ind_to + 1) shape[arr_slice] = [reduce(mul, shape[arr_slice])] return tensor.reshape(*shape) def expand_dim(t, dim, k): t = t.unsqueeze(dim) expand_shape = [-1] * len(t.shape) expand_shape[dim] = k return t.expand(*expand_shape) def scatter_mean(src, t, index, dim, eps=1e-5): numer = src.scatter_add(dim, index, t) denom = src.scatter_add(dim, index, torch.ones_like(t)) return numer / (denom + eps) def split_at_index(dim, index, t): pre_slices = (slice(None),) * dim l = (*pre_slices, slice(None, index)) r = (*pre_slices, slice(index, None)) return t[l], t[r] def reshape_dim(t, dim, split_dims): shape = list(t.shape) num_dims = len(shape) dim = (dim + num_dims) % num_dims shape[dim : dim + 1] = split_dims return t.reshape(shape) def ema(old, new, decay): if not exists(old): return new return old * decay + new * (1 - decay) def ema_inplace(moving_avg, new, decay): if is_empty(moving_avg): moving_avg.data.copy_(new) return moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) # helper classes def map_first_tuple_or_el(x, fn): if isinstance(x, tuple): return (fn(x[0]),) + x[1:] return fn(x) class Chunk(nn.Module): def __init__(self, chunks, fn, along_dim=-1): super().__init__() self.dim = along_dim self.chunks = chunks self.fn = fn def forward(self, x, **kwargs): if self.chunks <= 1: return self.fn(x, **kwargs) chunks = x.chunk(self.chunks, dim=self.dim) return torch.cat([self.fn(c, **kwargs) for c in chunks], dim=self.dim) class PreNorm(nn.ModuleList): def __init__(self, norm_class, dim, fn): super().__init__() self.norm = norm_class(dim) self.fn = fn def forward(self, x, **kwargs): x = self.norm(x) return self.fn(x, **kwargs) class ReZero(nn.Module): def __init__(self, fn): super().__init__() self.residual_weight = nn.Parameter(torch.zeros(1)) self.fn = fn def forward(self, x, **kwargs): x = self.fn(x, **kwargs) return map_first_tuple_or_el(x, lambda t: t * self.residual_weight) class ScaleNorm(nn.Module): def __init__(self, dim, eps=1e-5): super().__init__() self.g = nn.Parameter(torch.ones(1)) self.eps = eps def forward(self, x): def norm(t): n = torch.norm(t, dim=-1, keepdim=True).clamp(min=self.eps) return t / n * self.g return 
map_first_tuple_or_el(x, norm) class ProjectInOut(nn.Module): def __init__(self, fn, dim_in, dim_out, project_out=True): super().__init__() self.fn = fn self.project_in = nn.Linear(dim_in, dim_out) self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity def forward(self, x, **kwargs): x = self.project_in(x) x, loss = self.fn(x, **kwargs) x = self.project_out(x) return x, loss class MatrixMultiply(nn.Module): def __init__(self, tensor, transpose=False): super().__init__() self.tensor = tensor self.transpose = transpose def forward(self, x): tensor = self.tensor if self.transpose: tensor = tensor.t() return x @ tensor # positional embeddings class DepthWiseConv1d(nn.Module): def __init__(self, dim_in, dim_out, kernel_size, stride=1, bias=True, causal=False): super().__init__() self.padding = ( ((kernel_size - 1), 0) if causal else (kernel_size // 2, kernel_size // 2) ) self.net = nn.Sequential( nn.Conv1d( dim_in, dim_in, kernel_size=kernel_size, groups=dim_in, stride=stride, bias=bias, ), nn.Conv1d(dim_in, dim_out, 1, bias=bias), ) def forward(self, x): x = F.pad(x, self.padding, value=0.0) return self.net(x) class FixedPositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) position = torch.arange(0, max_seq_len, dtype=torch.float) sinusoid_inp = torch.einsum("i,j->ij", position, inv_freq) emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) self.register_buffer("emb", emb) def forward(self, x): return self.emb[None, : x.shape[1], :].to(x) def rotate_every_two(x): x = rearrange(x, "... (d j) -> ... d j", j=2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return rearrange(x, "... d j -> ... (d j)") def apply_rotary_pos_emb(q, k, sinu_pos): sinu_pos = rearrange(sinu_pos, "() n (j d) -> n j d", j=2) sin, cos = sinu_pos.unbind(dim=-2) sin, cos = map(lambda t: repeat(t, "b n -> b (n j)", j=2), (sin, cos)) q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k)) return q, k # kmeans related function and class def update_kmeans_on_backwards(module): module.kmean_modules = find_modules(module, Kmeans) def hook(_, grad_in, grad_out): for m in module.kmean_modules: m.update() return module.register_backward_hook(hook) def similarity(x, means): return torch.einsum("bhld,hcd->bhlc", x, means) def dists_and_buckets(x, means): dists = similarity(x, means) _, buckets = torch.max(dists, dim=-1) return dists, buckets def batched_bincount(index, num_classes, dim=-1): shape = list(index.shape) shape[dim] = num_classes out = index.new_zeros(shape) out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype)) return out def kmeans_iter(x, means, buckets=None): b, h, _, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1] if not exists(buckets): _, buckets = dists_and_buckets(x, means) bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True) zero_mask = bins.long() == 0 means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype) means_.scatter_add_(-2, expand_dim(buckets, -1, d), x) means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype) means = torch.where(zero_mask.unsqueeze(-1), means, means_) means = means.squeeze(0) return means def distribution(dists, window_size): _, topk_indices = dists.topk(k=window_size, dim=-2) indices = topk_indices.transpose(-2, -1) return indices.reshape(*indices.size()[:2], -1) class Kmeans(nn.Module): def __init__( self, num_heads, head_dim, num_clusters, ema_decay=0.999, 
commitment=1e-4 ): super().__init__() self.commitment = commitment self.ema_decay = ema_decay self.register_buffer("means", torch.randn(num_heads, num_clusters, head_dim)) self.register_buffer("initted", torch.tensor(False)) self.num_new_means = 0 self.new_means = None @torch.no_grad() def init(self, x): if self.initted: return _, h, _, d, device, _ = *x.shape, x.device, x.dtype num_clusters = self.means.shape[1] means = x.transpose(0, 1).contiguous().view(h, -1, d) num_samples = means.shape[1] if num_samples >= num_clusters: indices = torch.randperm(num_samples, device=device)[:num_clusters] else: indices = torch.randint(0, num_samples, (num_clusters,), device=device) means = means[:, indices] for _ in range(KMEAN_INIT_ITERS): means = kmeans_iter(x, means) self.num_new_means = 0 self.means.data.copy_(means) self.initted.data.copy_(torch.tensor(True)) @torch.no_grad() def update(self, new_means=None): new_means = default(new_means, self.new_means) assert exists(new_means), "new kmeans has not been supplied" ema_inplace(self.means, new_means, self.ema_decay) del self.new_means self.new_means = None self.num_new_means = 0 def forward(self, x, update_means=False): self.init(x) b, dtype = x.shape[0], x.dtype means = self.means.type(dtype) x = F.normalize(x, 2, dim=-1).type(dtype) with torch.no_grad(): dists, buckets = dists_and_buckets(x, means) routed_means = batched_index_select(expand_dim(means, 0, b), buckets) loss = F.mse_loss(x, routed_means) * self.commitment if update_means: with torch.no_grad(): means = kmeans_iter(x, means, buckets) self.new_means = ema( self.new_means, means, self.num_new_means / (self.num_new_means + 1) ) self.num_new_means += 1 return dists, loss # kmeans attention class class KmeansAttention(nn.Module): def __init__( self, num_clusters, window_size, num_heads, head_dim, causal=False, dropout=0.0, ema_decay=0.999, commitment=1e-4, context_window_size=None, receives_context=False, num_mem_kv=0, shared_qk=False, ): super().__init__() self.num_heads = num_heads self.num_clusters = num_clusters self.head_dim = head_dim self.window_size = window_size self.context_window_size = default(context_window_size, window_size) self.causal = causal self.shared_qk = shared_qk self.receives_context = receives_context self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment) self.dropout = nn.Dropout(dropout) self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0) self.mem_key = nn.Parameter( torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim) ) self.mem_value = nn.Parameter( torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim) ) def forward(self, q, k, v, query_mask=None, key_mask=None, **kwargs): b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = ( *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype, ) is_reverse = kwargs.pop("_reverse", False) out = torch.zeros_like(q, dtype=dtype) update_kmeans = self.training and not is_reverse key_mask = ( default(key_mask, query_mask) if not self.receives_context else key_mask ) kv_wsz = wsz if not self.receives_context else c_wsz wsz = min(wsz, t) kv_wsz = min(kv_wsz, kv_t) if not self.shared_qk or self.receives_context: dists, aux_loss = self.kmeans(torch.cat((q, k), dim=2), update_kmeans) q_dists, k_dists = split_at_index(2, t, dists) indices = distribution(q_dists, wsz) kv_indices = distribution(k_dists, kv_wsz) else: dists, aux_loss = self.kmeans(q, update_kmeans) k = F.normalize(k, dim=-1).to(q) indices = distribution(dists, 
wsz) kv_indices = indices q = batched_index_select(q, indices) k = batched_index_select(k, kv_indices) v = batched_index_select(v, kv_indices) reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d) q, k, v = map(reshape_with_window, (q, k, v)) m_k, m_v = map( lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value) ) k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v))) dots = torch.einsum("bhnid,bhnjd->bhnij", q, k) * (d ** -0.5) mask_value = max_neg_value(dots) if exists(query_mask) or exists(key_mask): query_mask = default( query_mask, lambda: torch.ones((b, t), device=device).bool() ) key_mask = default( key_mask, lambda: torch.ones((b, kv_t), device=device).bool() ) q_mask = expand_dim(query_mask, 1, h).gather(2, indices) kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices) q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask)) mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=1) dots.masked_fill_(~mask, mask_value) del mask if self.causal: q_mask, kv_mask = map( lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices) ) mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=1) dots.masked_fill_(~mask, mask_value) del mask if self.shared_qk: q_mask, kv_mask = map( lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices) ) mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :] mask = F.pad(mask, (self.num_mem_kv, 0), value=0) dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE) del mask dots = dots.softmax(dim=-1) dots = self.dropout(dots) bo = torch.einsum("bhcij,bhcjd->bhcid", dots, v) so = torch.reshape(bo, (b, h, -1, bo.shape[-1])).type(dtype) out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2) return out, aux_loss # feedforward class GELU_(nn.Module): def forward(self, x): return ( 0.5 * x * ( 1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))) ) ) GELU = nn.GELU if hasattr(nn, "GELU") else GELU_ class FeedForward(nn.Module): def __init__(self, dim, mult=4, dropout=0.0, activation=None, glu=False): super().__init__() activation = default(activation, GELU) self.glu = glu self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1)) self.act = activation() self.dropout = nn.Dropout(dropout) self.w2 = nn.Linear(dim * mult, dim) def forward(self, x, **kwargs): if not self.glu: x = self.w1(x) x = self.act(x) else: x, v = self.w1(x).chunk(2, dim=-1) x = self.act(x) * v x = self.dropout(x) x = self.w2(x) return x # self attention class SelfAttention(nn.Module): def __init__( self, dim, max_seq_len, heads, local_attn_heads, window_size, dim_head=None, local_attn_window_size=None, local_attn_radius_blocks=1, causal=False, attn_dropout=0.0, dropout=0.0, kmeans_ema_decay=0.999, commitment_factor=1e-4, receives_context=False, context_window_size=None, rel_pos_emb=True, num_mem_kv=0, shared_qk=False, conv_query_kernel=9, ): super().__init__() assert ( dim_head or (dim % heads) == 0 ), "hidden dimension must be divisible by number of heads" assert ( max_seq_len % window_size ) == 0, "maximum sequence length must be divisible by the target window size" assert ( local_attn_heads <= heads ), "number of local attention heads must be less than total heads" assert not ( receives_context and local_attn_heads > 0 ), "local attention cannot be used for self attention with context" assert not ( receives_context and causal ), "contextual attention layer cannot be causal" local_attn_window_size = 
default(local_attn_window_size, window_size) context_window_size = default(context_window_size, window_size) self.shared_qk = shared_qk self.receives_context = receives_context self.heads = heads self.local_attn_heads = local_attn_heads self.global_attn_heads = heads - local_attn_heads self.causal = causal self.window_size = window_size dim_head = default(dim_head, dim // heads) dim_heads = dim_head * heads self.dim_head = dim_head num_clusters = max_seq_len // window_size # local local_dim_heads = dim_head * self.local_attn_heads if self.local_attn_heads > 0: rel_pos_emb_config = (dim_head, local_attn_heads) if rel_pos_emb else None self.local_attn = LocalAttention( dim=dim_head, window_size=local_attn_window_size, causal=causal, dropout=attn_dropout, rel_pos_emb_config=rel_pos_emb_config, look_backward=local_attn_radius_blocks, look_forward=0 if causal else local_attn_radius_blocks, ) self.local_to_qkv = nn.Linear(dim, 3 * local_dim_heads) # global global_dim_heads = dim_head * self.global_attn_heads if self.global_attn_heads > 0: self.global_attn = KmeansAttention( num_clusters, window_size, self.global_attn_heads, dim_head, causal=causal, dropout=attn_dropout, ema_decay=kmeans_ema_decay, commitment=commitment_factor, receives_context=receives_context, num_mem_kv=num_mem_kv, shared_qk=shared_qk, ) self.to_q = nn.Sequential( Rearrange("b n c -> b c n"), DepthWiseConv1d(dim, global_dim_heads, conv_query_kernel, causal=causal), Rearrange("b c n -> b n c"), ) self.to_v = nn.Linear(dim, global_dim_heads, bias=False) if not self.shared_qk: self.to_k = nn.Linear(dim, global_dim_heads, bias=False) # out self.to_out = nn.Linear(dim_heads, dim, bias=False) self.dropout = nn.Dropout(dropout) def forward( self, query, key, value, context=None, key_padding_mask=None, context_mask=None, pos_emb=None, **kwargs ): assert not ( self.receives_context and not exists(context) ), "context must be passed if self attention is set to receive context" input_mask = key_padding_mask x = query.transpose(0, 1) b, t, _, h, dh = *x.shape, self.heads, self.dim_head has_local, has_global = map( lambda x: x > 0, (self.local_attn_heads, self.global_attn_heads) ) split_heads = ( lambda v: reshape_dim(v, -1, (-1, dh)).transpose(1, 2).contiguous() ) if has_local: local_qkv = self.local_to_qkv(x).chunk(3, dim=-1) lq, lk, lv = map(split_heads, local_qkv) if has_global: kv_input = x if not self.receives_context else context q, v = self.to_q(x), self.to_v(kv_input) if not self.shared_qk: k = self.to_k(kv_input) else: k = self.to_q(kv_input) if self.receives_context else q q, k, v = map(split_heads, (q, k, v)) out = [] total_loss = torch.tensor(0.0, requires_grad=True, **to(x)) if has_local: local_out = self.local_attn(lq, lk, lv, input_mask=input_mask) out.append(local_out) if has_global: if not self.receives_context and exists(pos_emb): q, k = apply_rotary_pos_emb(q, k, pos_emb) global_out, loss = self.global_attn( q, k, v, query_mask=input_mask, key_mask=context_mask ) total_loss = total_loss + loss out.append(global_out) out = torch.cat(out, dim=1) out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1) out = self.dropout(out.transpose(0, 1)) # out = self.to_out(out) return out, total_loss
21,842
28.319463
88
py
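A toy re-tracing of the routing primitives above (sizes are illustrative): score every token against per-head centroids with the same einsum as similarity(), then bucket each token to its nearest centroid as dists_and_buckets() does.

import torch
import torch.nn.functional as F

b, h, l, d, c = 2, 4, 10, 8, 3
x = F.normalize(torch.randn(b, h, l, d), dim=-1)
means = F.normalize(torch.randn(h, c, d), dim=-1)

dists = torch.einsum("bhld,hcd->bhlc", x, means)   # as in similarity()
buckets = dists.argmax(dim=-1)                     # as in dists_and_buckets()
assert buckets.shape == (b, h, l)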
sign-topic
sign-topic-main/fairseq/modules/quant_noise.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn def quant_noise(module, p, block_size): """ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks """ # if no quantization noise, don't register hook if p <= 0: return module # supported modules assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) # test whether module.weight has the right sizes wrt block_size is_conv = module.weight.ndim == 4 # 2D matrix if not is_conv: assert ( module.weight.size(1) % block_size == 0 ), "Input features must be a multiple of block sizes" # 4D matrix else: # 1x1 convolutions if module.kernel_size == (1, 1): assert ( module.in_channels % block_size == 0 ), "Input channels must be a multiple of block sizes" # regular convolutions else: k = module.kernel_size[0] * module.kernel_size[1] assert k % block_size == 0, "Kernel size must be a multiple of block size" def _forward_pre_hook(mod, input): # no noise for evaluation if mod.training: if not is_conv: # gather weight and sizes weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) # split weight matrix into blocks and randomly drop selected blocks mask = torch.zeros( in_features // block_size * out_features, device=weight.device ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: # gather weight and sizes weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels # split weight matrix into blocks and randomly drop selected blocks if mod.kernel_size == (1, 1): mask = torch.zeros( int(in_channels // block_size * out_channels), device=weight.device, ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros( weight.size(0), weight.size(1), device=weight.device ) mask.bernoulli_(p) mask = ( mask.unsqueeze(2) .unsqueeze(3) .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) ) # scale weights and apply mask mask = mask.to( torch.bool ) # x.bool() is not currently supported in TorchScript s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) module.register_forward_pre_hook(_forward_pre_hook) return module
4,005
36.092593
87
py
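Usage sketch, assuming the quant_noise function above is in scope: wrap a Linear so that, in training mode, random weight blocks are zeroed and the survivors rescaled by 1/(1-p); evaluation uses the clean weights.

import torch
import torch.nn as nn

layer = quant_noise(nn.Linear(16, 16), p=0.2, block_size=8)
layer.train()
y = layer(torch.randn(4, 16))   # noised forward (blocks dropped)
layer.eval()
y = layer(torch.randn(4, 16))   # clean weights at eval time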
sign-topic
sign-topic-main/fairseq/modules/gelu.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""

import math

import torch
import torch.nn as nn


def gelu_accurate(x):
    if not hasattr(gelu_accurate, "_a"):
        gelu_accurate._a = math.sqrt(2 / math.pi)
    return (
        0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
    )


def gelu(x: torch.Tensor) -> torch.Tensor:
    return torch.nn.functional.gelu(x.float()).type_as(x)
706
26.192308
87
py
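Quick comparison, assuming the two functions above are in scope. The tanh approximation tracks the exact erf-based GELU closely (maximum deviation on the order of a few 1e-4):

import torch

x = torch.randn(1000)
assert torch.allclose(gelu_accurate(x), gelu(x), atol=1e-3)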
sign-topic
sign-topic-main/fairseq/modules/lstm_cell_with_zoneout.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn


class LSTMCellWithZoneOut(nn.Module):
    """
    Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations
    https://arxiv.org/abs/1606.01305
    """

    def __init__(
        self, prob: float, input_size: int, hidden_size: int, bias: bool = True
    ):
        super(LSTMCellWithZoneOut, self).__init__()
        self.lstm_cell = nn.LSTMCell(input_size, hidden_size, bias=bias)
        self.prob = prob
        if prob > 1.0 or prob < 0.0:
            raise ValueError(
                "zoneout probability must be in the range from " "0.0 to 1.0."
            )

    def zoneout(self, h, next_h, prob):
        if isinstance(h, tuple):
            return tuple([self.zoneout(h[i], next_h[i], prob) for i in range(len(h))])

        if self.training:
            mask = h.new_zeros(*h.size()).bernoulli_(prob)
            return mask * h + (1 - mask) * next_h

        return prob * h + (1 - prob) * next_h

    def forward(self, x, h):
        return self.zoneout(h, self.lstm_cell(x, h), self.prob)
1,220
31.131579
86
py
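Usage sketch, assuming the class above is in scope; sizes are illustrative. The cell returns an (h, c) tuple like nn.LSTMCell, with zoneout mixing in the previous state:

import torch

cell = LSTMCellWithZoneOut(prob=0.1, input_size=8, hidden_size=16)
x = torch.randn(4, 8)
h = (torch.zeros(4, 16), torch.zeros(4, 16))
h = cell(x, h)   # (h_t, c_t) with zoneout applied
assert h[0].shape == (4, 16) and h[1].shape == (4, 16)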
sign-topic
sign-topic-main/fairseq/modules/positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn

from .learned_positional_embedding import LearnedPositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding


def PositionalEmbedding(
    num_embeddings: int,
    embedding_dim: int,
    padding_idx: int,
    learned: bool = False,
):
    if learned:
        # if padding_idx is specified then offset the embedding ids by
        # this index and adjust num_embeddings appropriately
        # TODO: The right place for this offset would be inside
        # LearnedPositionalEmbedding. Move this there for a cleaner implementation.
        if padding_idx is not None:
            num_embeddings = num_embeddings + padding_idx + 1
        m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
        nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
        if padding_idx is not None:
            nn.init.constant_(m.weight[padding_idx], 0)
    else:
        m = SinusoidalPositionalEmbedding(
            embedding_dim,
            padding_idx,
            init_size=num_embeddings + padding_idx + 1,
        )
    return m
1,295
35
83
py
sign-topic
sign-topic-main/fairseq/modules/fairseq_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import List, Optional

import torch.nn as nn
import torch.nn.functional as F


logger = logging.getLogger(__name__)


class FairseqDropout(nn.Module):
    def __init__(self, p, module_name=None):
        super().__init__()
        self.p = p
        self.module_name = module_name
        self.apply_during_inference = False

    def forward(self, x, inplace: bool = False):
        if self.p > 0 and (self.training or self.apply_during_inference):
            return F.dropout(x, p=self.p, training=True, inplace=inplace)
        else:
            return x

    def make_generation_fast_(
        self,
        name: str,
        retain_dropout: bool = False,
        retain_dropout_modules: Optional[List[str]] = None,
        **kwargs
    ):
        if retain_dropout:
            if retain_dropout_modules is not None and self.module_name is None:
                logger.warning(
                    "Cannot enable dropout during inference for module {} "
                    "because module_name was not set".format(name)
                )
            elif (
                retain_dropout_modules is None  # if None, apply to all modules
                or self.module_name in retain_dropout_modules
            ):
                logger.info(
                    "Enabling dropout during inference for module: {}".format(name)
                )
                self.apply_during_inference = True
            else:
                logger.info("Disabling dropout for module: {}".format(name))
1,703
31.769231
83
py
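Usage sketch, assuming FairseqDropout above is in scope. Unlike nn.Dropout, the apply_during_inference flag lets you keep dropout active in eval mode (e.g., for Monte Carlo dropout):

import torch

drop = FairseqDropout(p=0.5, module_name="demo")
drop.train()
y = drop(torch.ones(8))                                  # roughly half zeroed
drop.eval()
assert torch.equal(drop(torch.ones(8)), torch.ones(8))   # identity at eval

drop.apply_during_inference = True
y = drop(torch.ones(8))                                  # active despite eval mode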
sign-topic
sign-topic-main/fairseq/modules/cross_entropy.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import torch
import torch.nn.functional as F


logger = logging.getLogger(__name__)


def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction="mean"):
    lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    return F.nll_loss(
        lprobs,
        target,
        ignore_index=ignore_index,
        reduction=reduction,
    )


try:
    import xentropy_cuda
    from apex.contrib import xentropy

    def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
        if logits.device == torch.device("cpu"):
            return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
        else:
            if not getattr(cross_entropy, "_has_logged_once", False):
                logger.info("using fused cross entropy")
                cross_entropy._has_logged_once = True

            half_to_float = logits.dtype == torch.half
            losses = xentropy.SoftmaxCrossEntropyLoss.apply(
                logits,
                target,
                0.0,
                ignore_index,
                half_to_float,
            )

            if reduction == "sum":
                return losses.sum()
            elif reduction == "mean":
                if ignore_index >= 0:
                    return losses.sum() / target.ne(ignore_index).sum()
                else:
                    return losses.mean()
            elif reduction == "none":
                return losses
            else:
                raise NotImplementedError

except ImportError:

    def cross_entropy(logits, target, ignore_index=-100, reduction="mean"):
        return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
1,871
30.2
82
py
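Sanity-check sketch, assuming the module above is in scope and running on CPU (so the pure-PyTorch path is taken whether or not apex is installed): the fallback matches F.cross_entropy.

import torch
import torch.nn.functional as F

logits = torch.randn(6, 10)
target = torch.randint(0, 10, (6,))

ours = cross_entropy(logits, target)                 # pytorch fallback on CPU
ref = F.cross_entropy(logits, target, reduction="mean")
assert torch.allclose(ours, ref, atol=1e-5)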
sign-topic
sign-topic-main/fairseq/modules/adaptive_input.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List import torch from fairseq.modules.quant_noise import quant_noise from torch import nn class AdaptiveInput(nn.Module): def __init__( self, vocab_size: int, padding_idx: int, initial_dim: int, factor: float, output_dim: int, cutoff: List[int], q_noise: float = 0, qn_block_size: int = 8, ): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert ( vocab_size == cutoff[-1] ), "cannot specify cutoff larger than vocab size" self.cutoff = cutoff self.embedding_dim = output_dim self.padding_idx = padding_idx self.embeddings = nn.ModuleList() for i in range(len(self.cutoff)): prev = self.cutoff[i - 1] if i > 0 else 0 size = self.cutoff[i] - prev dim = int(initial_dim // (factor ** i)) seq = nn.Sequential( nn.Embedding(size, dim, self.padding_idx), quant_noise( nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size ), ) self.embeddings.append(seq) self.padding_idx = None self.padding_idx = padding_idx def init_weights(m): if isinstance(m, nn.Embedding): nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) elif hasattr(m, "weight"): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer("_float_tensor", torch.FloatTensor(1)) def weights_for_band(self, band: int): return self.embeddings[band][0].weight, self.embeddings[band][1].weight def forward(self, input: torch.Tensor): result = self._float_tensor.new(input.shape + (self.embedding_dim,)) for i in range(len(self.cutoff)): mask = input.lt(self.cutoff[i]) if i > 0: mask.mul_(input.ge(self.cutoff[i - 1])) chunk_input = input[mask] - self.cutoff[i - 1] else: chunk_input = input[mask] if mask.any(): result[mask] = self.embeddings[i](chunk_input) return result
2,566
30.691358
82
py
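A toy re-tracing of the bucketing logic in AdaptiveInput.forward (cutoffs and sizes are illustrative): each token id is routed to the frequency band it falls into, and ids are shifted so each band's embedding table is indexed from zero.

import torch

cutoff = [5, 20, 100]
input = torch.randint(0, 100, (4, 7))
for i in range(len(cutoff)):
    mask = input.lt(cutoff[i])
    if i > 0:
        mask.mul_(input.ge(cutoff[i - 1]))
        chunk_input = input[mask] - cutoff[i - 1]   # band-local ids
    else:
        chunk_input = input[mask]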
sign-topic
sign-topic-main/fairseq/modules/gumbel_vector_quantizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class GumbelVectorQuantizer(nn.Module): def __init__( self, dim, num_vars, temp, groups, combine_groups, vq_dim, time_first, activation=nn.GELU(), weight_proj_depth=1, weight_proj_factor=1, ): """Vector quantization using gumbel softmax Args: dim: input dimension (channels) num_vars: number of quantized vectors per group temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor) groups: number of groups for vector quantization combine_groups: whether to use the vectors for all groups vq_dim: dimensionality of the resulting quantized vector time_first: if true, expect input in BxTxC format, otherwise in BxCxT activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1 weight_proj_depth: number of layers (with activation in between) to project input before computing logits weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of projections by this factor """ super().__init__() self.groups = groups self.combine_groups = combine_groups self.input_dim = dim self.num_vars = num_vars self.time_first = time_first assert ( vq_dim % groups == 0 ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation" var_dim = vq_dim // groups num_groups = groups if not combine_groups else 1 self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim)) nn.init.uniform_(self.vars) if weight_proj_depth > 1: def block(input_dim, output_dim): return nn.Sequential(nn.Linear(input_dim, output_dim), activation) inner_dim = self.input_dim * weight_proj_factor self.weight_proj = nn.Sequential( *[ block(self.input_dim if i == 0 else inner_dim, inner_dim) for i in range(weight_proj_depth - 1) ], nn.Linear(inner_dim, groups * num_vars), ) else: self.weight_proj = nn.Linear(self.input_dim, groups * num_vars) nn.init.normal_(self.weight_proj.weight, mean=0, std=1) nn.init.zeros_(self.weight_proj.bias) if isinstance(temp, str): import ast temp = ast.literal_eval(temp) assert len(temp) == 3, f"{temp}, {len(temp)}" self.max_temp, self.min_temp, self.temp_decay = temp self.curr_temp = self.max_temp self.codebook_indices = None def set_num_updates(self, num_updates): self.curr_temp = max( self.max_temp * self.temp_decay ** num_updates, self.min_temp ) def get_codebook_indices(self): if self.codebook_indices is None: from itertools import product p = [range(self.num_vars)] * self.groups inds = list(product(*p)) self.codebook_indices = torch.tensor( inds, dtype=torch.long, device=self.vars.device ).flatten() if not self.combine_groups: self.codebook_indices = self.codebook_indices.view( self.num_vars ** self.groups, -1 ) for b in range(1, self.groups): self.codebook_indices[:, b] += self.num_vars * b self.codebook_indices = self.codebook_indices.flatten() return self.codebook_indices def codebook(self): indices = self.get_codebook_indices() return ( self.vars.squeeze(0) .index_select(0, indices) .view(self.num_vars ** self.groups, -1) ) def sample_from_codebook(self, b, n): indices = self.get_codebook_indices() indices = indices.view(-1, self.groups) cb_size = indices.size(0) assert ( n < cb_size ), f"sample size {n} is greater than size of codebook {cb_size}" sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,)) indices = 
indices[sample_idx] z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1) return z def to_codebook_index(self, indices): res = indices.new_full(indices.shape[:-1], 0) for i in range(self.groups): exponent = self.groups - i - 1 res += indices[..., i] * (self.num_vars ** exponent) return res def forward_idx(self, x): res = self.forward(x, produce_targets=True) return res["x"], res["targets"] def forward(self, x, produce_targets=False): result = {"num_vars": self.num_vars * self.groups} if not self.time_first: x = x.transpose(1, 2) bsz, tsz, fsz = x.shape x = x.reshape(-1, fsz) x = self.weight_proj(x) x = x.view(bsz * tsz * self.groups, -1) _, k = x.max(-1) hard_x = ( x.new_zeros(*x.shape) .scatter_(-1, k.view(-1, 1), 1.0) .view(bsz * tsz, self.groups, -1) ) hard_probs = torch.mean(hard_x.float(), dim=0) result["code_perplexity"] = torch.exp( -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) ).sum() avg_probs = torch.softmax( x.view(bsz * tsz, self.groups, -1).float(), dim=-1 ).mean(dim=0) result["prob_perplexity"] = torch.exp( -torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1) ).sum() result["temp"] = self.curr_temp if self.training: x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x) else: x = hard_x x = x.view(bsz * tsz, -1) vars = self.vars if self.combine_groups: vars = vars.repeat(1, self.groups, 1) if produce_targets: result["targets"] = ( x.view(bsz * tsz * self.groups, -1) .argmax(dim=-1) .view(bsz, tsz, self.groups) .detach() ) x = x.unsqueeze(-1) * vars x = x.view(bsz * tsz, self.groups, self.num_vars, -1) x = x.sum(-2) x = x.view(bsz, tsz, -1) if not self.time_first: x = x.transpose(1, 2) # BTC -> BCT result["x"] = x return result
6,899
32.823529
117
py
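The core of the quantizer's training path, shown standalone with toy sizes: a straight-through Gumbel-softmax over codebook logits (hard one-hot forward, soft gradients), then a weighted sum of codebook vectors.

import torch
import torch.nn.functional as F

num_vars, var_dim = 16, 8
logits = torch.randn(32, num_vars, requires_grad=True)
codebook = torch.randn(num_vars, var_dim)

one_hot = F.gumbel_softmax(logits, tau=2.0, hard=True)  # hard forward, soft grad
quantized = one_hot @ codebook                          # 32 x var_dim
quantized.sum().backward()                              # gradients reach logits
assert logits.grad is not None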
sign-topic
sign-topic-main/fairseq/modules/vggblock.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

from collections.abc import Iterable
from itertools import repeat

import torch
import torch.nn as nn


def _pair(v):
    if isinstance(v, Iterable):
        assert len(v) == 2, "len(v) != 2"
        return v
    return tuple(repeat(v, 2))


def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
    sample_seq_len = 200
    sample_bsz = 10
    x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
    # N x C x H x W
    # N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
    x = conv_op(x)
    # N x C x H x W
    x = x.transpose(1, 2)
    # N x H x C x W
    bsz, seq = x.size()[:2]
    per_channel_dim = x.size()[3]
    # bsz: N, seq: H, CxW the rest
    return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim


class VGGBlock(torch.nn.Module):
    """
    VGG-motivated CNN module https://arxiv.org/pdf/1409.1556.pdf

    Args:
        in_channels: (int) number of input channels (typically 1)
        out_channels: (int) number of output channels
        conv_kernel_size: convolution kernel size
        pooling_kernel_size: the size of the pooling window to take a max over
        num_conv_layers: (int) number of convolution layers
        input_dim: (int) input dimension
        conv_stride: the stride of the convolving kernel.
            Can be a single number or a tuple (sH, sW)  Default: 1
        padding: implicit paddings on both sides of the input.
            Can be a single number or a tuple (padH, padW). Default: None
        layer_norm: (bool) if layer norm is going to be applied. Default: False

    Shape:
        Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
        Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size,
        pooling_kernel_size,
        num_conv_layers,
        input_dim,
        conv_stride=1,
        padding=None,
        layer_norm=False,
    ):
        assert (
            input_dim is not None
        ), "Need input_dim for LayerNorm and infer_conv_output_dim"
        super(VGGBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_kernel_size = _pair(conv_kernel_size)
        self.pooling_kernel_size = _pair(pooling_kernel_size)
        self.num_conv_layers = num_conv_layers
        self.padding = (
            tuple(e // 2 for e in self.conv_kernel_size)
            if padding is None
            else _pair(padding)
        )
        self.conv_stride = _pair(conv_stride)

        self.layers = nn.ModuleList()
        for layer in range(num_conv_layers):
            conv_op = nn.Conv2d(
                in_channels if layer == 0 else out_channels,
                out_channels,
                self.conv_kernel_size,
                stride=self.conv_stride,
                padding=self.padding,
            )
            self.layers.append(conv_op)
            if layer_norm:
                conv_output_dim, per_channel_dim = infer_conv_output_dim(
                    conv_op, input_dim, in_channels if layer == 0 else out_channels
                )
                self.layers.append(nn.LayerNorm(per_channel_dim))
                input_dim = per_channel_dim
            self.layers.append(nn.ReLU())

        if self.pooling_kernel_size is not None:
            pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
            self.layers.append(pool_op)
            self.total_output_dim, self.output_dim = infer_conv_output_dim(
                pool_op, input_dim, out_channels
            )

    def forward(self, x):
        for i, _ in enumerate(self.layers):
            x = self.layers[i](x)
        return x
4,057
33.683761
88
py
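Usage sketch, assuming VGGBlock above is in scope; all sizes are illustrative. With kernel 3 (padding 1) the convolutions preserve dims, and the 2x2 max-pool halves both time and feature axes:

import torch

block = VGGBlock(
    in_channels=1, out_channels=32, conv_kernel_size=3,
    pooling_kernel_size=2, num_conv_layers=2, input_dim=40,
)
x = torch.randn(8, 1, 100, 40)    # B x C x T x feat
y = block(x)
assert y.shape == (8, 32, 50, 20)
assert block.output_dim == 20     # per-channel feature dim after pooling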
sign-topic
sign-topic-main/fairseq/modules/character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Tuple import torch import torch.nn.functional as F from fairseq.data import Dictionary from torch import nn CHAR_PAD_IDX = 0 CHAR_EOS_IDX = 257 logger = logging.getLogger(__name__) class CharacterTokenEmbedder(torch.nn.Module): def __init__( self, vocab: Dictionary, filters: List[Tuple[int, int]], char_embed_dim: int, word_embed_dim: int, highway_layers: int, max_char_len: int = 50, char_inputs: bool = False, ): super(CharacterTokenEmbedder, self).__init__() self.onnx_trace = False self.embedding_dim = word_embed_dim self.max_char_len = max_char_len self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0) self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim)) self.eos_idx, self.unk_idx = 0, 1 self.char_inputs = char_inputs self.convolutions = nn.ModuleList() for width, out_c in filters: self.convolutions.append( nn.Conv1d(char_embed_dim, out_c, kernel_size=width) ) last_dim = sum(f[1] for f in filters) self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None self.projection = nn.Linear(last_dim, word_embed_dim) assert ( vocab is not None or char_inputs ), "vocab must be set if not using char inputs" self.vocab = None if vocab is not None: self.set_vocab(vocab, max_char_len) self.reset_parameters() def prepare_for_onnx_export_(self): self.onnx_trace = True def set_vocab(self, vocab, max_char_len): word_to_char = torch.LongTensor(len(vocab), max_char_len) truncated = 0 for i in range(len(vocab)): if i < vocab.nspecial: char_idxs = [0] * max_char_len else: chars = vocab[i].encode() # +1 for padding char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars)) if len(char_idxs) > max_char_len: truncated += 1 char_idxs = char_idxs[:max_char_len] word_to_char[i] = torch.LongTensor(char_idxs) if truncated > 0: logger.info( "truncated {} words longer than {} characters".format( truncated, max_char_len ) ) self.vocab = vocab self.word_to_char = word_to_char @property def padding_idx(self): return Dictionary().pad() if self.vocab is None else self.vocab.pad() def reset_parameters(self): nn.init.xavier_normal_(self.char_embeddings.weight) nn.init.xavier_normal_(self.symbol_embeddings) nn.init.xavier_uniform_(self.projection.weight) nn.init.constant_( self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.0 ) nn.init.constant_(self.projection.bias, 0.0) def forward( self, input: torch.Tensor, ): if self.char_inputs: chars = input.view(-1, self.max_char_len) pads = chars[:, 0].eq(CHAR_PAD_IDX) eos = chars[:, 0].eq(CHAR_EOS_IDX) if eos.any(): if self.onnx_trace: chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars) else: chars[eos] = 0 unk = None else: flat_words = input.view(-1) chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as( input ) pads = flat_words.eq(self.vocab.pad()) eos = flat_words.eq(self.vocab.eos()) unk = flat_words.eq(self.vocab.unk()) word_embs = self._convolve(chars) if self.onnx_trace: if pads.any(): word_embs = torch.where( pads.unsqueeze(1), word_embs.new_zeros(1), word_embs ) if eos.any(): word_embs = torch.where( eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs ) if unk is not None and unk.any(): word_embs = torch.where( unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs ) else: if pads.any(): word_embs[pads] = 0 if eos.any(): 
word_embs[eos] = self.symbol_embeddings[self.eos_idx] if unk is not None and unk.any(): word_embs[unk] = self.symbol_embeddings[self.unk_idx] return word_embs.view(input.size()[:2] + (-1,)) def _convolve( self, char_idxs: torch.Tensor, ): char_embs = self.char_embeddings(char_idxs) char_embs = char_embs.transpose(1, 2) # BTC -> BCT conv_result = [] for conv in self.convolutions: x = conv(char_embs) x, _ = torch.max(x, -1) x = F.relu(x) conv_result.append(x) x = torch.cat(conv_result, dim=-1) if self.highway is not None: x = self.highway(x) x = self.projection(x) return x class Highway(torch.nn.Module): """ A `Highway layer <https://arxiv.org/abs/1505.00387>`_. Adopted from the AllenNLP implementation. """ def __init__(self, input_dim: int, num_layers: int = 1): super(Highway, self).__init__() self.input_dim = input_dim self.layers = nn.ModuleList( [nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)] ) self.activation = nn.ReLU() self.reset_parameters() def reset_parameters(self): for layer in self.layers: # As per comment in AllenNLP: # We should bias the highway layer to just carry its input forward. We do that by # setting the bias on `B(x)` to be positive, because that means `g` will be biased to # be high, so we will carry the input forward. The bias on `B(x)` is the second half # of the bias vector in each Linear layer. nn.init.constant_(layer.bias[self.input_dim :], 1) nn.init.constant_(layer.bias[: self.input_dim], 0) nn.init.xavier_normal_(layer.weight) def forward(self, x: torch.Tensor): for layer in self.layers: projection = layer(x) proj_x, gate = projection.chunk(2, dim=-1) proj_x = self.activation(proj_x) gate = torch.sigmoid(gate) x = gate * x + (gate.new_tensor([1]) - gate) * proj_x return x
6,974
31.44186
97
py
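A minimal usage sketch for the CharacterTokenEmbedder above (not part of the source file): it assumes the class is exported from fairseq.modules as in upstream fairseq, and uses arbitrary illustrative sizes, only checking the output shape.

import torch
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder  # assumed export path

vocab = Dictionary()
for word in ["hello", "world"]:
    vocab.add_symbol(word)

embedder = CharacterTokenEmbedder(
    vocab,
    filters=[(1, 64), (2, 128)],  # (kernel width, out channels) per conv
    char_embed_dim=32,
    word_embed_dim=256,
    highway_layers=1,
)
tokens = torch.LongTensor([[vocab.index("hello"), vocab.index("world")]])  # B x T
print(embedder(tokens).shape)  # torch.Size([1, 2, 256])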
sign-topic
sign-topic-main/fairseq/modules/unfold.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn.functional as F def unfold1d(x, kernel_size, padding_l, pad_value=0): """unfold T x B x C to T x B x C x K""" if kernel_size > 1: T, B, C = x.size() x = F.pad( x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value ) x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C)) else: x = x.unsqueeze(3) return x
596
28.85
84
py
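A quick shape check for unfold1d (a sketch, assuming the module path fairseq.modules.unfold shown in the record above): left-padding by kernel_size - 1 makes each output step see only itself and past steps.

import torch
from fairseq.modules.unfold import unfold1d

x = torch.randn(10, 2, 8)  # T x B x C
windows = unfold1d(x, kernel_size=3, padding_l=2)  # causal: all padding on the left
print(windows.shape)  # torch.Size([10, 2, 8, 3])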
sign-topic
sign-topic-main/fairseq/modules/fp32_group_norm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Group norm done in fp32 (for fp16 training)
"""

import torch.nn as nn
import torch.nn.functional as F


class Fp32GroupNorm(nn.GroupNorm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        output = F.group_norm(
            input.float(),
            self.num_groups,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(input)
727
27
69
py
sign-topic
sign-topic-main/fairseq/modules/adaptive_softmax.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import operator import torch import torch.nn.functional as F from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import nn class TiedLinear(nn.Module): def __init__(self, weight, transpose): super().__init__() self.weight = weight self.transpose = transpose def forward(self, input): return F.linear(input, self.weight.t() if self.transpose else self.weight) class TiedHeadModule(nn.Module): def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size): super().__init__() tied_emb, _ = weights self.num_words, emb_dim = tied_emb.size() self.word_proj = quant_noise( TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size ) if input_dim != emb_dim: self.word_proj = nn.Sequential( quant_noise( nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size ), self.word_proj, ) self.class_proj = quant_noise( nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size ) self.out_dim = self.num_words + num_classes self.register_buffer("_float_tensor", torch.FloatTensor(1)) def forward(self, input): inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1) out = self._float_tensor.new(inp_sz, self.out_dim) out[:, : self.num_words] = self.word_proj(input.view(inp_sz, -1)) out[:, self.num_words :] = self.class_proj(input.view(inp_sz, -1)) return out class AdaptiveSoftmax(nn.Module): """ This is an implementation of the efficient softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309). 
""" def __init__( self, vocab_size, input_dim, cutoff, dropout, factor=4.0, adaptive_inputs=None, tie_proj=False, q_noise=0, qn_block_size=8, ): super().__init__() if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] else: assert ( vocab_size == cutoff[-1] ), "cannot specify cutoff larger than vocab size" output_dim = cutoff[0] + len(cutoff) - 1 self.vocab_size = vocab_size self.cutoff = cutoff self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.input_dim = input_dim self.factor = factor self.q_noise = q_noise self.qn_block_size = qn_block_size self.lsm = nn.LogSoftmax(dim=1) if adaptive_inputs is not None: self.head = TiedHeadModule( adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size, ) else: self.head = quant_noise( nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size, ) self._make_tail(adaptive_inputs, tie_proj) def init_weights(m): if ( hasattr(m, "weight") and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule) ): nn.init.xavier_uniform_(m.weight) self.apply(init_weights) self.register_buffer("version", torch.LongTensor([1])) def _make_tail(self, adaptive_inputs=None, tie_proj=False): self.tail = nn.ModuleList() for i in range(len(self.cutoff) - 1): dim = int(self.input_dim // self.factor ** (i + 1)) tied_emb, tied_proj = ( adaptive_inputs.weights_for_band(i + 1) if adaptive_inputs is not None else (None, None) ) if tied_proj is not None: if tie_proj: proj = quant_noise( TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size, ) else: proj = quant_noise( nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size, ) else: proj = quant_noise( nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size, ) if tied_emb is None: out_proj = nn.Linear( dim, self.cutoff[i + 1] - self.cutoff[i], bias=False ) else: out_proj = TiedLinear(tied_emb, transpose=False) m = nn.Sequential( proj, nn.Dropout(self.dropout_module.p), quant_noise(out_proj, self.q_noise, self.qn_block_size), ) self.tail.append(m) def upgrade_state_dict_named(self, state_dict, name): version_name = name + ".version" if version_name not in state_dict: raise Exception("This version of the model is no longer supported") def adapt_target(self, target): """ In order to be efficient, the AdaptiveSoftMax does not compute the scores for all the word of the vocabulary for all the examples. It is thus necessary to call the method adapt_target of the AdaptiveSoftMax layer inside each forward pass. 
""" target = target.view(-1) new_target = [target.clone()] target_idxs = [] for i in range(len(self.cutoff) - 1): mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) new_target[0][mask] = self.cutoff[0] + i if mask.any(): target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1)) new_target.append(target[mask].add(-self.cutoff[i])) else: target_idxs.append(None) new_target.append(None) return new_target, target_idxs def forward(self, input, target): """ Args: input: (b x t x d) target: (b x t) Returns: 2 lists: output for each cutoff section and new targets by cut off """ input = input.contiguous().view(-1, input.size(-1)) input = self.dropout_module(input) new_target, target_idxs = self.adapt_target(target) output = [self.head(input)] for i in range(len(target_idxs)): if target_idxs[i] is not None: output.append(self.tail[i](input.index_select(0, target_idxs[i]))) else: output.append(None) return output, new_target def get_log_prob(self, input, target): """ Computes the log probabilities for all the words of the vocabulary, given a 2D tensor of hidden vectors. """ bsz, length, dim = input.size() input = input.contiguous().view(-1, dim) if target is not None: _, target_idxs = self.adapt_target(target) else: target_idxs = None head_y = self.head(input) log_probs = head_y.new_zeros(input.size(0), self.vocab_size) head_sz = self.cutoff[0] + len(self.tail) log_probs[:, :head_sz] = self.lsm(head_y) tail_priors = log_probs[:, self.cutoff[0] : head_sz].clone() for i in range(len(self.tail)): start = self.cutoff[i] end = self.cutoff[i + 1] if target_idxs is None: tail_out = log_probs[:, start:end] tail_out.copy_(self.tail[i](input)) log_probs[:, start:end] = self.lsm(tail_out).add_( tail_priors[:, i, None] ) elif target_idxs[i] is not None: idxs = target_idxs[i] tail_out = log_probs[idxs, start:end] tail_out.copy_(self.tail[i](input[idxs])) log_probs[idxs, start:end] = self.lsm(tail_out).add_( tail_priors[idxs, i, None] ) log_probs = log_probs.view(bsz, length, -1) return log_probs
8,789
31.67658
85
py
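A usage sketch for AdaptiveSoftmax (assuming it is exported from fairseq.modules; the vocabulary size and cutoffs are illustrative): the forward pass returns per-cluster logits with remapped targets, while get_log_prob reconstructs the full distribution.

import torch
from fairseq.modules import AdaptiveSoftmax  # assumed export path

vocab_size, dim = 10000, 512
asm = AdaptiveSoftmax(vocab_size, dim, cutoff=[2000, 6000], dropout=0.1)
hidden = torch.randn(4, 7, dim)             # B x T x D decoder states
target = torch.randint(vocab_size, (4, 7))  # B x T gold tokens

outputs, new_targets = asm(hidden, target)         # per-cluster logits + remapped targets
log_probs = asm.get_log_prob(hidden, target=None)  # full log-distribution
print(log_probs.shape)  # torch.Size([4, 7, 10000])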
sign-topic
sign-topic-main/fairseq/modules/conformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from typing import Optional from fairseq.modules import ( LayerNorm, MultiheadAttention, ESPNETMultiHeadedAttention, RelPositionMultiHeadedAttention, RotaryPositionMultiHeadedAttention, ) from fairseq.utils import get_activation_fn class ConvolutionModule(torch.nn.Module): """Convolution block used in the conformer block""" def __init__( self, embed_dim, channels, depthwise_kernel_size, dropout, activation_fn="swish", bias=False, export=False, ): """ Args: embed_dim: Embedding dimension channels: Number of channels in depthwise conv layers depthwise_kernel_size: Depthwise conv layer kernel size dropout: dropout value activation_fn: Activation function to use after depthwise convolution kernel bias: If bias should be added to conv layers export: If layernorm should be exported to jit """ super(ConvolutionModule, self).__init__() assert ( depthwise_kernel_size - 1 ) % 2 == 0, "kernel_size should be a odd number for 'SAME' padding" self.layer_norm = LayerNorm(embed_dim, export=export) self.pointwise_conv1 = torch.nn.Conv1d( embed_dim, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias, ) self.glu = torch.nn.GLU(dim=1) self.depthwise_conv = torch.nn.Conv1d( channels, channels, depthwise_kernel_size, stride=1, padding=(depthwise_kernel_size - 1) // 2, groups=channels, bias=bias, ) self.batch_norm = torch.nn.BatchNorm1d(channels) self.activation = get_activation_fn(activation_fn)(channels) self.pointwise_conv2 = torch.nn.Conv1d( channels, embed_dim, kernel_size=1, stride=1, padding=0, bias=bias, ) self.dropout = torch.nn.Dropout(dropout) def forward(self, x): """ Args: x: Input of shape B X T X C Returns: Tensor of shape B X T X C """ x = self.layer_norm(x) # exchange the temporal dimension and the feature dimension x = x.transpose(1, 2) # GLU mechanism x = self.pointwise_conv1(x) # (batch, 2*channel, dim) x = self.glu(x) # (batch, channel, dim) # 1D Depthwise Conv x = self.depthwise_conv(x) x = self.batch_norm(x) x = self.activation(x) x = self.pointwise_conv2(x) x = self.dropout(x) return x.transpose(1, 2) class FeedForwardModule(torch.nn.Module): """Positionwise feed forward layer used in conformer""" def __init__( self, input_feat, hidden_units, dropout1, dropout2, activation_fn="swish", bias=True, ): """ Args: input_feat: Input feature dimension hidden_units: Hidden unit dimension dropout1: dropout value for layer1 dropout2: dropout value for layer2 activation_fn: Name of activation function bias: If linear layers should have bias """ super(FeedForwardModule, self).__init__() self.layer_norm = LayerNorm(input_feat) self.w_1 = torch.nn.Linear(input_feat, hidden_units, bias=bias) self.w_2 = torch.nn.Linear(hidden_units, input_feat, bias=bias) self.dropout1 = torch.nn.Dropout(dropout1) self.dropout2 = torch.nn.Dropout(dropout2) self.activation = get_activation_fn(activation_fn)(hidden_units) def forward(self, x): """ Args: x: Input Tensor of shape T X B X C Returns: Tensor of shape T X B X C """ x = self.layer_norm(x) x = self.w_1(x) x = self.activation(x) x = self.dropout1(x) x = self.w_2(x) return self.dropout2(x) class ConformerEncoderLayer(torch.nn.Module): """Conformer block based on https://arxiv.org/abs/2005.08100. 
We currently don't support relative positional encoding in MHA""" def __init__( self, embed_dim, ffn_embed_dim, attention_heads, dropout, use_fp16, depthwise_conv_kernel_size=31, activation_fn="swish", attn_type=None, pos_enc_type="abs", ): """ Args: embed_dim: Input embedding dimension ffn_embed_dim: FFN layer dimension attention_heads: Number of attention heads in MHA dropout: dropout value depthwise_conv_kernel_size: Size of kernel in depthwise conv layer in convolution module activation_fn: Activation function name to use in convulation block and feed forward block attn_type: MHA implementation from ESPNET vs fairseq pos_enc_type: Positional encoding type - abs, rope, rel_pos """ self.pos_enc_type = pos_enc_type super(ConformerEncoderLayer, self).__init__() self.ffn1 = FeedForwardModule( embed_dim, ffn_embed_dim, dropout, dropout, ) self.self_attn_layer_norm = LayerNorm(embed_dim, export=False) self.self_attn_dropout = torch.nn.Dropout(dropout) if attn_type == "espnet": if self.pos_enc_type == "rel_pos": self.self_attn = RelPositionMultiHeadedAttention( embed_dim, attention_heads, dropout=dropout, ) elif self.pos_enc_type == "rope": self.self_attn = RotaryPositionMultiHeadedAttention( embed_dim, attention_heads, dropout=dropout, precision=use_fp16 ) elif self.pos_enc_type == "abs": self.self_attn = ESPNETMultiHeadedAttention( embed_dim, attention_heads, dropout=dropout, ) else: raise Exception(f"Unsupported attention type {self.pos_enc_type}") else: # Default to fairseq MHA self.self_attn = MultiheadAttention( embed_dim, attention_heads, dropout=dropout, ) self.conv_module = ConvolutionModule( embed_dim=embed_dim, channels=embed_dim, depthwise_kernel_size=depthwise_conv_kernel_size, dropout=dropout, activation_fn=activation_fn, ) self.ffn2 = FeedForwardModule( embed_dim, ffn_embed_dim, dropout, dropout, activation_fn=activation_fn, ) self.final_layer_norm = LayerNorm(embed_dim, export=False) def forward( self, x, encoder_padding_mask: Optional[torch.Tensor], position_emb: Optional[torch.Tensor] = None, ): """ Args: x: Tensor of shape T X B X C encoder_padding_mask: Optional mask tensor positions: Returns: Tensor of shape T X B X C """ residual = x x = self.ffn1(x) x = x * 0.5 + residual residual = x x = self.self_attn_layer_norm(x) if self.pos_enc_type == "rel_pos": x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, pos_emb=position_emb, need_weights=False, ) else: x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, ) x = self.self_attn_dropout(x) x = x + residual residual = x # TBC to BTC x = x.transpose(0, 1) x = self.conv_module(x) # BTC to TBC x = x.transpose(0, 1) x = residual + x residual = x x = self.ffn2(x) x = x * 0.5 + residual x = self.final_layer_norm(x) return x, attn class ConformerWav2Vec2EncoderLayer(ConformerEncoderLayer): """Encoder layer for Wav2vec2 encoder""" def forward( self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None, position_emb=None, ): return super().forward(x, self_attn_padding_mask, position_emb)
9,087
29.599327
131
py
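A minimal forward-pass sketch for ConformerEncoderLayer with the default fairseq attention and absolute positions (the import path follows the file path in this record; all sizes are illustrative):

import torch
from fairseq.modules.conformer_layer import ConformerEncoderLayer

layer = ConformerEncoderLayer(
    embed_dim=256,
    ffn_embed_dim=1024,
    attention_heads=4,
    dropout=0.1,
    use_fp16=False,  # only consulted by the espnet/rope attention variants
)
x = torch.randn(50, 2, 256)  # T x B x C
out, attn = layer(x, encoder_padding_mask=None)
print(out.shape)  # torch.Size([50, 2, 256])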
sign-topic
sign-topic-main/fairseq/modules/espnet_multihead_attention.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Multi-Head Attention layer definition.""" import math import torch from torch import nn from fairseq.modules.rotary_positional_embedding import ( RotaryPositionalEmbedding, apply_rotary_pos_emb, ) class ESPNETMultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head: The number of heads. n_feat: The number of features. dropout: Dropout rate. """ def __init__(self, n_feat, n_head, dropout): """Construct an MultiHeadedAttention object.""" super(ESPNETMultiHeadedAttention, self).__init__() assert n_feat % n_head == 0 # We assume d_v always equals d_k self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.attn = None self.dropout = nn.Dropout(p=dropout) def forward_qkv(self, query, key, value, **kwargs): """Transform query, key and value. Args: query: Query tensor B X T1 X C key: Key tensor B X T2 X C value: Value tensor B X T2 X C Returns: torch.Tensor: Transformed query tensor B X n_head X T1 X d_k torch.Tensor: Transformed key tensor B X n_head X T2 X d_k torch.Tensor: Transformed value tensor B X n_head X T2 X d_k """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) # (batch, head, time1, d_k) k = k.transpose(1, 2) # (batch, head, time2, d_k) v = v.transpose(1, 2) # (batch, head, time2, d_k) return q, k, v def forward_attention(self, value, scores, mask): """Compute attention context vector. Args: value: Transformed value B X n_head X T2 X d_k. scores: Attention score B X n_head X T1 X T2 mask: Mask T2 X B Returns: torch.Tensor: Transformed value B X T1 X d_model weighted by the attention score B X T1 X T2 """ n_batch = value.size(0) if mask is not None: scores = scores.masked_fill( mask.unsqueeze(1).unsqueeze(2).to(bool), float("-inf"), # (batch, head, time1, time2) ) self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) else: self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) p_attn = self.dropout(self.attn) x = torch.matmul(p_attn, value) # (batch, head, time1, d_k) x = ( x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) ) # (batch, time1, d_model) return self.linear_out(x) # (batch, time1, d_model) def forward(self, query, key, value, key_padding_mask=None, **kwargs): """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor T X B X C key (torch.Tensor): Key tensor T X B X C value (torch.Tensor): Value tensor T X B X C mask (torch.Tensor): Mask tensor T X B Returns: torch.Tensor: Output tensor T X B X D. """ query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) scores = self.forward_attention(v, scores, key_padding_mask) scores = scores.transpose(0, 1) return scores, None class RelPositionMultiHeadedAttention(ESPNETMultiHeadedAttention): """Multi-Head Attention layer with relative position encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head: The number of heads. n_feat: The number of features. dropout: Dropout rate. 
zero_triu: Whether to zero the upper triangular part of attention matrix. """ def __init__(self, n_feat, n_head, dropout, zero_triu=False): """Construct an RelPositionMultiHeadedAttention object.""" super().__init__(n_feat, n_head, dropout) self.zero_triu = zero_triu # linear transformation for positional encoding self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://arxiv.org/abs/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x): """Compute relative positional encoding. Args: x: Input tensor B X n_head X T X 2T-1 Returns: torch.Tensor: Output tensor. """ zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2)) x = x_padded[:, :, 1:].view_as(x)[ :, :, :, : x.size(-1) // 2 + 1 ] # only keep the positions from 0 to time2 if self.zero_triu: ones = torch.ones((x.size(2), x.size(3)), device=x.device) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, query, key, value, pos_emb, key_padding_mask=None, **kwargs): """Compute scaled dot product attention. Args: query: Query tensor T X B X C key: Key tensor T X B X C value: Value tensor T X B X C pos_emb: Positional embedding tensor B X 2T-1 X C key_padding_mask: Mask tensor T X B Returns: torch.Tensor: Output tensor T X B X C. """ query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) pos_emb = pos_emb.transpose(0, 1) q, k, v = self.forward_qkv(query, key, value) q = q.transpose(1, 2) # (batch, time1, head, d_k) n_batch_pos = pos_emb.size(0) p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) # (batch, head, time1, d_k) q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) # (batch, head, time1, d_k) q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) # compute attention score # first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # (batch, head, time1, time2) matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) # compute matrix b and matrix d # (batch, head, time1, 2*time1-1) matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) matrix_bd = self.rel_shift(matrix_bd) scores = (matrix_ac + matrix_bd) / math.sqrt( self.d_k ) # (batch, head, time1, time2) scores = self.forward_attention(v, scores, key_padding_mask) scores = scores.transpose(0, 1) return scores, None class RotaryPositionMultiHeadedAttention(ESPNETMultiHeadedAttention): def __init__( self, n_feat, n_head, dropout, precision, rotary_emd_base=10000, ): """Construct an RotaryPositionMultiHeadedAttention object.""" super().__init__(n_feat, n_head, dropout) precision = torch.float self.rotary_ndims = self.d_k # also try self.d_k//2 if precision == "fp16": precision = torch.half self.rotary_emb = RotaryPositionalEmbedding( self.rotary_ndims, base=rotary_emd_base, precision=precision ) def forward(self, query, key, value, key_padding_mask=None, **kwargs): """Compute rotary position attention. Args: query: Query tensor T X B X C key: Key tensor T X B X C value: Value tensor T X B X C key_padding_mask: Mask tensor T X B Returns: torch.Tensor: Output tensor T X B X D. 
Notes: Assumes self attn """ T, B, C = value.size() query = query.view(T, B, self.h, self.d_k) key = key.view(T, B, self.h, self.d_k) value = value.view(T, B, self.h, self.d_k) cos, sin = self.rotary_emb(value, seq_len=T) query, key = apply_rotary_pos_emb( query, key, cos, sin, offset=0 ) # offset is based on layer_past query = query.view(T, B, self.h * self.d_k) key = key.view(T, B, self.h * self.d_k) value = value.view(T, B, self.h * self.d_k) # TBD to BTD query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) scores = self.forward_attention(v, scores, key_padding_mask) scores = scores.transpose(0, 1) return scores, None
9,673
36.937255
84
py
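A self-attention shape check for ESPNETMultiHeadedAttention (the export from fairseq.modules is confirmed by the conformer file above; note the constructor order is n_feat, n_head):

import torch
from fairseq.modules import ESPNETMultiHeadedAttention

mha = ESPNETMultiHeadedAttention(n_feat=256, n_head=4, dropout=0.1)
x = torch.randn(20, 2, 256)  # T x B x C; self-attention, so q = k = v
out, _ = mha(x, x, x, key_padding_mask=None)
print(out.shape)  # torch.Size([20, 2, 256])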
sign-topic
sign-topic-main/fairseq/modules/rotary_positional_embedding.py
import torch class RotaryPositionalEmbedding(torch.nn.Module): def __init__(self, dim, base=10000, precision=torch.half): """Rotary positional embedding Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf Args: dim: Dimension of embedding base: Base value for exponential precision: precision to use for numerical values """ super().__init__() inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer("inv_freq", inv_freq) self.seq_len_cached = None self.cos_cached = None self.sin_cached = None self.precision = precision def forward(self, x, seq_len=None): """ Args: x: Input x with T X B X C seq_len: Sequence length of input x """ if seq_len != self.seq_len_cached: self.seq_len_cached = seq_len t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq) freqs = torch.einsum("i,j->ij", t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.cos_cached = emb.cos()[:, None, None, :] self.sin_cached = emb.sin()[:, None, None, :] return self.cos_cached, self.sin_cached # rotary pos emb helpers: def rotate_half(x): x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :] return torch.cat( (-x2, x1), dim=x1.ndim - 1 ) # dim=-1 triggers a bug in earlier torch versions def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0): cos, sin = ( cos[offset : q.shape[0] + offset, ...], sin[offset : q.shape[0] + offset, ...], ) return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
1,851
34.615385
81
py
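A sketch of applying rotary embeddings to query/key tensors (import path as in the record above; head counts and dimensions are illustrative):

import torch
from fairseq.modules.rotary_positional_embedding import (
    RotaryPositionalEmbedding,
    apply_rotary_pos_emb,
)

T, B, H, D = 16, 2, 4, 64
rope = RotaryPositionalEmbedding(dim=D, precision=torch.float)
q = torch.randn(T, B, H, D)
k = torch.randn(T, B, H, D)
cos, sin = rope(k, seq_len=T)  # cached tables, each T x 1 x 1 x D
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, offset=0)
print(q_rot.shape)  # torch.Size([16, 2, 4, 64])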
sign-topic
sign-topic-main/fairseq/modules/location_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
import torch
import torch.nn.functional as F


class LocationAttention(nn.Module):
    """
    Attention-Based Models for Speech Recognition
    https://arxiv.org/pdf/1506.07503.pdf

    :param int encoder_dim: # projection-units of encoder
    :param int decoder_dim: # units of decoder
    :param int attn_dim: attention dimension
    :param int conv_dim: # channels of attention convolution
    :param int conv_kernel_size: filter size of attention convolution
    """

    def __init__(
        self,
        attn_dim,
        encoder_dim,
        decoder_dim,
        attn_state_kernel_size,
        conv_dim,
        conv_kernel_size,
        scaling=2.0,
    ):
        super(LocationAttention, self).__init__()
        self.attn_dim = attn_dim
        self.decoder_dim = decoder_dim
        self.scaling = scaling
        self.proj_enc = nn.Linear(encoder_dim, attn_dim)
        self.proj_dec = nn.Linear(decoder_dim, attn_dim, bias=False)
        self.proj_attn = nn.Linear(conv_dim, attn_dim, bias=False)
        self.conv = nn.Conv1d(
            attn_state_kernel_size,
            conv_dim,
            2 * conv_kernel_size + 1,
            padding=conv_kernel_size,
            bias=False,
        )
        self.proj_out = nn.Sequential(nn.Tanh(), nn.Linear(attn_dim, 1))

        self.proj_enc_out = None  # cache

    def clear_cache(self):
        self.proj_enc_out = None

    def forward(self, encoder_out, encoder_padding_mask, decoder_h, attn_state):
        """
        :param torch.Tensor encoder_out: padded encoder hidden state B x T x D
        :param torch.Tensor encoder_padding_mask: encoder padding mask
        :param torch.Tensor decoder_h: decoder hidden state B x D
        :param torch.Tensor attn_state: previous attention weights B x K x T
        :return: attention weighted encoder state (B, D)
        :rtype: torch.Tensor
        :return: attention weights (B x T)
        :rtype: torch.Tensor
        """
        bsz, seq_len, _ = encoder_out.size()
        if self.proj_enc_out is None:
            self.proj_enc_out = self.proj_enc(encoder_out)

        # B x K x T -> B x C x T
        attn = self.conv(attn_state)
        # B x C x T -> B x T x C -> B x T x D
        attn = self.proj_attn(attn.transpose(1, 2))

        if decoder_h is None:
            decoder_h = encoder_out.new_zeros(bsz, self.decoder_dim)
        dec_h = self.proj_dec(decoder_h).view(bsz, 1, self.attn_dim)

        out = self.proj_out(attn + self.proj_enc_out + dec_h).squeeze(2)
        out.masked_fill_(encoder_padding_mask, -float("inf"))

        w = F.softmax(self.scaling * out, dim=1)
        c = torch.sum(encoder_out * w.view(bsz, seq_len, 1), dim=1)
        return c, w
2,909
33.642857
80
py
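A sketch of one attention step with LocationAttention (import path assumed from the record above; K=1 means only the previous step's weights are fed to the location convolution):

import torch
from fairseq.modules.location_attention import LocationAttention  # assumed path

attn = LocationAttention(
    attn_dim=128, encoder_dim=256, decoder_dim=320,
    attn_state_kernel_size=1, conv_dim=10, conv_kernel_size=5,
)
B, T = 2, 40
enc_out = torch.randn(B, T, 256)
pad_mask = torch.zeros(B, T, dtype=torch.bool)
attn_state = torch.zeros(B, 1, T)  # previous attention weights, B x K x T
context, weights = attn(enc_out, pad_mask, torch.randn(B, 320), attn_state)
print(context.shape, weights.shape)  # torch.Size([2, 256]) torch.Size([2, 40])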
sign-topic
sign-topic-main/fairseq/modules/conv_tbc.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from torch.nn.modules.utils import _single from torch import Tensor class ConvTBC(torch.nn.Module): """1D convolution over an input of shape (time x batch x channel) The implementation uses gemm to perform the convolution. This implementation is faster than cuDNN for small kernel sizes. """ def __init__(self, in_channels, out_channels, kernel_size, padding=0): super(ConvTBC, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _single(kernel_size) self.padding = _single(padding) self.weight = torch.nn.Parameter( torch.Tensor(self.kernel_size[0], in_channels, out_channels) ) self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) self.reset_parameters() def reset_parameters(self): nn.init.xavier_normal_(self.weight) nn.init.zeros_(self.bias) def conv_tbc(self, input: Tensor): return torch.conv_tbc( input.contiguous(), self.weight, self.bias, self.padding[0] ) def forward(self, input: Tensor): return self.conv_tbc(input) def __repr__(self): s = ( "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}" ", padding={padding}" ) if self.bias is None: s += ", bias=False" s += ")" return s.format(name=self.__class__.__name__, **self.__dict__)
1,683
30.185185
80
py
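A shape check for ConvTBC (assuming it is exported from fairseq.modules): with kernel 3 and padding 1 the time dimension is preserved.

import torch
from fairseq.modules import ConvTBC  # assumed export path

conv = ConvTBC(in_channels=8, out_channels=16, kernel_size=3, padding=1)
x = torch.randn(10, 2, 8)  # time x batch x channel
print(conv(x).shape)  # torch.Size([10, 2, 16])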
sign-topic
sign-topic-main/fairseq/modules/transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.modules import LayerNorm, MultiheadAttention from fairseq.modules.fairseq_dropout import FairseqDropout from fairseq.modules.quant_noise import quant_noise from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) class TransformerEncoderLayerBase(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *cfg.encoder.normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, cfg): super().__init__() self.cfg = cfg self.embed_dim = cfg.encoder.embed_dim self.quant_noise = cfg.quant_noise.pq self.quant_noise_block_size = cfg.quant_noise.pq_block_size self.self_attn = self.build_self_attention(self.embed_dim, cfg) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.dropout_module = FairseqDropout( cfg.dropout, module_name=self.__class__.__name__ ) self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn) activation_dropout_p = cfg.activation_dropout if activation_dropout_p == 0: # for backwards compatibility with models that use cfg.relu_dropout activation_dropout_p = cfg.relu_dropout or 0 self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = cfg.encoder.normalize_before self.fc1 = self.build_fc1( self.embed_dim, cfg.encoder.ffn_embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.fc2 = self.build_fc2( cfg.encoder.ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise( nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size ) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise( nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size ) def _get_fc_rank(self, remove_num: int) -> List[int]: f1_filter_param = [] for i in range(self.fc1.out_features): f1_filter_param.append(torch.sum(torch.abs(self.fc1.weight[i])) + torch.sum(torch.abs(self.fc2.weight[:, i])) + torch.abs(self.fc1.bias[i])) return sorted(range(len(f1_filter_param)), key=lambda k: f1_filter_param[k], reverse=False)[0:remove_num] def _prune_fc_layer(self, remove_index: List[int]): new_fc1_weight = [] new_fc1_bias = [] for i in range(self.fc1.out_features): if i not in remove_index: new_fc1_weight.append(self.fc1.weight[i]) new_fc1_bias.append(self.fc1.bias[i]) new_fc1_weight = torch.stack(new_fc1_weight).detach() new_fc1_weight.requires_grad = True new_fc1_bias = torch.stack(new_fc1_bias).detach() new_fc1_bias.requires_grad = True self.fc1 = quant_noise( nn.Linear(self.fc1.in_features, self.fc1.out_features - len(remove_index)), p=self.quant_noise, block_size=self.quant_noise_block_size, ) self.fc1.weight = 
torch.nn.Parameter(new_fc1_weight) self.fc1.bias = torch.nn.Parameter(new_fc1_bias) new_fc2_weight = [] new_fc2_bias = [] for i in range(self.fc2.in_features): if i not in remove_index: new_fc2_weight.append(self.fc2.weight[:, i]) new_fc2_bias = self.fc2.bias.detach() new_fc2_weight = torch.stack(new_fc2_weight, dim=-1).detach() new_fc2_weight.requires_grad = True new_fc2_bias = self.fc2.bias.detach() new_fc2_bias.requires_grad = True self.fc2 = quant_noise( nn.Linear(self.fc2.in_features - len(remove_index), self.fc2.out_features), p=self.quant_noise, block_size=self.quant_noise_block_size, ) self.fc2.weight = torch.nn.Parameter(new_fc2_weight) self.fc2.bias = torch.nn.Parameter(new_fc2_bias) def build_self_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.encoder.attention_heads, dropout=cfg.attention_dropout, self_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def residual_connection(self, x, residual): return residual + x def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward( self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, seq_len)` where padding elements are indicated by ``1``. attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`, where `tgt_len` is the length of output and `src_len` is the length of input, though here both are equal to `seq_len`. `attn_mask[tgt_i, src_j] = 1` means that when calculating the embedding for `tgt_i`, we exclude (mask out) `src_j`. This is useful for strided self-attention. 
Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters if attn_mask is not None: attn_mask = attn_mask.masked_fill( attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4 ) residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, _ = self.self_attn( query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, need_weights=False, # need_weights=True, attn_mask=attn_mask, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) x = self.fc2(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) return x # backward compatible with the legacy argparse format class TransformerEncoderLayer(TransformerEncoderLayerBase): def __init__(self, args): super().__init__(TransformerConfig.from_namespace(args)) self.args = args def build_self_attention(self, embed_dim, args): return super().build_self_attention( embed_dim, TransformerConfig.from_namespace(args) ) class TransformerDecoderLayerBase(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *cfg.decoder.normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__() self.embed_dim = cfg.decoder.embed_dim self.dropout_module = FairseqDropout( cfg.dropout, module_name=self.__class__.__name__ ) self.quant_noise = cfg.quant_noise.pq self.quant_noise_block_size = cfg.quant_noise.pq_block_size self.cross_self_attention = cfg.cross_self_attention self.self_attn = self.build_self_attention( self.embed_dim, cfg, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.attn_ln = ( LayerNorm(self.embed_dim) if utils.safe_getattr(cfg, "scale_attn", False) else None ) self.nh = self.self_attn.num_heads self.head_dim = self.self_attn.head_dim scale_heads = utils.safe_getattr(cfg, "scale_heads", False) self.c_attn = ( nn.Parameter(torch.ones((self.nh,)), requires_grad=True) if scale_heads else None ) self.activation_fn = utils.get_activation_fn(activation=cfg.activation_fn) activation_dropout_p = cfg.activation_dropout if activation_dropout_p == 0: # for backwards compatibility with models that use cfg.relu_dropout activation_dropout_p = cfg.relu_dropout or 0 self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = cfg.decoder.normalize_before self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = self.build_encoder_attention(self.embed_dim, cfg) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.ffn_layernorm = ( LayerNorm(cfg.decoder.ffn_embed_dim) if utils.safe_getattr(cfg, "scale_fc", False) else None ) self.w_resid = ( nn.Parameter( torch.ones( self.embed_dim, ), requires_grad=True, ) if utils.safe_getattr(cfg, "scale_resids", False) else None ) self.fc1 = self.build_fc1( self.embed_dim, cfg.decoder.ffn_embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.fc2 = self.build_fc2( cfg.decoder.ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size, ) self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export) self.need_attn = True self.onnx_trace = False def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def build_self_attention( self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False ): return MultiheadAttention( embed_dim, cfg.decoder.attention_heads, dropout=cfg.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=not cfg.cross_self_attention, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def build_encoder_attention(self, embed_dim, cfg): return MultiheadAttention( embed_dim, cfg.decoder.attention_heads, kdim=cfg.encoder.embed_dim, vdim=cfg.encoder.embed_dim, dropout=cfg.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, ) def prepare_for_onnx_export_(self): self.onnx_trace = True def residual_connection(self, x, residual): return residual + x def forward( self, x, encoder_out: Optional[torch.Tensor] = None, encoder_padding_mask: Optional[torch.Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, prev_self_attn_state: Optional[List[torch.Tensor]] = None, 
prev_attn_state: Optional[List[torch.Tensor]] = None, self_attn_mask: Optional[torch.Tensor] = None, self_attn_padding_mask: Optional[torch.Tensor] = None, need_attn: bool = False, need_head_weights: bool = False, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor, optional): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. need_attn (bool, optional): return attention weights need_head_weights (bool, optional): return attention weights for each head (default: return average over heads). Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ if need_head_weights: need_attn = True residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if prev_self_attn_state is not None: prev_key, prev_value = prev_self_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_self_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] assert incremental_state is not None self.self_attn._set_input_buffer(incremental_state, saved_state) _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) if self.cross_self_attention and not ( incremental_state is not None and _self_attn_input_buffer is not None and "prev_key" in _self_attn_input_buffer ): if self_attn_mask is not None: assert encoder_out is not None self_attn_mask = torch.cat( (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 ) if self_attn_padding_mask is not None: if encoder_padding_mask is None: assert encoder_out is not None encoder_padding_mask = self_attn_padding_mask.new_zeros( encoder_out.size(1), encoder_out.size(0) ) self_attn_padding_mask = torch.cat( (encoder_padding_mask, self_attn_padding_mask), dim=1 ) assert encoder_out is not None y = torch.cat((encoder_out, x), dim=0) else: y = x x, attn = self.self_attn( query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) if self.c_attn is not None: tgt_len, bsz = x.size(0), x.size(1) x = x.view(tgt_len, bsz, self.nh, self.head_dim) x = torch.einsum("tbhd,h->tbhd", x, self.c_attn) x = x.reshape(tgt_len, bsz, self.embed_dim) if self.attn_ln is not None: x = self.attn_ln(x) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.self_attn_layer_norm(x) if self.encoder_attn is not None and encoder_out is not None: residual = x if self.normalize_before: x = self.encoder_attn_layer_norm(x) if prev_attn_state is not None: prev_key, prev_value = prev_attn_state[:2] saved_state: Dict[str, Optional[Tensor]] = { "prev_key": prev_key, "prev_value": prev_value, } if len(prev_attn_state) >= 3: saved_state["prev_key_padding_mask"] = prev_attn_state[2] assert incremental_state is not None self.encoder_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=need_attn or (not self.training and self.need_attn), need_head_weights=need_head_weights, ) x = self.dropout_module(x) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.encoder_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = self.activation_dropout_module(x) if 
self.ffn_layernorm is not None: x = self.ffn_layernorm(x) x = self.fc2(x) x = self.dropout_module(x) if self.w_resid is not None: residual = torch.mul(self.w_resid, residual) x = self.residual_connection(x, residual) if not self.normalize_before: x = self.final_layer_norm(x) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) assert saved_state is not None if self_attn_padding_mask is not None: self_attn_state = [ saved_state["prev_key"], saved_state["prev_value"], saved_state["prev_key_padding_mask"], ] else: self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] return x, attn, self_attn_state return x, attn, None def make_generation_fast_(self, need_attn: bool = False, **kwargs): self.need_attn = need_attn # backward compatible with the legacy argparse format class TransformerDecoderLayer(TransformerDecoderLayerBase): def __init__( self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False ): super().__init__( TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.args = args def build_self_attention( self, embed_dim, args, add_bias_kv=False, add_zero_attn=False ): return super().build_self_attention( embed_dim, TransformerConfig.from_namespace(args), add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) def build_encoder_attention(self, embed_dim, args): return super().build_encoder_attention( embed_dim, TransformerConfig.from_namespace(args), )
21,295
37.790528
152
py
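A sketch of running one encoder layer with default hyperparameters (this assumes TransformerConfig can be default-constructed from its dataclass defaults, which holds for recent fairseq):

import torch
from fairseq.models.transformer import TransformerConfig
from fairseq.modules.transformer_layer import TransformerEncoderLayerBase

cfg = TransformerConfig()  # dataclass defaults: 512-dim embeddings, 8 heads
layer = TransformerEncoderLayerBase(cfg)
x = torch.randn(30, 2, cfg.encoder.embed_dim)  # T x B x C
out = layer(x, encoder_padding_mask=None)
print(out.shape)  # torch.Size([30, 2, 512])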
sign-topic
sign-topic-main/fairseq/modules/positional_encoding.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
import math
import torch


class PositionalEncoding(nn.Module):
    """Positional encoding.

    Args:
        d_model: Embedding dimension.
        dropout_rate: Dropout rate.
        max_len: Maximum input length.
        reverse: Whether to reverse the input position.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
        """Construct a PositionalEncoding object."""
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.reverse = reverse
        self.xscale = math.sqrt(self.d_model)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.d_model)
        if self.reverse:
            position = torch.arange(
                x.size(1) - 1, -1, -1.0, dtype=torch.float32
            ).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor B X T X C
        Returns:
            torch.Tensor: Encoded tensor B X T X C
        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, : x.size(1)]
        return self.dropout(x)


class RelPositionalEncoding(nn.Module):
    """Relative positional encoding module (new implementation).

    Args:
        d_model: Embedding dimension.
        dropout_rate: Dropout rate.
        max_len: Maximum input length.
    """

    def __init__(self, max_len, d_model):
        """Construct a RelPositionalEncoding object."""
        super(RelPositionalEncoding, self).__init__()
        self.d_model = d_model
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            # self.pe contains both positive and negative parts
            # the length of self.pe is 2 * input_len - 1
            if self.pe.size(1) >= x.size(1) * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        # Suppose `i` is the position of the query vector and `j` is the
        # position of the key vector. We use positive relative positions when
        # keys are to the left (i>j) and negative relative positions otherwise (i<j).
        pe_positive = torch.zeros(x.size(1), self.d_model)
        pe_negative = torch.zeros(x.size(1), self.d_model)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)

        # Reverse the order of positive indices and concat both positive and
        # negative indices. This is used to support the shifting trick
        # as in https://arxiv.org/abs/1901.02860
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        pe = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x : Input tensor T X B X C.
        Returns:
            torch.Tensor: Encoded tensor T X B X C.
        """
        x = x.transpose(0, 1)  # Change TBC to BTC
        self.extend_pe(x)
        pos_emb = self.pe[
            :,
            self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),
        ]
        pos_emb = pos_emb.transpose(0, 1)  # change to TBC
        return pos_emb
4,950
37.084615
84
py
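A shape check for both encodings above (import path assumed from the record): note that the relative variant returns a table over all 2*T-1 relative offsets rather than an encoded input.

import torch
from fairseq.modules.positional_encoding import (  # assumed path
    PositionalEncoding,
    RelPositionalEncoding,
)

pe = PositionalEncoding(d_model=256, dropout_rate=0.1)
x = torch.randn(2, 30, 256)  # B x T x C
print(pe(x).shape)  # torch.Size([2, 30, 256])

rel = RelPositionalEncoding(max_len=5000, d_model=256)
y = torch.randn(30, 2, 256)  # T x B x C
print(rel(y).shape)  # torch.Size([59, 1, 256]): 2*T-1 relative positions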
sign-topic
sign-topic-main/fairseq/modules/beamable_mm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn


class BeamableMM(nn.Module):
    """This module provides an optimized MM for beam decoding with attention.

    It leverages the fact that the source-side of the input is replicated beam
    times and the target-side of the input is of width one. This layer speeds up
    inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
    with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
    """

    def __init__(self, beam_size=None):
        super(BeamableMM, self).__init__()
        self.beam_size = beam_size

    def forward(self, input1, input2):
        if (
            not self.training  # test mode
            and self.beam_size is not None  # beam size is set
            and input1.dim() == 3  # only support batched input
            and input1.size(1) == 1  # single time step update
        ):
            bsz, beam = input1.size(0), self.beam_size

            # bsz x 1 x nhu --> bsz/beam x beam x nhu
            input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)

            # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
            input2 = input2.unfold(0, beam, beam)[:, :, :, 0]

            # use non batched operation if bsz == beam
            if input1.size(0) == 1:
                output = torch.mm(input1[0, :, :], input2[0, :, :])
            else:
                output = input1.bmm(input2)
            return output.view(bsz, 1, -1)
        else:
            return input1.bmm(input2)

    def set_beam_size(self, beam_size):
        self.beam_size = beam_size
1,763
34.28
80
py
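A sketch of the optimized path (assuming BeamableMM is exported from fairseq.modules): with 2 sentences and beam 5, a batch of 10 single-step queries is folded to 2 beam-grouped matmuls.

import torch
from fairseq.modules import BeamableMM  # assumed export path

bmm = BeamableMM(beam_size=5)
bmm.eval()  # the optimized path only triggers outside training
attn_q = torch.randn(10, 1, 64)   # (bsz = 2 sentences x 5 beams, 1, nhu)
source = torch.randn(10, 64, 20)  # beam-replicated source-side states
print(bmm(attn_q, source).shape)  # torch.Size([10, 1, 20])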
sign-topic
sign-topic-main/fairseq/modules/layer_norm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F try: from apex.normalization import FusedLayerNorm as _FusedLayerNorm has_fused_layernorm = True class FusedLayerNorm(_FusedLayerNorm): @torch.jit.unused def forward(self, x): if not x.is_cuda: return super().forward(x) else: with torch.cuda.device(x.device): return super().forward(x) except ImportError: has_fused_layernorm = False def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): if torch.jit.is_scripting() or torch.jit.is_tracing(): export = True if not export and torch.cuda.is_available() and has_fused_layernorm: return FusedLayerNorm(normalized_shape, eps, elementwise_affine) return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) class Fp32LayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, input): output = F.layer_norm( input.float(), self.normalized_shape, self.weight.float() if self.weight is not None else None, self.bias.float() if self.bias is not None else None, self.eps, ) return output.type_as(input)
1,524
30.122449
81
py
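A quick sketch of both entry points (LayerNorm and Fp32LayerNorm are exported from fairseq.modules):

import torch
from fairseq.modules import LayerNorm, Fp32LayerNorm

ln = LayerNorm(512)  # FusedLayerNorm when apex and CUDA are available
print(ln(torch.randn(4, 512)).shape)  # torch.Size([4, 512])

fp32_ln = Fp32LayerNorm(512)
h = torch.randn(4, 512).half()
print(fp32_ln(h).dtype)  # torch.float16: stats in fp32, output cast back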
sign-topic
sign-topic-main/fairseq/modules/kmeans_vector_quantizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from fairseq.modules import Fp32GroupNorm class KmeansVectorQuantizer(nn.Module): def __init__( self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25 ): """Vector quantization using straight pass-through estimator (i.e. kmeans) Args: dim: input dimension (channels) num_vars: number of quantized vectors per group groups: number of groups for vector quantization combine_groups: whether to use the vectors for all groups vq_dim: dimensionality of the resulting quantized vector time_first: if true, expect input in BxTxC format, otherwise in BxCxT gamma: commitment loss coefficient """ super().__init__() self.groups = groups self.combine_groups = combine_groups self.input_dim = dim self.num_vars = num_vars self.vq_dim = vq_dim self.time_first = time_first assert ( vq_dim % groups == 0 ), f"dim {vq_dim} must be divisible by groups {groups} for concatenation" self.var_dim = vq_dim // groups num_groups = groups if not combine_groups else 1 self.embedding = nn.Parameter( 0.01 * torch.randn(num_vars, num_groups, self.var_dim) ) self.projection = nn.Sequential( nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False), Fp32GroupNorm(groups, dim), ) self.gamma = gamma self.mse_mean = nn.MSELoss(reduction="mean") def _pass_grad(self, x, y): """Manually set gradient for backward pass. for y = f(x), ensure that during the backward pass, dL/dy = dL/dx regardless of f(x). Returns: y, with the gradient forced to be dL/dy = dL/dx. """ return y.detach() + (x - x.detach()) @property def expand_embedding(self): if self.combine_groups: return self.embedding.expand(self.num_vars, self.groups, self.var_dim) return self.embedding def forward_idx(self, x): res = self.forward(x, produce_targets=True) return res["x"], res["targets"] def forward(self, x, produce_targets=False): result = {"num_vars": self.num_vars} if self.time_first: x = x.transpose(1, 2) bsz, fsz, tsz = x.shape ze = self.projection(x) ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2) d = ( (ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1)) .view(self.num_vars, bsz, tsz, self.groups, -1) .norm(dim=-1, p=2) ) idx = d.argmin(dim=0) zq = ( torch.stack( [ self.expand_embedding[idx[..., group], group] for group in range(self.groups) ], dim=-2, ) .view(bsz, tsz, self.groups * self.var_dim) .permute(0, 2, 1) ) assert ze.shape == zq.shape, (ze.shape, zq.shape) x = self._pass_grad(ze, zq) hard_x = ( idx.new_zeros(bsz * tsz * self.groups, self.num_vars) .scatter_(-1, idx.view(-1, 1), 1.0) .view(bsz * tsz, self.groups, -1) ) hard_probs = torch.mean(hard_x.float(), dim=0) result["code_perplexity"] = torch.exp( -torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1) ).sum() if produce_targets: result["targets"] = idx if self.time_first: x = x.transpose(1, 2) # BCT -> BTC result["x"] = x ze = ze.float() zq = zq.float() latent_loss = self.mse_mean(zq, ze.detach()) commitment_loss = self.mse_mean(ze, zq.detach()) result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss return result
4,170
31.585938
83
py
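A forward-pass sketch for KmeansVectorQuantizer (assuming it is exported from fairseq.modules; sizes are illustrative): the result dict carries the straight-through quantized features, per-group code indices, and the k-means loss.

import torch
from fairseq.modules import KmeansVectorQuantizer  # assumed export path

vq = KmeansVectorQuantizer(
    dim=64, num_vars=320, groups=2, combine_groups=False,
    vq_dim=64, time_first=True,
)
x = torch.randn(2, 50, 64)  # B x T x C
result = vq(x, produce_targets=True)
print(result["x"].shape)        # torch.Size([2, 50, 64]) quantized features
print(result["targets"].shape)  # torch.Size([2, 50, 2]) code index per group
print(result["kmeans_loss"])    # latent + commitment loss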
sign-topic
sign-topic-main/fairseq/modules/layer_drop.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
LayerDrop as described in https://arxiv.org/abs/1909.11556.
"""

import torch
import torch.nn as nn


class LayerDropModuleList(nn.ModuleList):
    """
    A LayerDrop implementation based on :class:`torch.nn.ModuleList`.

    We refresh the choice of which layers to drop every time we iterate
    over the LayerDropModuleList instance. During evaluation we always
    iterate over all layers.

    Usage::

        layers = LayerDropModuleList(p=0.5, modules=[layer1, layer2, layer3])
        for layer in layers:  # this might iterate over layers 1 and 3
            x = layer(x)
        for layer in layers:  # this might iterate over all layers
            x = layer(x)
        for layer in layers:  # this might not iterate over any layers
            x = layer(x)

    Args:
        p (float): probability of dropping out each layer
        modules (iterable, optional): an iterable of modules to add
    """

    def __init__(self, p, modules=None):
        super().__init__(modules)
        self.p = p

    def __iter__(self):
        dropout_probs = torch.empty(len(self)).uniform_()
        for i, m in enumerate(super().__iter__()):
            if not self.training or (dropout_probs[i] > self.p):
                yield m
1,409
30.333333
71
py
sign-topic
sign-topic-main/fairseq/modules/dynamic_crf_layer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
This file re-implements the low-rank and beam approximation of the CRF layer
proposed by:

Sun, Zhiqing, et al.
Fast Structured Decoding for Sequence Models
https://arxiv.org/abs/1910.11555

The CRF implementation is mainly borrowed from
https://github.com/kmkurn/pytorch-crf/blob/master/torchcrf/__init__.py
"""

import torch
import torch.nn as nn


def logsumexp(x, dim=1):
    return torch.logsumexp(x.float(), dim=dim).type_as(x)


class DynamicCRF(nn.Module):
    """Dynamic CRF layer is used to approximate the traditional
    Conditional Random Fields (CRF)
    $P(y | x) = 1/Z(x) exp(sum_i s(y_i, x) + sum_i t(y_{i-1}, y_i, x))$

    where in this function, we assume the emission scores (s) are given,
    and the transition score is a |V| x |V| matrix $M$.

    It differs from a standard CRF in the following two aspects:
    (1) it uses a low-rank approximation for the transition matrix:
        $M = E_1 E_2^T$
    (2) it uses a beam to estimate the normalizing factor Z(x)
    """

    def __init__(self, num_embedding, low_rank=32, beam_size=64):
        super().__init__()

        self.E1 = nn.Embedding(num_embedding, low_rank)
        self.E2 = nn.Embedding(num_embedding, low_rank)

        self.vocb = num_embedding
        self.rank = low_rank
        self.beam = beam_size

    def extra_repr(self):
        return "vocab_size={}, low_rank={}, beam_size={}".format(
            self.vocb, self.rank, self.beam
        )

    def forward(self, emissions, targets, masks, beam=None):
        """
        Compute the conditional log-likelihood of a sequence of target tokens given emission scores

        Args:
            emissions (`~torch.Tensor`): Emission scores are usually the unnormalized decoder output
                ``(batch_size, seq_len, vocab_size)``. We assume batch-first
            targets (`~torch.LongTensor`): Sequence of target token indices
                ``(batch_size, seq_len)``
            masks (`~torch.ByteTensor`): Mask tensor with the same size as targets

        Returns:
            `~torch.Tensor`: approximated log-likelihood
        """
        numerator = self._compute_score(emissions, targets, masks)
        denominator = self._compute_normalizer(emissions, targets, masks, beam)
        return numerator - denominator

    def forward_decoder(self, emissions, masks=None, beam=None):
        """
        Find the most likely output sequence using Viterbi algorithm.

        Args:
            emissions (`~torch.Tensor`): Emission scores are usually the unnormalized decoder output
                ``(batch_size, seq_len, vocab_size)``. We assume batch-first
            masks (`~torch.ByteTensor`): Mask tensor with the same size as targets

        Returns:
            `~torch.LongTensor`: decoded sequence from the CRF model
        """
        return self._viterbi_decode(emissions, masks, beam)

    def _compute_score(self, emissions, targets, masks=None):
        batch_size, seq_len = targets.size()
        emission_scores = emissions.gather(2, targets[:, :, None])[:, :, 0]  # B x T
        transition_scores = (self.E1(targets[:, :-1]) * self.E2(targets[:, 1:])).sum(2)

        scores = emission_scores
        scores[:, 1:] += transition_scores

        if masks is not None:
            scores = scores * masks.type_as(scores)
        return scores.sum(-1)

    def _compute_normalizer(self, emissions, targets=None, masks=None, beam=None):
        # HACK: we include "target" which is a heuristic for training
        # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
        beam = beam if beam is not None else self.beam
        batch_size, seq_len = emissions.size()[:2]
        if targets is not None:
            # `np.float("inf")` was removed in NumPy >= 1.24; the builtin works everywhere
            _emissions = emissions.scatter(2, targets[:, :, None], float("inf"))
            beam_targets = _emissions.topk(beam, 2)[1]
            beam_emission_scores = emissions.gather(2, beam_targets)
        else:
            beam_emission_scores, beam_targets = emissions.topk(beam, 2)
        beam_transition_score1 = self.E1(beam_targets[:, :-1])  # B x (T-1) x K x D
        beam_transition_score2 = self.E2(beam_targets[:, 1:])  # B x (T-1) x K x D
        beam_transition_matrix = torch.bmm(
            beam_transition_score1.view(-1, beam, self.rank),
            beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2),
        )
        beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)

        # compute the normalizer in the log-space
        score = beam_emission_scores[:, 0]  # B x K

        for i in range(1, seq_len):
            next_score = score[:, :, None] + beam_transition_matrix[:, i - 1]
            next_score = logsumexp(next_score, dim=1) + beam_emission_scores[:, i]

            if masks is not None:
                score = torch.where(masks[:, i : i + 1], next_score, score)
            else:
                score = next_score

        # Sum (log-sum-exp) over all possible tags
        return logsumexp(score, dim=1)

    def _viterbi_decode(self, emissions, masks=None, beam=None):
        # HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
        beam = beam if beam is not None else self.beam
        batch_size, seq_len = emissions.size()[:2]
        beam_emission_scores, beam_targets = emissions.topk(beam, 2)
        beam_transition_score1 = self.E1(beam_targets[:, :-1])  # B x (T-1) x K x D
        beam_transition_score2 = self.E2(beam_targets[:, 1:])  # B x (T-1) x K x D
        beam_transition_matrix = torch.bmm(
            beam_transition_score1.view(-1, beam, self.rank),
            beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2),
        )
        beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)

        traj_tokens, traj_scores = [], []
        finalized_tokens, finalized_scores = [], []

        # compute the normalizer in the log-space
        score = beam_emission_scores[:, 0]  # B x K
        dummy = (
            torch.arange(beam, device=score.device).expand(*score.size()).contiguous()
        )

        for i in range(1, seq_len):
            traj_scores.append(score)
            _score = score[:, :, None] + beam_transition_matrix[:, i - 1]
            _score, _index = _score.max(dim=1)
            _score = _score + beam_emission_scores[:, i]

            if masks is not None:
                score = torch.where(masks[:, i : i + 1], _score, score)
                index = torch.where(masks[:, i : i + 1], _index, dummy)
            else:
                score, index = _score, _index
            traj_tokens.append(index)

        # now running the back-tracing and find the best
        best_score, best_index = score.max(dim=1)
        finalized_tokens.append(best_index[:, None])
        finalized_scores.append(best_score[:, None])

        for idx, scs in zip(reversed(traj_tokens), reversed(traj_scores)):
            previous_index = finalized_tokens[-1]
            finalized_tokens.append(idx.gather(1, previous_index))
            finalized_scores.append(scs.gather(1, previous_index))

        finalized_tokens.reverse()
        finalized_tokens = torch.cat(finalized_tokens, 1)
        finalized_tokens = beam_targets.gather(2, finalized_tokens[:, :, None])[:, :, 0]

        finalized_scores.reverse()
        finalized_scores = torch.cat(finalized_scores, 1)
        finalized_scores[:, 1:] = finalized_scores[:, 1:] - finalized_scores[:, :-1]

        return finalized_scores, finalized_tokens
7,717
39.621053
99
py
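A short usage sketch of DynamicCRF with random tensors; shapes follow the docstrings above and all sizes are illustrative:

import torch
from fairseq.modules.dynamic_crf_layer import DynamicCRF

crf = DynamicCRF(num_embedding=100, low_rank=16, beam_size=8)
emissions = torch.randn(2, 5, 100)             # B x T x V unnormalized scores
targets = torch.randint(0, 100, (2, 5))        # B x T reference indices
masks = torch.ones(2, 5, dtype=torch.bool)     # no padding in this toy batch

loglik = crf(emissions, targets, masks)        # approximate log-likelihood, shape B
loss = -loglik.mean()                          # typical training objective
scores, tokens = crf.forward_decoder(emissions, masks)  # beam-Viterbi decode, both B x T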
sign-topic
sign-topic-main/fairseq/modules/scalar_bias.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # import torch class ScalarBias(torch.autograd.Function): """ Adds a vector of scalars, used in self-attention mechanism to allow the model to optionally attend to this vector instead of the past """ @staticmethod def forward(ctx, input, dim, bias_init): size = list(input.size()) size[dim] += 1 output = input.new(*size).fill_(bias_init) output.narrow(dim, 1, size[dim] - 1).copy_(input) ctx.dim = dim return output @staticmethod def backward(ctx, grad): return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None def scalar_bias(input, dim, bias_init=0): return ScalarBias.apply(input, dim, bias_init)
888
26.78125
74
py
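A small sketch showing what scalar_bias does to shapes and gradients:

import torch
from fairseq.modules.scalar_bias import scalar_bias

x = torch.randn(2, 4, 8, requires_grad=True)
y = scalar_bias(x, dim=1, bias_init=0)  # prepends one constant slice along dim 1
assert y.shape == (2, 5, 8)

y.sum().backward()                      # backward drops the bias slice again
assert x.grad.shape == x.shape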
sign-topic
sign-topic-main/fairseq/modules/transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional, Tuple

import torch
import torch.nn as nn
from fairseq.modules import (
    FairseqDropout,
    LayerDropModuleList,
    LayerNorm,
    MultiheadAttention,
    PositionalEmbedding,
    TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_


def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on the specified arguments.
        1. If normal_init_linear_weights is set then the weights of the linear
           layer will be initialized using the normal distribution and the
           bias will be set to the specified value.
        2. If normal_init_embed_weights is set then the weights of the embedding
           layer will be initialized using the normal distribution.
        3. If normal_init_proj_weights is set then the weights of
           in_project_weight for MultiHeadAttention are initialized using
           the normal distribution (to be validated).
    """

    def normal_(data):
        # with FSDP, module params will be on CUDA, so we cast them back to CPU
        # so that the RNG is consistent with and without FSDP
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    if isinstance(module, nn.Linear):
        normal_(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        normal_(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        normal_(module.q_proj.weight.data)
        normal_(module.k_proj.weight.data)
        normal_(module.v_proj.weight.data)


class TransformerSentenceEncoder(nn.Module):
    """
    Implementation for a Bi-directional Transformer based Sentence Encoder used
    in BERT/XLM style pre-trained models.

    This first computes the token embedding using the token embedding matrix,
    position embeddings (if specified) and segment embeddings
    (if specified). After applying the specified number of
    TransformerEncoderLayers, it outputs all the internal states of the
    encoder as well as the final representation associated with the first
    token (usually CLS token).

    Input:
        - tokens: B x T matrix representing sentences
        - segment_labels: B x T matrix representing segment label for tokens

    Output:
        - a tuple of the following:
            - a list of internal model states used to compute the
              predictions where each tensor has shape T x B x C
            - sentence representation associated with first input token
              in format B x C.
""" def __init__( self, padding_idx: int, vocab_size: int, num_encoder_layers: int = 6, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, layerdrop: float = 0.0, max_seq_len: int = 256, num_segments: int = 2, use_position_embeddings: bool = True, offset_positions_by_padding: bool = True, encoder_normalize_before: bool = False, apply_bert_init: bool = False, activation_fn: str = "relu", learned_pos_embedding: bool = True, embed_scale: float = None, freeze_embeddings: bool = False, n_trans_layers_to_freeze: int = 0, export: bool = False, traceable: bool = False, q_noise: float = 0.0, qn_block_size: int = 8, ) -> None: super().__init__() self.padding_idx = padding_idx self.vocab_size = vocab_size self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.layerdrop = layerdrop self.max_seq_len = max_seq_len self.embedding_dim = embedding_dim self.num_segments = num_segments self.use_position_embeddings = use_position_embeddings self.apply_bert_init = apply_bert_init self.learned_pos_embedding = learned_pos_embedding self.traceable = traceable self.embed_tokens = self.build_embedding( self.vocab_size, self.embedding_dim, self.padding_idx ) self.embed_scale = embed_scale if q_noise > 0: self.quant_noise = apply_quant_noise_( nn.Linear(self.embedding_dim, self.embedding_dim, bias=False), q_noise, qn_block_size, ) else: self.quant_noise = None self.segment_embeddings = ( nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None) if self.num_segments > 0 else None ) self.embed_positions = ( PositionalEmbedding( self.max_seq_len, self.embedding_dim, padding_idx=(self.padding_idx if offset_positions_by_padding else None), learned=self.learned_pos_embedding, ) if self.use_position_embeddings else None ) if encoder_normalize_before: self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export) else: self.emb_layer_norm = None if self.layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend( [ self.build_transformer_sentence_encoder_layer( embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout_module.p, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) for _ in range(num_encoder_layers) ] ) # Apply initialization of model params after building the model if self.apply_bert_init: self.apply(init_bert_params) def freeze_module_params(m): if m is not None: for p in m.parameters(): p.requires_grad = False if freeze_embeddings: freeze_module_params(self.embed_tokens) freeze_module_params(self.segment_embeddings) freeze_module_params(self.embed_positions) freeze_module_params(self.emb_layer_norm) for layer in range(n_trans_layers_to_freeze): freeze_module_params(self.layers[layer]) def build_embedding(self, vocab_size, embedding_dim, padding_idx): return nn.Embedding(vocab_size, embedding_dim, padding_idx) def build_transformer_sentence_encoder_layer( self, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, export, q_noise, qn_block_size, ): return TransformerSentenceEncoderLayer( embedding_dim=embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, 
attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, q_noise=q_noise, qn_block_size=qn_block_size, ) def forward( self, tokens: torch.Tensor, segment_labels: torch.Tensor = None, last_state_only: bool = False, positions: Optional[torch.Tensor] = None, token_embeddings: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: is_tpu = tokens.device.type == "xla" # compute padding mask. This is needed for multi-head attention padding_mask = tokens.eq(self.padding_idx) if not self.traceable and not is_tpu and not padding_mask.any(): padding_mask = None if token_embeddings is not None: x = token_embeddings else: x = self.embed_tokens(tokens) if self.embed_scale is not None: x = x * self.embed_scale if self.embed_positions is not None: x = x + self.embed_positions(tokens, positions=positions) if self.segment_embeddings is not None and segment_labels is not None: x = x + self.segment_embeddings(segment_labels) if self.quant_noise is not None: x = self.quant_noise(x) if self.emb_layer_norm is not None: x = self.emb_layer_norm(x) x = self.dropout_module(x) # account for padding while computing the representation if padding_mask is not None: x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) # B x T x C -> T x B x C x = x.transpose(0, 1) inner_states = [] if not last_state_only: inner_states.append(x) for layer in self.layers: x, _ = layer( x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask ) if not last_state_only: inner_states.append(x) sentence_rep = x[0, :, :] if last_state_only: inner_states = [x] if self.traceable: return torch.stack(inner_states), sentence_rep else: return inner_states, sentence_rep
10,162
33.804795
88
py
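A runnable sketch of the encoder interface; the sizes are small and illustrative, and the random token ids avoid the padding index 1:

import torch
from fairseq.modules import TransformerSentenceEncoder

encoder = TransformerSentenceEncoder(
    padding_idx=1,
    vocab_size=1000,
    num_encoder_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,
)
tokens = torch.randint(4, 1000, (2, 16))      # B x T
inner_states, sentence_rep = encoder(tokens)
assert inner_states[-1].shape == (16, 2, 64)  # internal states are T x B x C
assert sentence_rep.shape == (2, 64)          # representation of the first token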
sign-topic
sign-topic-main/fairseq/modules/grad_multiply.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class GradMultiply(torch.autograd.Function): @staticmethod def forward(ctx, x, scale): ctx.scale = scale res = x.new(x) return res @staticmethod def backward(ctx, grad): return grad * ctx.scale, None
442
22.315789
65
py
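GradMultiply is the identity in the forward pass and scales gradients in the backward pass; a quick check:

import torch
from fairseq.modules.grad_multiply import GradMultiply

x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)   # forward: y == x
y.sum().backward()               # backward: gradients multiplied by 0.5
assert torch.allclose(x.grad, torch.full((3,), 0.5))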
sign-topic
sign-topic-main/fairseq/modules/sparse_transformer_sentence_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
from fairseq.modules import TransformerSentenceEncoder
from fairseq.modules.sparse_transformer_sentence_encoder_layer import (
    SparseTransformerSentenceEncoderLayer,
)


class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
    """
    Sparse implementation of the TransformerSentenceEncoder
    - see SparseMultiheadAttention
    """

    def __init__(
        self,
        padding_idx: int,
        vocab_size: int,
        num_encoder_layers: int = 6,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        max_seq_len: int = 256,
        num_segments: int = 2,
        use_position_embeddings: bool = True,
        offset_positions_by_padding: bool = True,
        encoder_normalize_before: bool = False,
        apply_bert_init: bool = False,
        activation_fn: str = "relu",
        learned_pos_embedding: bool = True,
        embed_scale: float = None,
        freeze_embeddings: bool = False,
        n_trans_layers_to_freeze: int = 0,
        export: bool = False,
        is_bidirectional: bool = True,
        stride: int = 32,
        expressivity: int = 8,
    ) -> None:

        # Forward arguments by keyword: the parent signature also takes a
        # `layerdrop` argument between `activation_dropout` and `max_seq_len`,
        # so positional forwarding would silently misalign everything after it.
        super().__init__(
            padding_idx=padding_idx,
            vocab_size=vocab_size,
            num_encoder_layers=num_encoder_layers,
            embedding_dim=embedding_dim,
            ffn_embedding_dim=ffn_embedding_dim,
            num_attention_heads=num_attention_heads,
            dropout=dropout,
            attention_dropout=attention_dropout,
            activation_dropout=activation_dropout,
            max_seq_len=max_seq_len,
            num_segments=num_segments,
            use_position_embeddings=use_position_embeddings,
            offset_positions_by_padding=offset_positions_by_padding,
            encoder_normalize_before=encoder_normalize_before,
            apply_bert_init=apply_bert_init,
            activation_fn=activation_fn,
            learned_pos_embedding=learned_pos_embedding,
            embed_scale=embed_scale,
            freeze_embeddings=freeze_embeddings,
            n_trans_layers_to_freeze=n_trans_layers_to_freeze,
            export=export,
        )

        self.layers = nn.ModuleList(
            [
                SparseTransformerSentenceEncoderLayer(
                    embedding_dim=self.embedding_dim,
                    ffn_embedding_dim=ffn_embedding_dim,
                    num_attention_heads=num_attention_heads,
                    dropout=dropout,
                    attention_dropout=attention_dropout,
                    activation_dropout=activation_dropout,
                    activation_fn=activation_fn,
                    export=export,
                    is_bidirectional=is_bidirectional,
                    stride=stride,
                    expressivity=expressivity,
                )
                for _ in range(num_encoder_layers)
            ]
        )

        def freeze_module_params(m):
            if m is not None:
                for p in m.parameters():
                    p.requires_grad = False

        for layer in range(n_trans_layers_to_freeze):
            freeze_module_params(self.layers[layer])
3,155
31.536082
71
py
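Construction mirrors the dense encoder; only the sparse-attention hyperparameters differ. A sketch (the stride/expressivity values are illustrative, not tuned):

from fairseq.modules.sparse_transformer_sentence_encoder import (
    SparseTransformerSentenceEncoder,
)

encoder = SparseTransformerSentenceEncoder(
    padding_idx=1,
    vocab_size=1000,
    num_encoder_layers=2,
    embedding_dim=64,
    ffn_embedding_dim=128,
    num_attention_heads=4,
    stride=32,
    expressivity=8,
)
# forward() is inherited unchanged from TransformerSentenceEncoder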
sign-topic
sign-topic-main/fairseq/modules/sinusoidal_positional_embedding.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Optional import torch import torch.onnx.operators from fairseq import utils from torch import Tensor, nn class SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length. Padding symbols are ignored. """ def __init__(self, embedding_dim, padding_idx, init_size=1024): super().__init__() self.embedding_dim = embedding_dim self.padding_idx = padding_idx if padding_idx is not None else 0 self.weights = SinusoidalPositionalEmbedding.get_embedding( init_size, embedding_dim, padding_idx ) self.onnx_trace = False self.register_buffer("_float_tensor", torch.FloatTensor(1)) self.max_positions = int(1e5) def prepare_for_onnx_export_(self): self.onnx_trace = True @staticmethod def get_embedding( num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None ): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( 1 ) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( num_embeddings, -1 ) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, positions: Optional[Any] = None, ): """Input is expected to be of size [bsz x seqlen].""" bspair = torch.onnx.operators.shape_as_tensor(input) bsz, seq_len = bspair[0], bspair[1] max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = SinusoidalPositionalEmbedding.get_embedding( max_pos, self.embedding_dim, self.padding_idx ) self.weights = self.weights.to(self._float_tensor) if incremental_state is not None: # positions is the same for every token when decoding a single step pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len if self.onnx_trace: return ( self.weights.index_select(index=self.padding_idx + pos, dim=0) .unsqueeze(1) .repeat(bsz, 1, 1) ) return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) positions = utils.make_positions( input, self.padding_idx, onnx_trace=self.onnx_trace ) if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat( (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) ) embeddings = torch.onnx.operators.reshape_from_tensor_shape( flat_embeddings, embedding_shape ) return embeddings return ( self.weights.index_select(0, positions.view(-1)) .view(bsz, seq_len, -1) .detach() )
3,914
35.933962
87
py
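A sketch of the padding-aware behaviour: non-padding tokens get positions counted from padding_idx + 1, while padding tokens map to the zeroed embedding row:

import torch
from fairseq.modules.sinusoidal_positional_embedding import (
    SinusoidalPositionalEmbedding,
)

emb = SinusoidalPositionalEmbedding(embedding_dim=16, padding_idx=1, init_size=32)
tokens = torch.tensor([[5, 6, 7, 1, 1]])    # bsz x seqlen; trailing 1s are padding
pos = emb(tokens)                            # bsz x seqlen x embedding_dim
assert pos.shape == (1, 5, 16)
assert pos[0, 3].abs().sum().item() == 0     # padding positions are all-zero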
sign-topic
sign-topic-main/fairseq/modules/lightweight_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d


def LightweightConv(
    input_size,
    kernel_size=1,
    padding_l=None,
    num_heads=1,
    weight_dropout=0.0,
    weight_softmax=False,
    bias=False,
):
    if torch.cuda.is_available():
        try:
            from fairseq.modules.lightconv_layer import LightconvLayer

            return LightconvLayer(
                input_size,
                kernel_size=kernel_size,
                padding_l=padding_l,
                num_heads=num_heads,
                weight_dropout=weight_dropout,
                weight_softmax=weight_softmax,
                bias=bias,
            )
        except ImportError as e:
            print(e)
    return LightweightConv1dTBC(
        input_size,
        kernel_size=kernel_size,
        padding_l=padding_l,
        num_heads=num_heads,
        weight_dropout=weight_dropout,
        weight_softmax=weight_softmax,
        bias=bias,
    )


class LightweightConv1d(nn.Module):
    """Lightweight Convolution assuming the input is BxCxT
    This is an example module that illustrates LightConv more clearly than
    the TBC version. We don't use this module in the model.

    Args:
        input_size: # of channels of the input and output
        kernel_size: size of the convolution kernel
        padding: amount of zero padding added to the input
        num_heads: number of heads used. The weight is of shape
            `(num_heads, 1, kernel_size)`
        weight_softmax: normalize the weight with softmax before the convolution

    Shape:
        Input: BxCxT, i.e. (batch_size, input_size, timesteps)
        Output: BxCxT, i.e. (batch_size, input_size, timesteps)

    Attributes:
        weight: the learnable weights of the module of shape
            `(num_heads, 1, kernel_size)`
        bias: the learnable bias of the module of shape `(input_size)`
    """

    def __init__(
        self,
        input_size,
        kernel_size=1,
        padding=0,
        num_heads=1,
        weight_softmax=False,
        bias=False,
        weight_dropout=0.0,
    ):
        super().__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.num_heads = num_heads
        self.padding = padding
        self.weight_softmax = weight_softmax
        self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))

        if bias:
            self.bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.bias = None
        self.weight_dropout_module = FairseqDropout(
            weight_dropout, module_name=self.__class__.__name__
        )
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.0)

    def forward(self, input):
        """
        input size: B x C x T
        output size: B x C x T
        """
        B, C, T = input.size()
        H = self.num_heads

        weight = self.weight
        if self.weight_softmax:
            weight = F.softmax(weight, dim=-1)

        weight = self.weight_dropout_module(weight)
        # Merge every C/H entries into the batch dimension (C = self.input_size)
        # B x C x T -> (B * C/H) x H x T
        # Alternatively, one could expand the weight to C x 1 x K (by a factor
        # of C/H) and leave the input unreshaped, but that is slower.
        input = input.view(-1, H, T)
        output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
        output = output.view(B, C, T)
        if self.bias is not None:
            output = output + self.bias.view(1, -1, 1)

        return output


@with_incremental_state
class LightweightConv1dTBC(nn.Module):
    """Lightweight Convolution assuming the input is TxBxC
    Args:
        input_size: # of channels of the input
        kernel_size: size of the convolution kernel
        padding_l: padding to the left when using "same" padding
        num_heads: number of heads used.
The weight is of shape (num_heads, 1, kernel_size) weight_dropout: the drop rate of the DropConnect to drop the weight weight_softmax: normalize the weight with softmax before the convolution bias: use bias Shape: Input: TxBxC, i.e. (timesteps, batch_size, input_size) Output: TxBxC, i.e. (timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` """ def __init__( self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, bias=False, ): super().__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight_softmax = weight_softmax self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() self.onnx_trace = False def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.0) def forward(self, x, incremental_state=None, unfold=False): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead """ unfold = unfold or (incremental_state is not None) if unfold: output = self._forward_unfolded(x, incremental_state) else: output = self._forward_expanded(x, incremental_state) if self.bias is not None: output = output + self.bias.view(1, 1, -1) return output def prepare_for_onnx_export_(self): self.onnx_trace = True def _forward_unfolded(self, x, incremental_state): """The conventional implementation of convolutions. Unfolding the input by having a window shifting to the right.""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) else: # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as( weight ) if incremental_state is not None: weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) weight = ( weight.view(1, H, K).expand(T * B, H, K).contiguous().view(T * B * H, K, 1) ) weight = self.weight_dropout_module(weight) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_state): """Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight.view(H, K) if self.weight_softmax: weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as( weight ) weight = weight.view(1, H, K).expand(T * B, H, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) P = self.padding_l if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) K, P = T, T - 1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided((B * H, T, K), (T * (T + K - 1), T + K, 1)).copy_( weight ) weight_expanded = weight_expanded.narrow(2, P, T) weight_expanded = self.weight_dropout_module(weight_expanded) output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, "input_buffer") def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def extra_repr(self): s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}".format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.bias is not None, ) if self.weight_dropout_module.p > 0.0: s += ", weight_dropout={}".format(self.weight_dropout_module.p) return s
10,919
34.11254
103
py
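A sketch of the pure-PyTorch TBC variant (the LightweightConv factory above returns the fused CUDA layer instead when it is available); padding_l = kernel_size - 1 gives the causal "same" padding used in decoders:

import torch
from fairseq.modules.lightweight_convolution import LightweightConv1dTBC

conv = LightweightConv1dTBC(
    input_size=16, kernel_size=3, padding_l=2, num_heads=4, weight_softmax=True
)
x = torch.randn(10, 2, 16)   # T x B x C
out = conv(x)                # same shape; uses the band-matrix path by default
assert out.shape == (10, 2, 16)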
sign-topic
sign-topic-main/fairseq/modules/dynamic_convolution.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout

from .unfold import unfold1d


def DynamicConv(
    input_size,
    kernel_size=1,
    padding_l=None,
    num_heads=1,
    weight_dropout=0.0,
    weight_softmax=False,
    renorm_padding=False,
    bias=False,
    conv_bias=False,
    query_size=None,
    in_proj=False,
):
    if torch.cuda.is_available():
        try:
            from fairseq.modules.dynamicconv_layer import DynamicconvLayer

            return DynamicconvLayer(
                input_size,
                kernel_size=kernel_size,
                padding_l=padding_l,
                num_heads=num_heads,
                weight_dropout=weight_dropout,
                weight_softmax=weight_softmax,
                renorm_padding=renorm_padding,
                bias=bias,
                conv_bias=conv_bias,
                query_size=query_size,
            )
        except ImportError as e:
            print(e)
    return DynamicConv1dTBC(
        input_size,
        kernel_size=kernel_size,
        padding_l=padding_l,
        num_heads=num_heads,
        weight_dropout=weight_dropout,
        weight_softmax=weight_softmax,
        renorm_padding=renorm_padding,
        bias=bias,
        conv_bias=conv_bias,
        query_size=query_size,
    )


def Linear(in_features, out_features, bias=True):
    m = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(m.weight)
    if bias:
        nn.init.constant_(m.bias, 0.0)
    return m


@with_incremental_state
class DynamicConv1dTBC(nn.Module):
    """Dynamic lightweight convolution taking T x B x C inputs
    Args:
        input_size: # of channels of the input
        kernel_size: size of the convolution kernel
        padding_l: padding to the left when using "same" padding
        num_heads: number of heads used. The weight is of shape
            (num_heads, 1, kernel_size)
        weight_dropout: the drop rate of the DropConnect to drop the weight
        weight_softmax: normalize the weight with softmax before the convolution
        renorm_padding: re-normalize the filters to ignore the padded part
            (only the non-padding parts sum up to 1)
        bias: use bias
        conv_bias: bias of the convolution
        query_size: specified when feeding a different input as the query
        in_proj: project the input and generate the filter together

    Shape:
        Input: TxBxC, i.e. (timesteps, batch_size, input_size)
        Output: TxBxC, i.e.
(timesteps, batch_size, input_size) Attributes: weight: the learnable weights of the module of shape `(num_heads, 1, kernel_size)` bias: the learnable bias of the module of shape `(input_size)` """ def __init__( self, input_size, kernel_size=1, padding_l=None, num_heads=1, weight_dropout=0.0, weight_softmax=False, renorm_padding=False, bias=False, conv_bias=False, query_size=None, in_proj=False, ): super().__init__() self.input_size = input_size self.query_size = input_size if query_size is None else query_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight_softmax = weight_softmax self.renorm_padding = renorm_padding if in_proj: self.weight_linear = Linear( self.input_size, self.input_size + num_heads * kernel_size * 1 ) else: self.weight_linear = Linear( self.query_size, num_heads * kernel_size * 1, bias=bias ) if conv_bias: self.conv_bias = nn.Parameter(torch.Tensor(input_size)) else: self.conv_bias = None self.reset_parameters() @property def in_proj(self): return ( self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size ) def reset_parameters(self): self.weight_linear.reset_parameters() if self.conv_bias is not None: nn.init.constant_(self.conv_bias, 0.0) def forward(self, x, incremental_state=None, query=None, unfold=None): """Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C args: x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size) incremental_state: A dict to keep the state unfold: unfold the input or not. If not, we use the matrix trick instead query: use the specified query to predict the conv filters """ unfold = ( x.size(0) > 512 if unfold is None else unfold ) # use unfold mode as default for long sequence to save memory unfold = unfold or (incremental_state is not None) assert query is None or not self.in_proj if query is None: query = x if unfold: output = self._forward_unfolded(x, incremental_state, query) else: output = self._forward_expanded(x, incremental_state, query) if self.conv_bias is not None: output = output + self.conv_bias.view(1, 1, -1) return output def _forward_unfolded(self, x, incremental_state, query): """The conventional implementation of convolutions. 
Unfolding the input by having a window shifting to the right.""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = ( proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) ) else: weight = self.weight_linear(query).view(T * B * H, -1) # renorm_padding is only implemented in _forward_expanded assert not self.renorm_padding or incremental_state is not None if incremental_state is not None: input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) else: padding_l = self.padding_l if K > T and padding_l == K - 1: weight = weight.narrow(1, K - T, T) K, padding_l = T, T - 1 # unfold the input: T x B x C --> T' x B x C x K x_unfold = unfold1d(x, K, padding_l, 0) x_unfold = x_unfold.view(T * B * H, R, K) if self.weight_softmax and not self.renorm_padding: weight = F.softmax(weight, dim=1) weight = weight.narrow(1, 0, K) if incremental_state is not None: weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) if self.weight_softmax and self.renorm_padding: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 output = output.view(T, B, C) return output def _forward_expanded(self, x, incremental_stat, query): """Turn the convolution filters into band matrices and do matrix multiplication. This is faster when the sequence is short, but less memory efficient. This is not used in the decoder during inference. 
""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size if self.in_proj: proj = self.weight_linear(x) x = proj.narrow(2, 0, self.input_size).contiguous() weight = ( proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1) ) else: weight = self.weight_linear(query).view(T * B * H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: # turn the convolution filters into band matrices weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf")) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) # normalize the weight over valid positions like self-attention weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) else: P = self.padding_l # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) K, P = T, T - 1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, "input_buffer") def _set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def extra_repr(self): s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}".format( self.input_size, self.kernel_size, self.padding_l, self.num_heads, self.weight_softmax, self.conv_bias is not None, self.renorm_padding, self.in_proj, ) if self.query_size != self.input_size: s += ", query_size={}".format(self.query_size) if self.weight_dropout_module.p > 0.0: s += ", weight_dropout={}".format(self.weight_dropout_module.p) return s
11,802
36.951768
132
py
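The dynamic variant has the same interface as the lightweight one, but predicts a different filter per time step from the input (or from a separate query). A sketch with illustrative sizes:

import torch
from fairseq.modules.dynamic_convolution import DynamicConv1dTBC

conv = DynamicConv1dTBC(
    input_size=16, kernel_size=3, padding_l=2, num_heads=4, weight_softmax=True
)
x = torch.randn(10, 2, 16)   # T x B x C
out = conv(x)                # filters are generated from x itself (query=None)
assert out.shape == (10, 2, 16)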
sign-topic
sign-topic-main/fairseq/modules/checkpoint_activations.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import functools
from typing import Any, Dict, List, Tuple, Union

import torch
import torch.utils.checkpoint as checkpoint

from fairseq import utils


def checkpoint_wrapper(m, offload_to_cpu=False):
    """
    A friendlier wrapper for performing activation checkpointing.

    Compared to the PyTorch version, this version:
    - wraps an nn.Module, so that all subsequent calls will use checkpointing
    - handles keyword arguments in the forward
    - handles non-Tensor outputs from the forward

    Usage::

        checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
        a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
    """
    # guard against wrapping the same module twice
    assert not hasattr(
        m, "precheckpoint_forward"
    ), "checkpoint function has already been applied?"
    m.precheckpoint_forward = m.forward
    m.forward = functools.partial(
        _checkpointed_forward,
        m.precheckpoint_forward,  # original_forward
        offload_to_cpu,
    )
    return m


def unwrap_checkpoint(m: torch.nn.Module):
    """
    unwrap a module and its children from checkpoint_wrapper
    """
    for module in m.modules():
        if hasattr(module, "precheckpoint_forward"):
            module.forward = module.precheckpoint_forward
            del module.precheckpoint_forward
        if hasattr(module, "old_deepcopy_method"):
            module.__deepcopy__ = module.old_deepcopy_method
            del module.old_deepcopy_method
    return m


def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs):
    # Autograd Functions in PyTorch work best with positional args, since
    # the backward must return gradients (or None) for every input argument.
    # We can flatten keyword arguments to make this easier.
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs) parent_ctx_dict = {"offload": offload_to_cpu} output = CheckpointFunction.apply( original_forward, parent_ctx_dict, kwarg_keys, *flat_args ) if isinstance(output, torch.Tensor): return output else: packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"] if packed_non_tensor_outputs: output = unpack_non_tensors(output, packed_non_tensor_outputs) return output def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]: """ Usage:: kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4) args, kwargs = unpack_kwargs(kwarg_keys, flat_args) assert args == [1, 2] assert kwargs == {"a": 3, "b": 4} """ kwarg_keys = [] flat_args = list(args) for k, v in kwargs.items(): kwarg_keys.append(k) flat_args.append(v) return kwarg_keys, flat_args def unpack_kwargs( kwarg_keys: List[str], flat_args: List[Any] ) -> Tuple[List[Any], Dict[str, Any]]: if len(kwarg_keys) == 0: return flat_args, {} args = flat_args[: -len(kwarg_keys)] kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])} return args, kwargs def split_non_tensors( mixed: Union[torch.Tensor, Tuple[Any]] ) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]: """ Usage:: x = torch.Tensor([1]) y = torch.Tensor([2]) tensors, packed_non_tensors = split_non_tensors((x, y, None, 3)) recon = unpack_non_tensors(tensors, packed_non_tensors) assert recon == (x, y, None, 3) """ if isinstance(mixed, torch.Tensor): return (mixed,), None tensors = [] packed_non_tensors = {"is_tensor": [], "objects": []} for o in mixed: if isinstance(o, torch.Tensor): packed_non_tensors["is_tensor"].append(True) tensors.append(o) else: packed_non_tensors["is_tensor"].append(False) packed_non_tensors["objects"].append(o) return tuple(tensors), packed_non_tensors def unpack_non_tensors( tensors: Tuple[torch.Tensor], packed_non_tensors: Dict[str, List[Any]], ) -> Tuple[Any]: if packed_non_tensors is None: return tensors assert isinstance(packed_non_tensors, dict) mixed = [] is_tensor_list = packed_non_tensors["is_tensor"] objects = packed_non_tensors["objects"] assert len(tensors) + len(objects) == len(is_tensor_list) obj_i = tnsr_i = 0 for is_tensor in is_tensor_list: if is_tensor: mixed.append(tensors[tnsr_i]) tnsr_i += 1 else: mixed.append(objects[obj_i]) obj_i += 1 return tuple(mixed) class CheckpointFunction(torch.autograd.Function): """Similar to the torch version, but support non-Tensor outputs. The caller is expected to provide a dict (*parent_ctx_dict*) that will hold the non-Tensor outputs. These should be combined with the Tensor *outputs* by calling ``unpack_non_tensors``. 
""" @staticmethod def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args): if torch.is_grad_enabled(): # grad may be disabled, e.g., during validation checkpoint.check_backward_validity(args) ctx.run_function = run_function ctx.kwarg_keys = kwarg_keys ctx.fwd_rng_state = utils.get_rng_state() tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args) if parent_ctx_dict["offload"]: ctx.fwd_device = tuple(x.device for x in tensor_inputs) ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs) tensor_inputs = tuple( x.to(torch.device("cpu"), non_blocking=True) for x in tensor_inputs ) else: ctx.fwd_device, ctx.grad_requirements = None, None ctx.save_for_backward(*tensor_inputs) ctx.packed_non_tensor_inputs = packed_non_tensor_inputs with torch.no_grad(): unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args) outputs = run_function(*unpacked_args, **unpacked_kwargs) if isinstance(outputs, torch.Tensor): return outputs else: # Autograd Functions don't like non-Tensor outputs. We can split the # non-Tensor and Tensor outputs, returning the former by reference # through *parent_ctx_dict* and returning the latter directly. outputs, packed_non_tensor_outputs = split_non_tensors(outputs) parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs return outputs @staticmethod def backward(ctx, *args): if not torch.autograd._is_checkpoint_valid(): raise RuntimeError( "Checkpointing is not compatible with .grad(), please use .backward() if possible" ) tensor_inputs: Tuple = ctx.saved_tensors tensor_inputs = checkpoint.detach_variable(tensor_inputs) if ctx.fwd_device is not None: tensor_inputs = [ t.to(ctx.fwd_device[i], non_blocking=True) for i, t in enumerate(tensor_inputs) ] for i, need_grad in enumerate(ctx.grad_requirements): tensor_inputs[i].requires_grad = need_grad inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs) # Store the current states. bwd_rng_state = utils.get_rng_state() # Set the states to what it used to be before the forward pass. utils.set_rng_state(ctx.fwd_rng_state) with torch.enable_grad(): unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs) outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs) tensor_outputs, _ = split_non_tensors(outputs) # Set the states back to what it was at the start of this function. utils.set_rng_state(bwd_rng_state) # Run backward() with only Tensors that require grad outputs_with_grad = [] args_with_grad = [] for i in range(len(tensor_outputs)): if tensor_outputs[i].requires_grad: outputs_with_grad.append(tensor_outputs[i]) args_with_grad.append(args[i]) if len(outputs_with_grad) == 0: raise RuntimeError( "None of the outputs have requires_grad=True, " "this checkpoint() is not necessary" ) torch.autograd.backward(outputs_with_grad, args_with_grad) grads = tuple( inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs ) return (None, None, None) + grads
8,825
35.320988
98
py
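A sketch of wrapping and unwrapping a module (toy nn.Sequential; activations are recomputed during backward instead of being stored):

import torch
import torch.nn as nn
from fairseq.modules.checkpoint_activations import checkpoint_wrapper, unwrap_checkpoint

layer = checkpoint_wrapper(nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8)))
x = torch.randn(4, 8, requires_grad=True)
layer(x).sum().backward()   # forward is re-run inside backward to rebuild activations

layer = unwrap_checkpoint(layer)  # restores the original forward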
sign-topic
sign-topic-main/fairseq/modules/dynamicconv_layer/setup.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension setup( name="dynamicconv_layer", ext_modules=[ CUDAExtension( name="dynamicconv_cuda", sources=[ "dynamicconv_cuda.cpp", "dynamicconv_cuda_kernel.cu", ], ), ], cmdclass={"build_ext": BuildExtension}, )
602
24.125
67
py
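Building the extension is a standard torch cpp-extension build; a typical invocation is sketched below (assuming a CUDA toolchain compatible with the installed PyTorch):

# cd fairseq/modules/dynamicconv_layer
# python setup.py build_ext --inplace   # or: python setup.py install
# afterwards `import dynamicconv_cuda` succeeds, and the DynamicConv factory
# in dynamic_convolution.py picks up DynamicconvLayer automatically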
sign-topic
sign-topic-main/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import dynamicconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d
from torch import nn
from torch.autograd import Function


class dynamicconvFunction(Function):
    @staticmethod
    def forward(ctx, x, weights, padding_l):
        ctx.padding_l = padding_l
        outputs = dynamicconv_cuda.forward(x, weights, padding_l)
        variables = [x, weights]
        ctx.save_for_backward(*variables)
        return outputs[0]

    @staticmethod
    def backward(ctx, grad_output):
        outputs = dynamicconv_cuda.backward(
            grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
        )
        grad_input, grad_weights = outputs
        return grad_input, grad_weights, None


@with_incremental_state
class DynamicconvLayer(nn.Module):
    def __init__(
        self,
        input_size,
        kernel_size=1,
        padding_l=None,
        weight_softmax=False,
        num_heads=1,
        weight_dropout=0.0,
        bias=False,
        renorm_padding=False,
        conv_bias=False,
        query_size=None,
    ):
        super(DynamicconvLayer, self).__init__()
        self.input_size = input_size
        self.query_size = input_size if query_size is None else query_size
        self.kernel_size = kernel_size
        self.padding_l = padding_l
        self.num_heads = num_heads
        self.weight_softmax = weight_softmax
        self.weight_dropout_module = FairseqDropout(
            weight_dropout, module_name=self.__class__.__name__
        )
        self.renorm_padding = renorm_padding
        self.bias = bias

        self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
        if conv_bias:
            self.conv_bias = nn.Parameter(torch.Tensor(input_size))
        else:
            self.conv_bias = None
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight_linear.weight)
        if self.conv_bias is not None:
            nn.init.constant_(self.conv_bias, 0.0)
        if self.weight_linear.bias is not None:
            nn.init.constant_(self.weight_linear.bias, 0.0)

    def forward(self, x, incremental_state=None, query=None, unfold=None):

        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        # R = C // H

        # during inference time, incremental BMM is faster
        if incremental_state is not None:
            unfold = (
                x.size(0) > 512 if unfold is None else unfold
            )  # use unfold mode as default for long sequence to save memory
            unfold = unfold or (incremental_state is not None)
            assert query is None

            if query is None:
                query = x
            if unfold:
                output = self._forward_unfolded(x, incremental_state, query)
            else:
                output = self._forward_expanded(x, incremental_state, query)

            if self.conv_bias is not None:
                output = output + self.conv_bias.view(1, 1, -1)

            return output

        # during training time, use CUDA kernel
        else:
            weight = self.weight_linear(x).view(T, B, H, K)
            if self.weight_softmax:
                weight = F.softmax(weight, dim=-1)
            if self.weight_dropout_module.p:
                weight = self.weight_dropout_module(weight)

            weight = weight.permute(1, 2, 3, 0).contiguous()
            self.filters = weight
            x = x.permute(1, 2, 0).contiguous()
            output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(
                2, 0, 1
            )
            if self.conv_bias is not None:
                output = output + self.conv_bias.view(1, 1, -1)
            return output

    def reorder_incremental_state(self, incremental_state, new_order):
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, "input_buffer")

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(
            self, incremental_state, "input_buffer", new_buffer
        )

    def _forward_unfolded(self, x, incremental_state, query):
        """The conventional implementation of convolutions.
        Unfolding the input by having a window shifting to the right."""
        T, B, C = x.size()
        K, H = self.kernel_size, self.num_heads
        R = C // H
        assert R * H == C == self.input_size

        weight = self.weight_linear(query).view(T * B * H, -1)

        # renorm_padding is only implemented in _forward_expanded
        assert not self.renorm_padding or incremental_state is not None

        if incremental_state is not None:
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                input_buffer = x.new()
            x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
            if self.kernel_size > 1:
                self._set_input_buffer(
                    incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
                )
            x_unfold = x_unfold.view(T * B * H, R, -1)
        else:
            padding_l = self.padding_l
            if K > T and padding_l == K - 1:
                weight = weight.narrow(1, K - T, T)
                K, padding_l = T, T - 1
            # unfold the input: T x B x C --> T' x B x C x K
            x_unfold = unfold1d(x, K, padding_l, 0)
            x_unfold = x_unfold.view(T * B * H, R, K)

        if self.weight_softmax and not self.renorm_padding:
            weight = F.softmax(weight, dim=1)

        weight = weight.narrow(1, 0, K)

        if incremental_state is not None:
            weight = weight[:, -x_unfold.size(2) :]
            K = weight.size(1)

        if self.weight_softmax and self.renorm_padding:
            weight = F.softmax(weight, dim=1)

        weight = self.weight_dropout_module(weight, inplace=False)

        output = torch.bmm(x_unfold, weight.unsqueeze(2))  # T*B*H x R x 1
        output = output.view(T, B, C)
        return output

    def _forward_expanded(self, x, incremental_stat, query):
        """Turn the convolution filters into band matrices and do matrix multiplication.
        This is faster when the sequence is short, but less memory efficient.
        This is not used in the decoder during inference.
""" T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H assert R * H == C == self.input_size weight = self.weight_linear(query).view(T * B * H, -1) if not self.renorm_padding: if self.weight_softmax: weight = F.softmax(weight, dim=1) weight = self.weight_dropout_module(weight, inplace=False) weight = weight.narrow(1, 0, K).contiguous() weight = weight.view(T, B * H, K).transpose(0, 1) x = x.view(T, B * H, R).transpose(0, 1) if self.weight_softmax and self.renorm_padding: # turn the convolution filters into band matrices weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf")) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, self.padding_l, T) # normalize the weight over valid positions like self-attention weight_expanded = F.softmax(weight_expanded, dim=2) weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) else: P = self.padding_l # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length if K > T and P == K - 1: weight = weight.narrow(2, K - T, T) K, P = T, T - 1 # turn the convolution filters into band matrices weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) weight_expanded.as_strided( (B * H, T, K), (T * (T + K - 1), T + K, 1) ).copy_(weight) weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T output = torch.bmm(weight_expanded, x) output = output.transpose(0, 1).contiguous().view(T, B, C) return output
8,922
38.135965
117
py
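A usage sketch; this requires the compiled dynamicconv_cuda extension and a GPU, since the training path dispatches straight to the fused CUDA kernel:

import torch
from fairseq.modules.dynamicconv_layer import DynamicconvLayer

layer = DynamicconvLayer(
    input_size=16, kernel_size=3, padding_l=2, num_heads=4, weight_softmax=True
).cuda()
x = torch.randn(10, 2, 16, device="cuda")  # T x B x C
out = layer(x)                             # training path: fused CUDA kernel
assert out.shape == (10, 2, 16)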
sign-topic
sign-topic-main/fairseq/modules/quantization/pq/em.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os
import random
from collections import Counter

import torch


class EM:
    """
    EM algorithm used to quantize the columns of W to minimize

        ||W - W_hat||^2

    Args:
        - W: weight matrix of size (in_features x out_features)
        - n_iter: number of k-means iterations
        - n_centroids: number of centroids (size of codebook)
        - eps: scale of the noise used to split centroids when an empty
          cluster is found
        - max_tentatives: maximum number of tentatives for cluster
          reassignment when an empty cluster is found
        - verbose: print error after each iteration

    Remarks:
        - If one cluster is empty, the most populated cluster is split into
          two clusters
        - All the relevant dimensions are specified in the code
    """

    def __init__(
        self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True
    ):
        self.W = W
        self.n_centroids = n_centroids
        self.n_iter = n_iter
        self.eps = eps
        self.max_tentatives = max_tentatives
        self.verbose = verbose
        self.centroids = torch.Tensor()
        self.assignments = torch.Tensor()
        self.objective = []

    def initialize_centroids(self):
        """
        Initializes the centroids by sampling random columns from W.
        """

        in_features, out_features = self.W.size()
        indices = torch.randint(
            low=0, high=out_features, size=(self.n_centroids,)
        ).long()
        self.centroids = self.W[:, indices].t()  # (n_centroids x in_features)

    def step(self, i):
        """
        There are two standard steps for each iteration: expectation (E) and
        minimization (M). The E-step (assignment) is performed with an exhaustive
        search and the M-step (centroid computation) is performed with the exact
        solution.

        Args:
            - i: step number

        Remarks:
            - The E-step heavily uses PyTorch broadcasting to speed up computations
              and reduce the memory overhead
        """

        # assignments (E-step)
        distances = self.compute_distances()  # (n_centroids x out_features)
        self.assignments = torch.argmin(distances, dim=0)  # (out_features)
        n_empty_clusters = self.resolve_empty_clusters()

        # centroids (M-step)
        for k in range(self.n_centroids):
            W_k = self.W[:, self.assignments == k]  # (in_features x size_of_cluster_k)
            self.centroids[k] = W_k.mean(dim=1)  # (in_features)

        # book-keeping
        obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item()
        self.objective.append(obj)
        if self.verbose:
            logging.info(
                f"Iteration: {i},\t"
                f"objective: {obj:.6f},\t"
                f"resolved empty clusters: {n_empty_clusters}"
            )

    def resolve_empty_clusters(self):
        """
        If one cluster is empty, the most populated cluster is split into
        two clusters by shifting the respective centroids. This is done
        iteratively for a fixed number of tentatives.
""" # empty clusters counts = Counter(map(lambda x: x.item(), self.assignments)) empty_clusters = set(range(self.n_centroids)) - set(counts.keys()) n_empty_clusters = len(empty_clusters) tentatives = 0 while len(empty_clusters) > 0: # given an empty cluster, find most populated cluster and split it into two k = random.choice(list(empty_clusters)) m = counts.most_common(1)[0][0] e = torch.randn_like(self.centroids[m]) * self.eps self.centroids[k] = self.centroids[m].clone() self.centroids[k] += e self.centroids[m] -= e # recompute assignments distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) # check for empty clusters counts = Counter(map(lambda x: x.item(), self.assignments)) empty_clusters = set(range(self.n_centroids)) - set(counts.keys()) # increment tentatives if tentatives == self.max_tentatives: logging.info( f"Could not resolve all empty clusters, {len(empty_clusters)} remaining" ) raise EmptyClusterResolveError tentatives += 1 return n_empty_clusters def compute_distances(self): """ For every centroid m, computes ||M - m[None, :]||_2 Remarks: - We rely on PyTorch's broadcasting to speed up computations and reduce the memory overhead - Without chunking, the sizes in the broadcasting are modified as: (n_centroids x n_samples x out_features) -> (n_centroids x out_features) - The broadcasting computation is automatically chunked so that the tensors fit into the memory of the GPU """ nb_centroids_chunks = 1 while True: try: return torch.cat( [ (self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1) for centroids_c in self.centroids.chunk( nb_centroids_chunks, dim=0 ) ], dim=0, ) except RuntimeError: nb_centroids_chunks *= 2 def assign(self): """ Assigns each column of W to its closest centroid, thus essentially performing the E-step in train(). Remarks: - The function must be called after train() or after loading centroids using self.load(), otherwise it will return empty tensors """ distances = self.compute_distances() # (n_centroids x out_features) self.assignments = torch.argmin(distances, dim=0) # (out_features) def save(self, path, layer): """ Saves centroids and assignments. Args: - path: folder used to save centroids and assignments """ torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer))) torch.save( self.assignments, os.path.join(path, "{}_assignments.pth".format(layer)) ) torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer))) def load(self, path, layer): """ Loads centroids and assignments from a given path Args: - path: folder use to load centroids and assignments """ self.centroids = torch.load( os.path.join(path, "{}_centroids.pth".format(layer)) ) self.assignments = torch.load( os.path.join(path, "{}_assignments.pth".format(layer)) ) self.objective = torch.load( os.path.join(path, "{}_objective.pth".format(layer)) ) class EmptyClusterResolveError(Exception): pass
7,333
33.59434
92
py
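A minimal usage sketch for the EM quantizer in the record above. The import path is assumed from this repo's layout; the shapes and hyper-parameters are illustrative, not prescriptive.

# Minimal usage sketch for the EM quantizer above; shapes are illustrative
# and the import path is assumed from the repo layout.
import torch
from fairseq.modules.quantization.pq.em import EM

W = torch.randn(64, 512)                      # (in_features x out_features)
em = EM(W, n_centroids=16, n_iter=5, verbose=False)
em.initialize_centroids()                     # centroids: (n_centroids x in_features)
for i in range(em.n_iter):
    em.step(i)                                # E-step (assignments) + M-step (centroids)

# reconstruct the quantized weight matrix and check it against the stored objective
W_hat = em.centroids[em.assignments].t()      # (in_features x out_features)
assert abs((W - W_hat).norm(p=2).item() - em.objective[-1]) < 1e-4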
sign-topic
sign-topic-main/fairseq/modules/quantization/pq/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import re
from operator import attrgetter, itemgetter

import torch
import numpy as np
import torch.distributed as dist
import torch.nn as nn

from .modules import PQConv2d, PQEmbedding, PQLinear
from .pq import PQ


def quantize_model_(
    model,
    size_tracker,
    layers_to_quantize,
    block_sizes_config,
    n_centroids_config,
    step=0,
    n_iter=15,
    eps=1e-6,
    max_tentatives=100,
    remove_weights=False,
    verbose=True,
    state_dict=None,
):
    """
    Quantize a model in-place by stages. All the targeted layers are replaced by
    their quantized counterpart, and the model is ready for the finetuning of
    the centroids in a standard training loop (no modifications required).
    Note that we do not quantize biases.

    Args:
        - model: a nn.Module
        - size_tracker: useful for tracking quantization statistics
        - layers_to_quantize: a list containing regexps for filtering the layers
          to quantize at each stage according to their name (as in
          model.named_parameters())
        - block_sizes_config: dict like
          {
              'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
              'Linear': ('in_features', {'*': 8})
          }
          For instance, all conv2d layers with kernel size 3x3 have a block
          size of 9 and all Linear layers are quantized with a block size of 8,
          irrespective of their size.
        - n_centroids_config: dict like
          {
              'Conv2d': ('kernel_size', {'*': 256}),
              'Linear': ('in_features', {'*': 256})
          }
          For instance, all conv2d layers are quantized with 256 centroids
        - step: the layers to quantize in place corresponding to
          layers_to_quantize[step]
    """

    quantized_layers = get_layers(
        model, layers_to_quantize[step], remove_weights=remove_weights
    )

    for layer in quantized_layers:

        # book-keeping
        is_master_process = (not dist.is_initialized()) or (
            dist.is_initialized() and dist.get_rank() == 0
        )
        verbose = verbose and is_master_process

        # get block size and centroids
        module = attrgetter(layer)(model)
        block_size = get_param(module, layer, block_sizes_config)
        n_centroids = get_param(module, layer, n_centroids_config)
        if verbose:
            logging.info(
                f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids"
            )

        # quantize layer
        weight = module.weight.data.clone()
        is_bias = "bias" in [x[0] for x in module.named_parameters()]
        bias = module.bias.data.clone() if is_bias else None
        quantizer = PQ(
            weight,
            block_size,
            n_centroids=n_centroids,
            n_iter=n_iter,
            eps=eps,
            max_tentatives=max_tentatives,
            verbose=verbose,
        )

        # quantization performed on all GPUs with same seed
        quantizer.encode()
        centroids = quantizer.centroids.contiguous()
        assignments = quantizer.assignments.contiguous()

        # If n_iter = 0 and state_dict is provided, then
        # we initialize random assignments and centroids to
        # random values of the appropriate dimensions
        # because the quantized model parameters will be
        # overwritten by the state_dict later on.
        if n_iter == 0 and state_dict:
            # Initialize random centroids of the correct size
            centroids = torch.rand(centroids.size()).cuda()

            # Get counts and assignment keys from layer in loaded checkpoint.
            counts_key = layer + "." + "counts"
            assignment_key = layer + "." + "assignments"

            # Get number of different bins to include.
            counts = list(state_dict[counts_key].shape)[0]

            # Initialize random assignments of the correct size
            # with an appropriate number of bins.
            num_assignments = list(state_dict[assignment_key].shape)[0]
            num_extra = num_assignments - counts
            assignments_bins = torch.arange(counts)
            assignments_rand = torch.randint(0, counts - 1, (num_extra,))
            assignments = torch.cat((assignments_bins, assignments_rand), 0)
            assignments = assignments.cuda()

        # broadcast results to make sure weights are up-to-date
        if dist.is_initialized():
            dist.broadcast(centroids, 0)
            dist.broadcast(assignments, 0)

        # instantiate the quantized counterpart
        if isinstance(module, nn.Linear):
            out_features, in_features = map(
                lambda k: module.__dict__[k], ["out_features", "in_features"]
            )
            quantized_module = PQLinear(
                centroids, assignments, bias, in_features, out_features
            )
        elif isinstance(module, nn.Embedding):
            num_embeddings, embedding_dim = map(
                lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"]
            )
            quantized_module = PQEmbedding(
                centroids, assignments, num_embeddings, embedding_dim
            )
        elif isinstance(module, nn.Conv2d):
            out_channels, in_channels, kernel_size = map(
                lambda k: module.__dict__[k],
                ["out_channels", "in_channels", "kernel_size"],
            )
            stride, padding, dilation, groups, padding_mode = map(
                lambda k: module.__dict__[k],
                ["stride", "padding", "dilation", "groups", "padding_mode"],
            )
            quantized_module = PQConv2d(
                centroids,
                assignments,
                bias,
                in_channels,
                out_channels,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                padding_mode=padding_mode,
            )
        else:
            raise ValueError(f"Module {module} not yet supported for quantization")

        # replace layer by its quantized counterpart
        attrsetter(layer)(model, quantized_module)

        # update statistics
        size_tracker.update(weight, block_size, n_centroids)

    # return name of quantized layers
    return quantized_layers


def get_layers(model, filter_regexp, remove_weights=False):
    """
    Filters out the layers according to a regexp. Note that
    we omit biases.

    Args:
        - model: a nn.Module
        - filter_regexp: a regexp to filter the layers to keep
          according to their name in model.named_parameters().
          For instance, the regexp:

             down_layers\\.[123456]\\.(conv[12]|identity\\.conv)

          is keeping blocks down_layers from 1 to 6, and inside
          each block is keeping conv1, conv2 and identity.conv.

    Remarks:
        - We add (module\\.)? at the beginning of the regexp to
          account for the possible use of nn.parallel.DataParallel
    """

    # get all parameter names
    all_layers = map(itemgetter(0), model.named_parameters())

    # remove biases
    all_layers = filter(lambda x: "bias" not in x, all_layers)

    # remove .weight in all other names (or .weight_orig if spectral norm is used)
    all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
    # remove_weights indicates whether the .weights extension should be removed,
    # in addition to the .weight_orig and .weight extensions on names
    if remove_weights:
        all_layers = map(lambda x: x.replace(".weights", ""), all_layers)
    all_layers = map(lambda x: x.replace(".weight", ""), all_layers)

    # return filtered layers
    filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
    r = re.compile(filter_regexp)

    return list(filter(r.match, all_layers))


def get_param(module, layer_name, param_config):
    """
    Given a quantization configuration, get the right parameter
    for the module to be quantized.
    Args:
        - module: a nn.Module
        - layer_name: the name of the layer
        - param_config: a dict like
          {
              'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
              'Linear': ('in_features', {'*': 8})
          }
          For instance, all conv2d layers with kernel size 3x3 have a block
          size of 9 and all Linear layers are quantized with a block size of 8,
          irrespective of their size.

    Remarks:
        - if 'fuzzy_name' is passed as a parameter, layers whose layer_name
          include 'fuzzy_name' will be assigned the given parameter.
          In the following example, conv.expand layers will have a block
          size of 9 while conv.reduce will have a block size of 4 and all
          other layers will have a block size of 2.
          {
              'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),
              'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})
          }
    """

    layer_type = module.__class__.__name__

    if layer_type not in param_config:
        raise KeyError(f"Layer type {layer_type} not in config for layer {module}")

    feature, params = param_config[module.__class__.__name__]

    if feature != "fuzzy_name":
        feature_value = str(getattr(module, feature))
        if feature_value not in params:
            if "*" in params:
                feature_value = "*"
            else:
                raise KeyError(
                    f"{feature}={feature_value} not in config for layer {module}"
                )
    else:
        feature_values = [name for name in params if name in layer_name]
        if len(feature_values) == 0:
            if "*" in params:
                feature_value = "*"
            else:
                raise KeyError(f"name={layer_name} not in config for {module}")
        else:
            feature_value = feature_values[0]

    return params[feature_value]


class SizeTracker(object):
    """
    Class to keep track of the compressed network size with iPQ.

    Args:
        - model: a nn.Module

    Remarks:
        - The compressed size is the sum of three components
          for each layer in the network:
              (1) Storing the centroids given by iPQ in fp16
              (2) Storing the assignments of the blocks in int8
              (3) Storing all non-compressed elements such as biases
        - This cost is only valid if we use 256 centroids (then
          indexing can indeed be done with int8).
    """

    def __init__(self, model):
        self.model = model
        self.size_non_compressed_model = self.compute_size()
        self.size_non_quantized = self.size_non_compressed_model
        self.size_index = 0
        self.size_centroids = 0
        self.n_quantized_layers = 0

    def compute_size(self):
        """
        Computes the size of the model (in MB).
        """

        res = 0
        for _, p in self.model.named_parameters():
            res += p.numel()
        return res * 4 / 1024 / 1024

    def update(self, W, block_size, n_centroids):
        """
        Updates the running statistics when quantizing a new layer.
        """

        # bits per weights
        bits_per_weight = np.log2(n_centroids) / block_size
        self.n_quantized_layers += 1

        # size of indexing the subvectors of size block_size (in MB)
        size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024
        self.size_index += size_index_layer

        # size of the centroids stored in float16 (in MB)
        size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024
        self.size_centroids += size_centroids_layer

        # size of non-compressed layers, e.g. LayerNorms or biases (in MB)
        size_uncompressed_layer = W.numel() * 4 / 1024 / 1024
        self.size_non_quantized -= size_uncompressed_layer

    def __repr__(self):
        size_compressed = (
            self.size_index + self.size_centroids + self.size_non_quantized
        )
        compression_ratio = self.size_non_compressed_model / size_compressed  # NOQA
        return (
            f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. "
            f"After quantizing {self.n_quantized_layers} layers, size "
            f"(indexing + centroids + other): {self.size_index:.2f} MB + "
            f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = "
            f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x"
        )


def attrsetter(*items):
    def resolve_attr(obj, attr):
        attrs = attr.split(".")
        head = attrs[:-1]
        tail = attrs[-1]

        for name in head:
            obj = getattr(obj, name)
        return obj, tail

    def g(obj, val):
        for attr in items:
            resolved_obj, resolved_attr = resolve_attr(obj, attr)
            setattr(resolved_obj, resolved_attr, val)

    return g
13,493
34.793103
100
py
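To see quantize_model_ from the record above end to end, here is a hedged driver on a toy model; the model, the stage regexps, and the config dicts are made up for illustration and follow the format described in the docstring.

# Hypothetical driver for quantize_model_ above; the toy model and configs
# are illustrative only.
import torch.nn as nn
from fairseq.modules.quantization.pq.utils import SizeTracker, quantize_model_

model = nn.Sequential(nn.Linear(16, 32), nn.Linear(32, 8))
size_tracker = SizeTracker(model)
layers_to_quantize = ["0", "1"]                              # one stage per layer
block_sizes_config = {"Linear": ("in_features", {"*": 8})}
n_centroids_config = {"Linear": ("in_features", {"*": 4})}

for step in range(len(layers_to_quantize)):
    quantize_model_(
        model,
        size_tracker,
        layers_to_quantize,
        block_sizes_config,
        n_centroids_config,
        step=step,
        n_iter=5,
        verbose=False,
    )
print(size_tracker)   # compression summary from SizeTracker.__repr__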
sign-topic
sign-topic-main/fairseq/modules/quantization/pq/modules/qlinear.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F class PQLinear(nn.Module): """ Quantized counterpart of nn.Linear module. Stores the centroid, the assignments and the non-quantized biases. The full weight is re-instantiated at each forward pass. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_features x n_blocks - bias: the non-quantized bias Remarks: - We refer the reader to the official documentation of the nn.Linear module for the other arguments and the behavior of the module - Performance tests on GPU show that this implementation is 15% slower than the non-quantized nn.Linear module for a standard training loop. """ def __init__(self, centroids, assignments, bias, in_features, out_features): super(PQLinear, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.in_features = in_features self.out_features = out_features # check compatibility if self.in_features % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % self.out_features != 0: raise ValueError("Wrong PQ sizes") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) if bias is not None: self.bias = nn.Parameter(bias) else: self.register_parameter("bias", None) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.out_features, self.block_size) .permute(1, 0, 2) .flatten(1, 2) ) def forward(self, x): return F.linear( x, self.weight, self.bias, ) def extra_repr(self): return f"in_features={self.in_features},\ out_features={self.out_features},\ n_centroids={self.n_centroids},\ block_size={self.block_size},\ bias={self.bias is not None}"
2,547
34.388889
86
py
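To make the indexing in PQLinear.weight concrete, here is a small shape walk-through; all sizes are illustrative, and the import path follows the package's own `from .modules import PQLinear`.

# Shape walk-through for the weight reconstruction in PQLinear above;
# all sizes are illustrative.
import torch
from fairseq.modules.quantization.pq.modules import PQLinear

n_centroids, block_size = 8, 4
in_features, out_features = 12, 6             # in_features % block_size == 0
n_blocks = in_features // block_size           # 3 sub-vectors per output row
centroids = torch.randn(n_centroids, block_size)
assignments = torch.randint(n_centroids, (out_features * n_blocks,))

layer = PQLinear(centroids, assignments, None, in_features, out_features)
assert layer.weight.shape == (out_features, in_features)
assert layer(torch.randn(2, in_features)).shape == (2, out_features)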
sign-topic
sign-topic-main/fairseq/modules/quantization/pq/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair class PQConv2d(nn.Module): """ Quantized counterpart of nn.Conv2d module. Stores the centroid, the assignments and the non-quantized biases. The full weight is re-instantiated at each forward pass and autograd automatically computes the gradients with respect to the centroids. Args: - centroids: centroids of size n_centroids x block_size - assignments: assignments of the centroids to the subvectors of size self.out_channels x n_blocks - bias: the non-quantized bias, must be either torch.Tensor or None Remarks: - We refer the reader to the official documentation of the nn.Conv2d module for the other arguments and the behavior of the module. - Performance tests on GPU show that this implementation is 10% slower than the non-quantized nn.Conv2d module for a standard training loop. - During the backward, the gradients are averaged by cluster and not summed. This explains the hook registered to the centroids. """ def __init__( self, centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, padding_mode="zeros", ): super(PQConv2d, self).__init__() self.block_size = centroids.size(1) self.n_centroids = centroids.size(0) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.groups = groups self.padding_mode = padding_mode # check compatibility if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0: raise ValueError("Wrong PQ sizes") if len(assignments) % out_channels != 0: raise ValueError("Wrong PQ sizes") if in_channels % groups != 0: raise ValueError("in_channels must be divisible by groups") if out_channels % groups != 0: raise ValueError("out_channels must be divisible by groups") # define parameters self.centroids = nn.Parameter(centroids, requires_grad=True) self.register_buffer("assignments", assignments) self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) if bias is not None: self.bias = nn.Parameter(bias) else: self.register_parameter("bias", None) # register hook for averaging gradients per centroids instead of summing self.centroids.register_hook(lambda x: x / self.counts[:, None]) @property def weight(self): return ( self.centroids[self.assignments] .reshape(-1, self.out_channels, self.block_size) .permute(1, 0, 2) .reshape( self.out_channels, self.in_channels // self.groups, *self.kernel_size ) ) def forward(self, x): return F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def extra_repr(self): s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}" if self.padding != (0,) * len(self.padding): s += ", padding={padding}" if self.dilation != (1,) * len(self.dilation): s += ", dilation={dilation}" if self.groups != 1: s += ", groups={groups}" if self.bias is None: s += ", bias=False" if self.padding_mode != "zeros": s += ", padding_mode={padding_mode}" s += ", n_centroids={n_centroids}, block_size={block_size}" return s.format(**self.__dict__)
4,245
35.603448
87
py
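The remark about averaging gradients per cluster is implemented by the hook registered on the centroids; the standalone snippet below isolates just that mechanism, with illustrative numbers.

# Standalone illustration of the per-centroid gradient averaging performed
# by the hook in PQConv2d above; numbers are illustrative.
import torch

assignments = torch.tensor([0, 0, 0, 1])        # centroid 0 is used by 3 blocks
counts = torch.bincount(assignments).float()    # tensor([3., 1.])
centroids = torch.randn(2, 4, requires_grad=True)
centroids.register_hook(lambda grad: grad / counts[:, None])

centroids[assignments].sum().backward()         # each use contributes a grad of 1
print(centroids.grad)                           # both rows end up at 1.0: summed
                                                # gradients divided by usage counts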
sign-topic
sign-topic-main/fairseq/modules/quantization/pq/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F


class PQEmbedding(nn.Module):
    """
    Quantized counterpart of nn.Embedding module. Stores the centroids and
    the assignments. The full weight is re-instantiated at each forward pass.

    Args:
        - centroids: centroids of size n_centroids x block_size
        - assignments: assignments of the centroids to the subvectors
          of size self.num_embeddings x n_blocks

    Remarks:
        - We refer the reader to the official documentation of the nn.Embedding module
          for the other arguments and the behavior of the module
        - Performance tests on GPU show that this implementation is 10% slower than
          the non-quantized nn.Embedding module for a standard training loop.
    """

    def __init__(
        self,
        centroids,
        assignments,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        _weight=None,
    ):
        super(PQEmbedding, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        # check compatibility
        if self.embedding_dim % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % self.num_embeddings != 0:
            raise ValueError("Wrong PQ sizes")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))

    @property
    def weight(self):
        return (
            self.centroids[self.assignments]
            .reshape(-1, self.num_embeddings, self.block_size)
            .permute(1, 0, 2)
            .flatten(1, 2)
        )

    def forward(self, input):
        return F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

    def extra_repr(self):
        s = "{num_embeddings}, {embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        if self.max_norm is not None:
            s += ", max_norm={max_norm}"
        if self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        if self.sparse is not False:
            s += ", sparse=True"
        s += ", n_centroids={n_centroids}, block_size={block_size}"
        return s.format(**self.__dict__)
3,719
33.444444
86
py
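PQEmbedding reconstructs its weight with the same pattern as PQLinear, along num_embeddings instead of out_features; a quick sketch with illustrative sizes:

# Quick sketch for PQEmbedding above; sizes are illustrative.
import torch
from fairseq.modules.quantization.pq.modules import PQEmbedding

n_centroids, block_size = 8, 4
num_embeddings, embedding_dim = 10, 8          # embedding_dim % block_size == 0
n_blocks = embedding_dim // block_size
centroids = torch.randn(n_centroids, block_size)
assignments = torch.randint(n_centroids, (num_embeddings * n_blocks,))

emb = PQEmbedding(centroids, assignments, num_embeddings, embedding_dim)
tokens = torch.tensor([[1, 2, 3]])
assert emb(tokens).shape == (1, 3, embedding_dim)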
sign-topic
sign-topic-main/fairseq/modules/quantization/scalar/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from operator import attrgetter

import torch.distributed as dist
import torch.nn as nn

from ..pq.utils import attrsetter, get_layers
from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear


MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}


def quantize_model_(
    model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False
):
    """
    Replaces all modules with their scalar quantized counterpart and
    registers hooks to quantize the post-activations of those modules.

    Args:
        - model: a nn.Module
        - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
        - bits: number of bits
        - update_step: update quantization parameters every update_step steps
    """
    # quantize all layers
    # remove_weights indicates whether the weights extension should be removed,
    # in addition to the weight_orig and weight extensions on names
    quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights)

    for layer in quantized_layers:

        # book-keeping
        is_master_process = (not dist.is_initialized()) or (
            dist.is_initialized() and dist.get_rank() == 0
        )

        # recover module
        module = attrgetter(layer)(model)
        if is_master_process:
            logging.info(
                f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}"
            )

        # quantization params
        q_params = {
            "p": p,
            "update_step": update_step,
            "bits": bits,
            "method": method,
            "counter": 0,
        }

        # instantiate the quantized counterpart
        if isinstance(module, tuple(MAPPING.keys())):
            QuantizedModule = MAPPING[module.__class__]
            quantized_module = QuantizedModule.__new__(QuantizedModule)
            params = module.__dict__
            params.update(q_params)
            quantized_module.__dict__.update(params)

        else:
            if is_master_process:
                logging.info(f"Module {module} not yet supported for quantization")
            continue

        # activation quantization: registers a forward hook on the module,
        # so the constructed object only needs to stay referenced
        a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method)

        # replace layer by its quantized counterpart
        attrsetter(layer)(model, quantized_module)

    # return name of quantized layers
    return quantized_layers
2,657
31.814815
94
py
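An illustrative driver for the scalar quantize_model_ above; the toy model is hypothetical. Note that, unlike the iPQ variant, this one runs in a single stage over every supported layer.

# Illustrative driver for the scalar quantize_model_ above; the toy model
# is hypothetical.
import torch.nn as nn
from fairseq.modules.quantization.scalar.utils import quantize_model_

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 8))
# Every supported layer is replaced by its Int* counterpart and gets an
# ActivationQuantizer hook; the names of the affected layers are returned.
layers = quantize_model_(model, p=0.1, bits=8, update_step=1000)
print(layers)   # the two Linear layers, e.g. ['0', '2']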
sign-topic
sign-topic-main/fairseq/modules/quantization/scalar/ops.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch try: import torch.ao.quantization as quantization except ImportError: import torch.quantization as quantization def emulate_int(w, bits, method, scale=None, zero_point=None): q = globals()[f"emulate_int8_{method}"] return q(w, scale=scale, zero_point=zero_point, bits=bits) def quantize(w, scale, zero_point, bits=8): # In the default behavior, max_val = 255. max_val = 2 ** bits - 1 return ( torch.clamp(torch.round(w / scale + zero_point), 0, max_val) - zero_point ) * scale def emulate_int8_histogram(w, scale=None, zero_point=None, bits=8): if scale is None: obs = quantization.observer.HistogramObserver() obs.to(device=w.device) _ = obs(w.float()) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point def emulate_int8_channel(w, scale=None, zero_point=None, bits=8): if scale is None: obs = quantization.observer.PerChannelMinMaxObserver( ch_axis=-1, qscheme=torch.per_channel_symmetric ) obs.to(device=w.device) _ = obs(w) scale, zero_point, ch_axis = obs.get_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point def emulate_int8_tensor(w, scale=None, zero_point=None, bits=8): if scale is None: obs = quantization.observer.MinMaxObserver() obs.to(device=w.device) _ = obs(w) scale, zero_point = obs.calculate_qparams() scale = scale.cuda().type_as(w) zero_point = zero_point.cuda().type_as(w) return quantize(w, scale, zero_point, bits=bits), scale, zero_point
2,031
32.866667
81
py
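The quantize helper above implements standard affine fake-quantization; a tiny numeric check, with illustrative values:

# Tiny numeric check of the affine fake-quantization helper above.
import torch
from fairseq.modules.quantization.scalar.ops import quantize

w = torch.tensor([-0.30, 0.00, 0.27])
scale, zero_point = torch.tensor(0.1), torch.tensor(3.0)
# w is mapped to the integer grid round(w / scale + zero_point), clamped to
# [0, 255] for bits=8, then mapped back; the representable range is [-0.3, 25.2]
print(quantize(w, scale, zero_point, bits=8))   # ~tensor([-0.3000, 0.0000, 0.3000])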
sign-topic
sign-topic-main/fairseq/modules/quantization/scalar/modules/qlinear.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..ops import emulate_int


class IntLinear(nn.Module):
    """
    Quantized counterpart of the nn.Linear module that applies QuantNoise during training.

    Args:
        - in_features: input features
        - out_features: output features
        - bias: bias or not
        - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
        - bits: number of bits
        - method: choose among {"tensor", "histogram", "channel"}
        - update_step: recompute scale and zero_point every update_step iterations

    Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network; this is implemented with the
          detach() trick
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - At test time, the weights are fully quantized
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        p=0,
        update_step=3000,
        bits=8,
        method="histogram",
    ):
        super(IntLinear, self).__init__()
        self.in_features = int(in_features)
        self.out_features = int(out_features)
        self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.chosen_bias = bias
        if self.chosen_bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()

        # quantization parameters
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.weight)
        if self.chosen_bias:
            nn.init.constant_(self.bias, 0.0)
        return

    def forward(self, input):
        # train with QuantNoise and evaluate the fully quantized network
        p = self.p if self.training else 1

        # update quantization parameters every update_step iterations
        if self.counter % self.update_step == 0:
            self.scale = None
            self.zero_point = None
        self.counter += 1

        # quantize weight
        weight_quantized, self.scale, self.zero_point = emulate_int(
            self.weight.detach(),
            bits=self.bits,
            method=self.method,
            scale=self.scale,
            zero_point=self.zero_point,
        )

        # mask to apply noise
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_(1 - p)
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)

        # using straight-through estimator (STE)
        clamp_low = -self.scale * self.zero_point
        clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
        weight = (
            torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
            + noise.detach()
        )

        # return output
        output = F.linear(input, weight, self.bias)
        return output

    def extra_repr(self):
        return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format(
            self.in_features,
            self.out_features,
            self.bias is not None,
            self.p,
            self.bits,
            self.method,
        )
3,631
30.859649
101
py
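The QuantNoise trick in IntLinear.forward mixes quantized and full-precision weights through a Bernoulli mask; the standalone sketch below isolates that step, with an illustrative stand-in for the quantizer.

# Standalone illustration of the QuantNoise masking step in IntLinear.forward
# above; the quantizer stand-in and sizes are illustrative.
import torch

weight = torch.randn(4, 4)
weight_quantized = torch.round(weight * 4) / 4   # stand-in for emulate_int's output
p = 0.5                                          # proportion of weights to quantize

mask = torch.zeros_like(weight)
mask.bernoulli_(1 - p)                           # 1 = keep full precision
noise = (weight_quantized - weight).masked_fill(mask.bool(), 0)
noisy_weight = weight + noise                    # the module detaches `noise` so
                                                 # gradients flow to `weight` (STE)
print((noisy_weight != weight).float().mean())   # ~p of the entries are quantized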
sign-topic
sign-topic-main/fairseq/modules/quantization/scalar/modules/qconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair

from ..ops import emulate_int


class IntConv2d(_ConvNd):
    """
    Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.

    Args:
        - standard nn.Conv2d parameters
        - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
        - bits: number of bits
        - method: choose among {"tensor", "histogram", "channel"}
        - update_step: recompute scale and zero_point every update_step iterations

    Remarks:
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network; this is implemented with the
          detach() trick
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - At test time, the weights are fully quantized
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
        p=0,
        bits=8,
        method="histogram",
        update_step=1000,
    ):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(IntConv2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            False,
            _pair(0),
            groups,
            bias,
            padding_mode,
        )

        # quantization parameters
        self.p = p
        self.bits = bits
        self.method = method
        self.update_step = update_step
        self.counter = 0

    def _conv_forward(self, input, weight):
        if self.padding_mode != "zeros":
            return F.conv2d(
                F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
                weight,
                self.bias,
                self.stride,
                _pair(0),
                self.dilation,
                self.groups,
            )
        return F.conv2d(
            input,
            weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )

    def forward(self, input):
        # train with QuantNoise and evaluate the fully quantized network
        p = self.p if self.training else 1

        # update quantization parameters every update_step iterations
        if self.counter % self.update_step == 0:
            self.scale = None
            self.zero_point = None
        self.counter += 1

        # quantize weight
        weight_quantized, self.scale, self.zero_point = emulate_int(
            self.weight.detach(),
            bits=self.bits,
            method=self.method,
            scale=self.scale,
            zero_point=self.zero_point,
        )

        # mask to apply noise
        mask = torch.zeros_like(self.weight)
        mask.bernoulli_(1 - p)
        noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)

        # using straight-through estimator (STE)
        clamp_low = -self.scale * self.zero_point
        clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
        weight = (
            torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
            + noise.detach()
        )

        # return output
        output = self._conv_forward(input, weight)
        return output

    def extra_repr(self):
        return (
            "in_channels={}, out_channels={}, kernel_size={}, stride={}, "
            "padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
            "bits={}, method={}".format(
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.padding,
                self.dilation,
                self.groups,
                self.bias is not None,
                self.p,
                self.bits,
                self.method,
            )
        )
4,450
28.673333
90
py
sign-topic
sign-topic-main/fairseq/modules/quantization/scalar/modules/qemb.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from ..ops import emulate_int class IntEmbedding(nn.Module): """ Quantized counterpart of the nn.Embedding module that applies QuantNoise during training. Args: - num_embeddings: number of tokens - embedding_dim: embedding dimension - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - bits: number of bits - method: choose among {"tensor", "histogram", "channel"} - update_step: recompute scale and zero_point every update_steps iterations Remarks: - We use the straight-through estimator so that the gradients back-propagate nicely in the network, this is implemented with the detach() trick - Parameters scale and zero_point are recomputed every update_step forward pass to reduce the overhead - At test time, the weights are fully quantized """ def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, p=0, update_step=1000, bits=8, method="histogram", ): super(IntEmbedding, self).__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert ( padding_idx < self.num_embeddings ), "Padding_idx must be within num_embeddings" elif padding_idx < 0: assert ( padding_idx >= -self.num_embeddings ), "Padding_idx must be within num_embeddings" padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq if _weight is None: self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim)) self.reset_parameters() else: assert list(_weight.shape) == [ num_embeddings, embedding_dim, ], "Shape of weight does not match num_embeddings and embedding_dim" self.weight = nn.Parameter(_weight) self.sparse = sparse # quantization parameters self.p = p self.bits = bits self.method = method self.update_step = update_step self.counter = 0 def reset_parameters(self): nn.init.normal_(self.weight) if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def forward(self, input): # train with QuantNoise and evaluate the fully quantized network p = self.p if self.training else 1 # update parameters every 1000 iterations if self.counter % self.update_step == 0: self.scale = None self.zero_point = None self.counter += 1 # quantize weight weight_quantized, self.scale, self.zero_point = emulate_int( self.weight.detach(), bits=self.bits, method=self.method, scale=self.scale, zero_point=self.zero_point, ) # mask to apply noise mask = torch.zeros_like(self.weight) mask.bernoulli_(1 - p) noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) # using straight-through estimator (STE) clamp_low = -self.scale * self.zero_point clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) weight = ( torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach() ) # return output output = F.embedding( input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) return output def extra_repr(self): s = "{num_embeddings}, {embedding_dim}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" if self.max_norm is not None: s += ", max_norm={max_norm}" if 
self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        if self.sparse is not False:
            s += ", sparse=True"
        s += ", quant_noise={p}, bits={bits}, method={method}"
        return s.format(**self.__dict__)
4,986
32.695946
93
py
sign-topic
sign-topic-main/fairseq/modules/quantization/scalar/modules/qact.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from ..ops import emulate_int


class ActivationQuantizer:
    """
    Fake scalar quantization of the activations using a forward hook.

    Args:
        - module: a nn.Module for which we quantize the *post-activations*
        - p: proportion of activations to quantize, set by default to 1
        - update_step: to recompute quantization parameters
        - bits: number of bits for quantization
        - method: choose among {"tensor", "histogram", "channel"}
        - clamp_threshold: to prevent gradients overflow

    Remarks:
        - Parameters scale and zero_point are recomputed every update_step
          forward pass to reduce the overhead
        - For the list of quantization methods and number of bits, see ops.py
        - To remove the hook from the module, simply call self.handle.remove()
        - At test time, the activations are fully quantized
        - We use the straight-through estimator so that the gradients
          back-propagate nicely in the network; this is implemented with the
          detach() trick
        - The activations are hard-clamped in [-clamp_threshold, clamp_threshold]
          to prevent overflow during the backward pass
    """

    def __init__(
        self,
        module,
        p=1,
        update_step=1000,
        bits=8,
        method="histogram",
        clamp_threshold=5,
    ):
        self.module = module
        self.p = p
        self.update_step = update_step
        self.counter = 0
        self.bits = bits
        self.method = method
        self.clamp_threshold = clamp_threshold
        self.handle = None
        self.register_hook()

    def register_hook(self):
        # forward hook
        def quantize_hook(module, x, y):

            # update quantization parameters every update_step iterations
            if self.counter % self.update_step == 0:
                self.scale = None
                self.zero_point = None
            self.counter += 1

            # train with QuantNoise and evaluate the fully quantized network
            p = self.p if self.module.training else 1

            # quantize activations
            y_q, self.scale, self.zero_point = emulate_int(
                y.detach(),
                bits=self.bits,
                method=self.method,
                scale=self.scale,
                zero_point=self.zero_point,
            )

            # mask to apply noise
            mask = torch.zeros_like(y)
            mask.bernoulli_(1 - p)
            noise = (y_q - y).masked_fill(mask.bool(), 0)

            # using straight-through estimator (STE)
            clamp_low = -self.scale * self.zero_point
            clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
            return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach()

        # register hook
        self.handle = self.module.register_forward_hook(quantize_hook)
3,079
33.606742
87
py
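Attaching the activation quantizer above to a module is a one-liner, and removal goes through the stored handle. A hedged sketch; note that the observers in ops.py move qparams to CUDA, so this path assumes a GPU is available.

# Hedged usage sketch for ActivationQuantizer above; assumes a GPU because
# the observers in ops.py move scale/zero_point to CUDA.
import torch
import torch.nn as nn
from fairseq.modules.quantization.scalar.modules import ActivationQuantizer

layer = nn.Linear(16, 16).cuda()
quantizer = ActivationQuantizer(layer, p=0.5, bits=8, method="histogram")
_ = layer(torch.randn(8, 16).cuda())   # post-activations now partially quantized
quantizer.handle.remove()              # detach the forward hook when done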
sign-topic
sign-topic-main/fairseq/modules/lightconv_layer/setup.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension setup( name="lightconv_layer", ext_modules=[ CUDAExtension( "lightconv_cuda", [ "lightconv_cuda.cpp", "lightconv_cuda_kernel.cu", ], ), ], cmdclass={"build_ext": BuildExtension}, )
581
23.25
67
py
sign-topic
sign-topic-main/fairseq/modules/lightconv_layer/lightconv_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import lightconv_cuda import torch import torch.nn.functional as F from fairseq import utils from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.modules.fairseq_dropout import FairseqDropout from torch import nn from torch.autograd import Function class lightconvFunction(Function): @staticmethod def forward(ctx, x, weights, padding_l): ctx.padding_l = padding_l outputs = lightconv_cuda.forward(x, weights, padding_l) variables = [x, weights] ctx.save_for_backward(*variables) return outputs[0] @staticmethod def backward(ctx, grad_output): outputs = lightconv_cuda.backward( grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors ) grad_input, grad_weights = outputs return grad_input, grad_weights, None @with_incremental_state class LightconvLayer(nn.Module): def __init__( self, input_size, kernel_size=1, padding_l=None, weight_softmax=False, num_heads=1, weight_dropout=0.0, bias=False, ): super(LightconvLayer, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.padding_l = padding_l self.num_heads = num_heads self.weight_softmax = weight_softmax self.weight_dropout_module = FairseqDropout( weight_dropout, module_name=self.__class__.__name__ ) self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(input_size)) else: self.bias = None self.reset_parameters() def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." if name != "" else "" for k, v in state_dict.items(): if k.endswith(prefix + "weight"): if v.dim() == 3 and v.size(1) == 1: state_dict[k] = v.squeeze(1) def reset_parameters(self): nn.init.xavier_uniform_(self.weight) if self.bias is not None: nn.init.constant_(self.bias, 0.0) def forward(self, x, incremental_state=None): # during inference time, incremental BMM is faster if incremental_state is not None: T, B, C = x.size() K, H = self.kernel_size, self.num_heads R = C // H input_buffer = self._get_input_buffer(incremental_state) if input_buffer is None: input_buffer = x.new() x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) if self.kernel_size > 1: self._set_input_buffer( incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] ) x_unfold = x_unfold.view(T * B * H, R, -1) weight = self.weight if self.weight_softmax: weight = F.softmax(weight.float(), dim=1).type_as(weight) weight = weight[:, -x_unfold.size(2) :] K = weight.size(1) weight = ( weight.view(1, H, K) .expand(T * B, H, K) .contiguous() .view(T * B * H, K, 1) ) weight = self.weight_dropout_module(weight) output = torch.bmm(x_unfold, weight) # T*B*H x R x 1 output = output.view(T, B, C) return output # during training time, use CUDA kernel else: x = x.permute(1, 2, 0).contiguous() weight = self.weight if self.weight_softmax: weight = F.softmax(self.weight, -1) if self.weight_dropout_module.p: weight = self.weight_dropout_module(weight) return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1) def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: input_buffer = input_buffer.index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return utils.get_incremental_state(self, incremental_state, "input_buffer") def 
_set_input_buffer(self, incremental_state, new_buffer): return utils.set_incremental_state( self, incremental_state, "input_buffer", new_buffer ) def half(self): return self._apply(lambda t: t.half() if t.is_floating_point() else t)
4,799
33.782609
86
py
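LightconvLayer expects time-major input and dispatches between the CUDA kernel (training) and incremental BMM (inference); a shape-level sketch, assuming the lightconv_cuda extension from the setup.py record above has been built:

# Shape-level sketch for LightconvLayer above; requires the compiled
# lightconv_cuda extension (see the setup.py record earlier) and a GPU.
import torch
from fairseq.modules.lightconv_layer.lightconv_layer import LightconvLayer

T, B, C, H, K = 10, 2, 64, 8, 3
layer = LightconvLayer(C, kernel_size=K, padding_l=K - 1, num_heads=H).cuda()

x = torch.randn(T, B, C).cuda()
y = layer(x)                                   # training path: CUDA kernel
assert y.shape == (T, B, C)

y_step = layer(x[:1], incremental_state={})    # inference path: incremental BMM
assert y_step.shape == (1, B, C)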
sign-topic
sign-topic-main/fairseq/dataclass/constants.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from enum import Enum, EnumMeta from typing import List class StrEnumMeta(EnumMeta): # this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see # https://github.com/facebookresearch/hydra/issues/1156 @classmethod def __instancecheck__(cls, other): return "enum" in str(type(other)) class StrEnum(Enum, metaclass=StrEnumMeta): def __str__(self): return self.value def __eq__(self, other: str): return self.value == other def __repr__(self): return self.value def __hash__(self): return hash(str(self)) def ChoiceEnum(choices: List[str]): """return the Enum class used to enforce list of choices""" return StrEnum("Choices", {k: k for k in choices}) LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"]) DDP_BACKEND_CHOICES = ChoiceEnum( [ "c10d", # alias for pytorch_ddp "fully_sharded", # FullyShardedDataParallel from fairscale "legacy_ddp", "no_c10d", # alias for legacy_ddp "pytorch_ddp", "slowmo", ] ) DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"]) DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"]) GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"]) GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum( ["unigram", "ensemble", "vote", "dp", "bs"] ) ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"]) PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"]) PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
1,787
30.368421
107
py
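ChoiceEnum builds a string-valued Enum on the fly, which is what makes the *_CHOICES constants compare equal to plain strings; a quick sketch:

# Quick sketch of how the ChoiceEnum-based constants above behave as strings.
from fairseq.dataclass.constants import LOG_FORMAT_CHOICES, ChoiceEnum

COLOR_CHOICES = ChoiceEnum(["red", "green"])    # hypothetical choices
assert COLOR_CHOICES.red == "red"               # StrEnum.__eq__ compares values
assert str(LOG_FORMAT_CHOICES.tqdm) == "tqdm"   # __str__ returns the raw value
print([str(c) for c in LOG_FORMAT_CHOICES])     # ['json', 'none', 'simple', 'tqdm']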