| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
| string (7-90 chars) | string (81-315 chars) | string (4-228 chars) | string (0-32.8k chars) | string (1 class) | string (7 classes) | string (40 chars) | date (2026-01-04 14:38:15 to 2026-01-05 02:33:18) | bool (2 classes) |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/moses_tokenizer.py | fairseq/data/encoders/moses_tokenizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_tokenizer
@register_tokenizer('moses')
class MosesTokenizer(object):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--moses-source-lang', metavar='SRC',
help='source language')
parser.add_argument('--moses-target-lang', metavar='TARGET',
help='target language')
parser.add_argument('--moses-no-dash-splits', action='store_true', default=False,
help='don\'t apply dash split rules')
parser.add_argument('--moses-no-escape', action='store_true', default=False,
help='don\'t perform HTML escaping on apostrophes, quotes, etc.')
# fmt: on
def __init__(self, args):
self.args = args
if getattr(args, 'moses_source_lang', None) is None:
args.moses_source_lang = getattr(args, 'source_lang', 'en')
if getattr(args, 'moses_target_lang', None) is None:
args.moses_target_lang = getattr(args, 'target_lang', 'en')
try:
from sacremoses import MosesTokenizer, MosesDetokenizer
self.tok = MosesTokenizer(args.moses_source_lang)
self.detok = MosesDetokenizer(args.moses_target_lang)
except ImportError:
raise ImportError('Please install Moses tokenizer with: pip install sacremoses')
def encode(self, x: str) -> str:
return self.tok.tokenize(
x,
aggressive_dash_splits=(not self.args.moses_no_dash_splits),
return_str=True,
escape=(not self.args.moses_no_escape),
)
def decode(self, x: str) -> str:
return self.detok.detokenize(x.split())
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
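For reference, a minimal sketch of the round trip this wrapper performs, calling sacremoses directly rather than through fairseq's argument machinery (assumes `pip install sacremoses`; the sample sentence is illustrative, and `escape=False` is chosen here for readability while the wrapper defaults to escaping):

```python
from sacremoses import MosesDetokenizer, MosesTokenizer

tok = MosesTokenizer(lang='en')
detok = MosesDetokenizer(lang='en')

# mirrors MosesTokenizer.encode() above
encoded = tok.tokenize("Hello, world! It's a test.",
                       aggressive_dash_splits=True,
                       return_str=True,
                       escape=False)
print(encoded)  # roughly: "Hello , world ! It 's a test ."

# mirrors decode(): split on spaces and detokenize
print(detok.detokenize(encoded.split()))
```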
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/sentencepiece_bpe.py | fairseq/data/encoders/sentencepiece_bpe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import file_utils
from fairseq.data.encoders import register_bpe
@register_bpe('sentencepiece')
class SentencepieceBPE(object):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--sentencepiece-model', type=str,
help='path to sentencepiece model')
# fmt: on
def __init__(self, args):
sentencepiece_model = file_utils.cached_path(args.sentencepiece_model)
try:
import sentencepiece as spm
self.sp = spm.SentencePieceProcessor()
self.sp.Load(sentencepiece_model)
except ImportError:
raise ImportError('Please install sentencepiece with: pip install sentencepiece')
def encode(self, x: str) -> str:
return ' '.join(self.sp.EncodeAsPieces(x))
def decode(self, x: str) -> str:
return x.replace(' ', '').replace('\u2581', ' ').strip()
def is_beginning_of_word(self, x: str) -> bool:
if x in ['<unk>', '<s>', '</s>', '<pad>']:
# special elements are always considered beginnings
# HACK: this logic is already present in fairseq/tasks/masked_lm.py
# but these special tokens are also contained in the sentencepiece
# vocabulary which causes duplicate special tokens. This hack makes
# sure that they are all taken into account.
return True
return x.startswith('\u2581')
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
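A hedged usage sketch of the piece/space convention above, using the sentencepiece library directly; `spm.model` is a hypothetical path to any trained model:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load('spm.model')  # hypothetical path to a trained sentencepiece model

# encode(): pieces joined by spaces, word starts marked with U+2581 ('▁')
pieces = ' '.join(sp.EncodeAsPieces('Hello world'))

# decode(): drop the separating spaces, turn '▁' back into real spaces
restored = pieces.replace(' ', '').replace('\u2581', ' ').strip()
print(restored)  # restores 'Hello world' for a typical model
```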
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/nltk_tokenizer.py | fairseq/data/encoders/nltk_tokenizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_tokenizer
@register_tokenizer('nltk')
class NLTKTokenizer(object):
def __init__(self, source_lang=None, target_lang=None):
try:
from nltk.tokenize import word_tokenize
self.word_tokenize = word_tokenize
except ImportError:
raise ImportError('Please install nltk with: pip install nltk')
def encode(self, x: str) -> str:
return ' '.join(self.word_tokenize(x))
def decode(self, x: str) -> str:
return x
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/fastbpe.py | fairseq/data/encoders/fastbpe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import file_utils
from fairseq.data.encoders import register_bpe
@register_bpe('fastbpe')
class fastBPE(object):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--bpe-codes', type=str,
help='path to fastBPE BPE codes')
# fmt: on
def __init__(self, args):
if args.bpe_codes is None:
raise ValueError('--bpe-codes is required for --bpe=fastbpe')
codes = file_utils.cached_path(args.bpe_codes)
try:
import fastBPE
self.bpe = fastBPE.fastBPE(codes)
self.bpe_symbol = "@@ "
except ImportError:
raise ImportError('Please install fastBPE with: pip install fastBPE')
def encode(self, x: str) -> str:
return self.bpe.apply([x])[0]
def decode(self, x: str) -> str:
return (x + ' ').replace(self.bpe_symbol, '').rstrip()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
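The `@@ ` marker denotes a BPE piece that continues into the next token; a self-contained illustration of the decode step above (no fastBPE install needed, the tokens are made up):

```python
bpe_symbol = "@@ "
x = "un@@ believ@@ able tokens"
# same trick as fastBPE.decode(): pad with a space so a trailing
# continuation marker is also removed, then strip the padding
print((x + ' ').replace(bpe_symbol, '').rstrip())  # "unbelievable tokens"
```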
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/utils.py | fairseq/data/encoders/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import encoders
def get_whole_word_mask(args, dictionary):
bpe = encoders.build_bpe(args)
if bpe is not None:
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith('madeupword'):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(list(
map(is_beginning_of_word, range(len(dictionary)))
))
return mask_whole_words
return None
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
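A self-contained illustration of the mask this helper builds, using a toy symbol list in place of a fairseq Dictionary (the vocabulary below is hypothetical): indices below `nspecial` and pieces that start a word are flagged with 1.

```python
import torch

symbols = ['<pad>', '</s>', '<unk>', '\u2581Hello', 'wor', 'ld', '\u2581world']
nspecial = 3  # the first three entries are special symbols

# mirrors is_beginning_of_word() for a sentencepiece-style vocabulary
mask_whole_words = torch.ByteTensor([
    int(i < nspecial or symbols[i].startswith('\u2581'))
    for i in range(len(symbols))
])
print(mask_whole_words)  # tensor([1, 1, 1, 1, 0, 0, 1], dtype=torch.uint8)
```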
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/space_tokenizer.py | fairseq/data/encoders/space_tokenizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from fairseq.data.encoders import register_tokenizer
@register_tokenizer('space')
class SpaceTokenizer(object):
def __init__(self, source_lang=None, target_lang=None):
self.space_tok = re.compile(r"\s+")
def encode(self, x: str) -> str:
return self.space_tok.sub(' ', x)
def decode(self, x: str) -> str:
return x
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/bytes.py | fairseq/data/encoders/bytes.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_bpe
from fairseq.data.encoders.byte_utils import (byte_encode, smart_byte_decode,
SPACE, SPACE_ESCAPE)
@register_bpe('bytes')
class Bytes(object):
def __init__(self, args):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
encoded = byte_encode(x)
escaped = encoded.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
unescaped = x.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
return smart_byte_decode(unescaped)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/__init__.py | fairseq/data/encoders/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY = registry.setup_registry(
'--tokenizer',
default=None,
)
build_bpe, register_bpe, BPE_REGISTRY = registry.setup_registry(
'--bpe',
default=None,
)
# automatically import any Python files in the encoders/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.data.encoders.' + module)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
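A hedged sketch of what this registry enables: the decorator below registers a hypothetical lowercasing tokenizer so it becomes selectable on the command line as `--tokenizer lower`.

```python
from fairseq.data.encoders import register_tokenizer

@register_tokenizer('lower')
class LowercaseTokenizer(object):
    """Hypothetical tokenizer: selectable via --tokenizer lower."""

    def __init__(self, args):
        pass

    def encode(self, x: str) -> str:
        return x.lower()

    def decode(self, x: str) -> str:
        return x
```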
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/characters.py | fairseq/data/encoders/characters.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.encoders import register_bpe
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
@register_bpe('characters')
class Characters(object):
def __init__(self, args):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
escaped = x.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
return x.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
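A self-contained round trip of the character-level scheme above: spaces are escaped to U+2581, every character becomes its own space-separated token, and decode reverses both steps.

```python
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)  # '▁'

x = 'ab cd'
encoded = SPACE.join(list(x.replace(SPACE, SPACE_ESCAPE)))  # 'a b ▁ c d'
decoded = encoded.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
assert decoded == x
```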
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/byte_utils.py | fairseq/data/encoders/byte_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
WHITESPACE_NORMALIZER = re.compile(r'\s+')
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
# excluding non-breaking space (160) here
PRINTABLE_LATIN = set(
list(range(32, 126 + 1)) + list(range(161, 172 + 1)) +
list(range(174, 255 + 1))
)
BYTE_TO_BCHAR = {
b: chr(b) if b in PRINTABLE_LATIN else chr(256 + b) for b in range(256)
}
BCHAR_TO_BYTE = {bc: b for b, bc in BYTE_TO_BCHAR.items()}
def byte_encode(x: str) -> str:
normalized = WHITESPACE_NORMALIZER.sub(SPACE, x)
return ''.join([BYTE_TO_BCHAR[b] for b in normalized.encode('utf-8')])
def byte_decode(x: str) -> str:
try:
return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode('utf-8')
except ValueError:
return ''
def smart_byte_decode(x: str) -> str:
output = byte_decode(x)
if output == '':
# if decoding fails, use DP to recover the maximum number of valid chars
n_bytes = len(x)
f = [0 for _ in range(n_bytes + 1)]
pt = [0 for _ in range(n_bytes + 1)]
for i in range(1, n_bytes + 1):
f[i], pt[i] = f[i - 1], i - 1
for j in range(1, min(4, i) + 1):
if f[i - j] + 1 > f[i] and len(byte_decode(x[i - j: i])) > 0:
f[i], pt[i] = f[i - j] + 1, i - j
cur_pt = n_bytes
while cur_pt > 0:
if f[cur_pt] == f[pt[cur_pt]] + 1:
output = byte_decode(x[pt[cur_pt]: cur_pt]) + output
cur_pt = pt[cur_pt]
return output
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
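A hedged demo of that recovery path (assumes fairseq itself is importable): dropping one byte of a multi-byte UTF-8 character makes plain `byte_decode` fail, while the dynamic program in `smart_byte_decode` keeps every character that is still decodable.

```python
from fairseq.data.encoders.byte_utils import (byte_decode, byte_encode,
                                              smart_byte_decode)

encoded = byte_encode('héllo')          # 'é' becomes two byte-characters
broken = encoded[:1] + encoded[2:]      # drop the first byte of 'é'
print(repr(byte_decode(broken)))        # '' -- no longer valid UTF-8
print(repr(smart_byte_decode(broken)))  # 'hllo' -- best valid recovery
```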
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/encoders/byte_bpe.py | fairseq/data/encoders/byte_bpe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import file_utils
from fairseq.data.encoders import register_bpe
from fairseq.data.encoders.byte_utils import (byte_encode, smart_byte_decode,
SPACE, SPACE_ESCAPE)
@register_bpe('byte_bpe')
class ByteBPE(object):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--sentencepiece-model-path', type=str,
help='path to sentencepiece model')
# fmt: on
def __init__(self, args):
vocab = file_utils.cached_path(args.sentencepiece_model_path)
try:
import sentencepiece as spm
self.sp = spm.SentencePieceProcessor()
self.sp.Load(vocab)
except ImportError:
raise ImportError('Please install sentencepiece with: pip install sentencepiece')
def encode(self, x: str) -> str:
byte_encoded = byte_encode(x)
return SPACE.join(self.sp.EncodeAsPieces(byte_encoded))
@staticmethod
def decode(x: str) -> str:
unescaped = x.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
return smart_byte_decode(unescaped)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/audio/raw_audio_dataset.py | fairseq/data/audio/raw_audio_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import logging
import numpy as np
import sys
import torch
import torch.nn.functional as F
from .. import FairseqDataset
logger = logging.getLogger(__name__)
class RawAudioDataset(FairseqDataset):
def __init__(
self,
sample_rate,
max_sample_size=None,
min_sample_size=None,
shuffle=True,
min_length=0,
pad=False,
normalize=False,
):
super().__init__()
self.sample_rate = sample_rate
self.sizes = []
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.min_sample_size = min_sample_size
self.min_length = min_length
self.pad = pad
self.shuffle = shuffle
self.normalize = normalize
def __getitem__(self, index):
raise NotImplementedError()
def __len__(self):
return len(self.sizes)
def postprocess(self, feats, curr_sample_rate):
if feats.dim() == 2:
feats = feats.mean(-1)
if curr_sample_rate != self.sample_rate:
raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}")
assert feats.dim() == 1, feats.dim()
if self.normalize:
with torch.no_grad():
feats = F.layer_norm(feats, feats.shape)
return feats
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end]
def collater(self, samples):
samples = [
s
for s in samples
if s["source"] is not None
]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_sources[i] = self.crop_to_max_size(source, target_size)
input = {"source": collated_sources}
if self.pad:
input["padding_mask"] = padding_mask
return {"id": torch.LongTensor([s["id"] for s in samples]), "net_input": input}
def num_tokens(self, index):
return self.size(index)
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
if self.pad:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
class FileAudioDataset(RawAudioDataset):
def __init__(
self,
manifest_path,
sample_rate,
max_sample_size=None,
min_sample_size=None,
shuffle=True,
min_length=0,
pad=False,
normalize=False,
):
super().__init__(
sample_rate=sample_rate,
max_sample_size=max_sample_size,
min_sample_size=min_sample_size,
shuffle=shuffle,
min_length=min_length,
pad=pad,
normalize=normalize,
)
self.fnames = []
skipped = 0
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for line in f:
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_length is not None and sz < min_length:
skipped += 1
continue
self.fnames.append(items[0])
self.sizes.append(sz)
logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
def __getitem__(self, index):
import soundfile as sf
fname = os.path.join(self.root_dir, self.fnames[index])
wav, curr_sample_rate = sf.read(fname)
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
return {"id": index, "source": feats}
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
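A hedged sketch of the collater's two modes on toy waveforms: with `pad=True` the batch is padded to the longest clip and a padding mask marks the filled positions; with `pad=False` longer clips would instead be randomly cropped down to the shortest.

```python
import torch
from fairseq.data.audio.raw_audio_dataset import RawAudioDataset

ds = RawAudioDataset(sample_rate=16000, pad=True)
batch = ds.collater([
    {"id": 0, "source": torch.ones(4)},
    {"id": 1, "source": torch.ones(2)},
])
print(batch["net_input"]["source"].shape)     # torch.Size([2, 4])
print(batch["net_input"]["padding_mask"][1])  # last two positions are True
```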
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/audio/__init__.py | fairseq/data/audio/__init__.py | python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false | |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/legacy/masked_lm_dictionary.py | fairseq/data/legacy/masked_lm_dictionary.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data import Dictionary
class MaskedLMDictionary(Dictionary):
"""
Dictionary for Masked Language Modelling tasks. This extends Dictionary by
adding the mask symbol.
"""
def __init__(
self,
pad='<pad>',
eos='</s>',
unk='<unk>',
mask='<mask>',
):
super().__init__(pad=pad, eos=eos, unk=unk)
self.mask_word = mask
self.mask_index = self.add_symbol(mask)
self.nspecial = len(self.symbols)
def mask(self):
"""Helper to get index of mask symbol"""
return self.mask_index
class BertDictionary(MaskedLMDictionary):
"""
Dictionary for BERT task. This extends MaskedLMDictionary by adding support
for cls and sep symbols.
"""
def __init__(
self,
pad='<pad>',
eos='</s>',
unk='<unk>',
mask='<mask>',
cls='<cls>',
sep='<sep>'
):
super().__init__(pad=pad, eos=eos, unk=unk, mask=mask)
self.cls_word = cls
self.sep_word = sep
self.cls_index = self.add_symbol(cls)
self.sep_index = self.add_symbol(sep)
self.nspecial = len(self.symbols)
def cls(self):
"""Helper to get index of cls symbol"""
return self.cls_index
def sep(self):
"""Helper to get index of sep symbol"""
return self.sep_index
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
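A small sketch of how the extended dictionaries are used (assumes fairseq is importable and the base Dictionary's default constructor): the extra symbols are appended after the base specials and exposed through helpers mirroring `pad()`/`eos()`/`unk()`.

```python
from fairseq.data.legacy import BertDictionary

d = BertDictionary()
print(d.mask(), d.cls(), d.sep())  # indices of <mask>, <cls>, <sep>
print(d.nspecial)                  # the added symbols count as special
```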
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/legacy/block_pair_dataset.py | fairseq/data/legacy/block_pair_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq.data import FairseqDataset
class BlockPairDataset(FairseqDataset):
"""Break a Dataset of tokens into sentence pair blocks for next sentence
prediction as well as masked language model.
The high-level logic is:
1. break the input tensor into tensor blocks
2. pair the blocks with 50% next sentence and 50% random sentence
3. return paired blocks as well as related segment labels
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes: array of sentence lengths
dictionary: dictionary for the task
block_size: maximum block size
break_mode: mode for breaking the corpus into block pairs. Currently we
support 2 modes
doc: respect document boundaries; each part of the pair should belong to one document
none: don't respect any boundary and cut tokens evenly
short_seq_prob: probability for generating shorter block pairs
doc_break_size: Size for empty line separating documents. Typically 1 if
the sentences have eos, 0 otherwise.
"""
def __init__(
self,
dataset,
dictionary,
sizes,
block_size,
break_mode="doc",
short_seq_prob=0.1,
doc_break_size=1,
):
super().__init__()
self.dataset = dataset
self.pad = dictionary.pad()
self.eos = dictionary.eos()
self.cls = dictionary.cls()
self.mask = dictionary.mask()
self.sep = dictionary.sep()
self.break_mode = break_mode
self.dictionary = dictionary
self.short_seq_prob = short_seq_prob
self.block_indices = []
assert len(dataset) == len(sizes)
if break_mode == "doc":
cur_doc = []
for sent_id, sz in enumerate(sizes):
assert doc_break_size == 0 or sz != 0, (
"when doc_break_size is non-zero, we expect documents to be"
"separated by a blank line with a single eos."
)
# empty line as document separator
if sz == doc_break_size:
if len(cur_doc) == 0:
continue
self.block_indices.append(cur_doc)
cur_doc = []
else:
cur_doc.append(sent_id)
max_num_tokens = block_size - 3 # Account for [CLS], [SEP], [SEP]
self.sent_pairs = []
self.sizes = []
for doc_id, doc in enumerate(self.block_indices):
self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
elif break_mode is None or break_mode == "none":
# each block should have half of the block size since we are constructing a block pair
sent_length = (block_size - 3) // 2
total_len = sum(dataset.sizes)
length = math.ceil(total_len / sent_length)
def block_at(i):
start = i * sent_length
end = min(start + sent_length, total_len)
return (start, end)
sent_indices = np.array([block_at(i) for i in range(length)])
sent_sizes = np.array([e - s for s, e in sent_indices])
dataset_index = self._sent_to_dataset_index(sent_sizes)
# pair sentences
self._pair_sentences(dataset_index)
else:
raise ValueError("Invalid break_mode: " + break_mode)
def _pair_sentences(self, dataset_index):
"""
Given a list of evenly cut blocks/sentences, pair these sentences with 50%
consecutive sentences and 50% random sentences.
This is used for the 'none' break mode.
"""
# pair sentences
for sent_id, sent in enumerate(dataset_index):
next_sent_label = (
1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0
)
if next_sent_label:
next_sent = dataset_index[sent_id + 1]
else:
next_sent = dataset_index[
self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1])
]
self.sent_pairs.append((sent, next_sent, next_sent_label))
# The current blocks don't include the special tokens but the
# sizes already account for this
self.sizes.append(3 + sent[3] + next_sent[3])
def _sent_to_dataset_index(self, sent_sizes):
"""
Build index mapping block indices to the underlying dataset indices
"""
dataset_index = []
ds_idx, ds_remaining = -1, 0
for to_consume in sent_sizes:
sent_size = to_consume
if ds_remaining == 0:
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
start_ds_idx = ds_idx
start_offset = sent_sizes[ds_idx] - ds_remaining
while to_consume > ds_remaining:
to_consume -= ds_remaining
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
ds_remaining -= to_consume
dataset_index.append(
(
start_ds_idx, # starting index in dataset
start_offset, # starting offset within starting index
ds_idx, # ending index in dataset
sent_size, # sentence length
)
)
assert ds_remaining == 0
assert ds_idx == len(self.dataset) - 1
return dataset_index
def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
"""
Go through a single document and generate sentence pairs from it
"""
current_chunk = []
current_length = 0
curr = 0
# To provide more randomness, we decrease target seq length for parts of
# samples (10% by default). Note that max_num_tokens is the hard threshold
# for batching and will never be changed.
target_seq_length = max_num_tokens
if np.random.random() < self.short_seq_prob:
target_seq_length = np.random.randint(2, max_num_tokens)
# loop through all sentences in document
while curr < len(doc):
sent_id = doc[curr]
current_chunk.append(sent_id)
current_length = sum(sizes[current_chunk])
# split chunk and generate pair when exceed target_seq_length or
# finish the loop
if curr == len(doc) - 1 or current_length >= target_seq_length:
# split the chunk into 2 parts
a_end = 1
if len(current_chunk) > 2:
a_end = np.random.randint(1, len(current_chunk) - 1)
sent_a = current_chunk[:a_end]
len_a = sum(sizes[sent_a])
# generate next sentence label, note that if there is only 1 sentence
# in current chunk, label is always 0
next_sent_label = (
1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0
)
if not next_sent_label:
# if next sentence label is 0, sample sent_b from a random doc
target_b_length = target_seq_length - len_a
rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
random_doc = self.block_indices[rand_doc_id]
random_start = np.random.randint(0, len(random_doc))
sent_b = []
len_b = 0
for j in range(random_start, len(random_doc)):
sent_b.append(random_doc[j])
len_b = sum(sizes[sent_b])
if len_b >= target_b_length:
break
# put the unused second part of the chunk back so it is processed later
num_unused_segments = len(current_chunk) - a_end
curr -= num_unused_segments
else:
# if next sentence label is 1, use the second part of the chunk as sent_b
sent_b = current_chunk[a_end:]
len_b = sum(sizes[sent_b])
# currently sent_a and sent_b may be longer than max_num_tokens,
# truncate them and return block idx and offsets for them
sent_a, sent_b = self._truncate_sentences(
sent_a, sent_b, max_num_tokens
)
self.sent_pairs.append((sent_a, sent_b, next_sent_label))
self.sizes.append(3 + sent_a[3] + sent_b[3])
current_chunk = []
curr += 1
def _skip_sampling(self, total, skip_ids):
"""
Generate a random integer which is not in skip_ids. Sample range is [0, total)
TODO: ids in skip_ids should be consecutive, we can extend it to more generic version later
"""
rand_id = np.random.randint(total - len(skip_ids))
return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)
def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
"""
Truncate a pair of sentences to keep the total length under max_num_tokens
Logic:
1. Truncate the longer sentence
2. Tokens to be truncated could be at the beginning or the end of the sentence
Returns:
Truncated sentences represented by dataset idx
"""
len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b])
front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
while True:
total_length = (
len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b
)
if total_length <= max_num_tokens:
break
if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b:
if np.random.rand() < 0.5:
front_cut_a += 1
else:
end_cut_a += 1
else:
if np.random.rand() < 0.5:
front_cut_b += 1
else:
end_cut_b += 1
# calculate ds indices as well as offsets and return
truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
return truncated_sent_a, truncated_sent_b
def _cut_sentence(self, sent, front_cut, end_cut):
"""
Cut a sentence based on the numbers of tokens to be cut from beginning and end
Represent the sentence as dataset idx and return
"""
start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0
target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut
while front_cut > 0:
if self.dataset.sizes[start_ds_idx] > front_cut:
offset += front_cut
break
else:
front_cut -= self.dataset.sizes[start_ds_idx]
start_ds_idx += 1
while end_cut > 0:
if self.dataset.sizes[end_ds_idx] > end_cut:
break
else:
end_cut -= self.dataset.sizes[end_ds_idx]
end_ds_idx -= 1
return start_ds_idx, offset, end_ds_idx, target_len
def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
"""
Fetch a block of tokens based on its dataset idx
"""
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
s, e = offset, offset + length
return buffer[s:e]
def __getitem__(self, index):
block1, block2, next_sent_label = self.sent_pairs[index]
block1 = self._fetch_block(*block1)
block2 = self._fetch_block(*block2)
return block1, block2, next_sent_label
def __len__(self):
return len(self.sizes)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
prefetch_idx = set()
for index in indices:
for block1, block2, _ in [self.sent_pairs[index]]:
for ds_idx in range(block1[0], block1[2] + 1):
prefetch_idx.add(ds_idx)
for ds_idx in range(block2[0], block2[2] + 1):
prefetch_idx.add(ds_idx)
self.dataset.prefetch(prefetch_idx)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
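A self-contained illustration of the `_skip_sampling` trick above: sample from a range shrunk by the number of excluded (consecutive) ids, then shift any draw that lands at or past the excluded block.

```python
import numpy as np

def skip_sampling(total, skip_ids):
    # sample from [0, total - len(skip_ids)), then shift past skip_ids
    rand_id = np.random.randint(total - len(skip_ids))
    return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)

draws = {skip_sampling(10, [3, 4]) for _ in range(1000)}
assert 3 not in draws and 4 not in draws  # excluded ids can never be drawn
```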
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/legacy/masked_lm_dataset.py | fairseq/data/legacy/masked_lm_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from typing import Dict, List, Tuple
from fairseq.data import FairseqDataset, data_utils
from fairseq.data import Dictionary
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.token_block_dataset import TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
class MaskedLMDataset(FairseqDataset):
"""
A wrapper Dataset for masked language modelling. The dataset
wraps around TokenBlockDataset or BlockPairDataset and creates a batch
where the input blocks are masked according to the specified masking
probability. Additionally the batch can also contain sentence level targets
if this is specified.
Args:
dataset: Dataset which generates blocks of data. Only BlockPairDataset,
TokenBlockDataset and ConcatDataset are supported.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of padding token in dictionary
mask_idx: Id of mask token in dictionary
classif_token_idx: Id of classification token in dictionary. This is the
token associated with the sentence embedding (Eg: CLS for BERT)
sep_token_idx: Id of separator token in dictionary
(Eg: SEP in BERT)
seed: Seed for random number generator for reproducibility.
shuffle: Shuffle the elements before batching.
has_pairs: Specifies whether the underlying dataset
generates a pair of blocks along with a sentence_target or not.
Setting it to True assumes that the underlying dataset generates a
label for the pair of sentences which is surfaced as
sentence_target. The default value assumes a single block with no
sentence target.
segment_id: An optional segment id for filling in the segment labels
when we are in the single block setting (Eg: XLM). Default is 0.
masking_ratio: specifies what percentage of tokens in the block should be masked.
masking_prob: specifies the probability of a given token being
replaced with the "MASK" token.
random_token_prob: specifies the probability of a given token being
replaced by a random token from the vocabulary.
"""
def __init__(
self,
dataset: FairseqDataset,
sizes: np.ndarray,
vocab: Dictionary,
pad_idx: int,
mask_idx: int,
classif_token_idx: int,
sep_token_idx: int,
seed: int = 1,
shuffle: bool = True,
has_pairs: bool = True,
segment_id: int = 0,
masking_ratio: float = 0.15,
masking_prob: float = 0.8,
random_token_prob: float = 0.1
):
# Make sure the input datasets are the ones supported
assert (
isinstance(dataset, TokenBlockDataset) or
isinstance(dataset, BlockPairDataset) or
isinstance(dataset, ConcatDataset)
), "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " \
"ConcatDataset"
self.dataset = dataset
self.sizes = np.array(sizes)
self.vocab = vocab
self.pad_idx = pad_idx
self.mask_idx = mask_idx
self.classif_token_idx = classif_token_idx
self.sep_token_idx = sep_token_idx
self.shuffle = shuffle
self.seed = seed
self.has_pairs = has_pairs
self.segment_id = segment_id
self.masking_ratio = masking_ratio
self.masking_prob = masking_prob
self.random_token_prob = random_token_prob
# If we have only one block then sizes needs to be updated to include
# the classification token
if not has_pairs:
self.sizes = self.sizes + 1
def __getitem__(
self,
index: int
):
# if has_pairs, then expect 2 blocks and a sentence target
if self.has_pairs:
(block_one, block_two, sentence_target) = self.dataset[index]
else:
block_one = self.dataset[index]
return {
"id": index,
"block_one": block_one,
"block_two": block_two if self.has_pairs else None,
"sentence_target": sentence_target if self.has_pairs else None,
}
def __len__(self):
return len(self.dataset)
def _mask_block(
self,
sentence: np.ndarray,
mask_idx: int,
pad_idx: int,
dictionary_token_range: Tuple,
):
"""
Mask tokens for Masked Language Model training
Samples mask_ratio tokens that will be predicted by LM.
Note: this function may not be efficient enough since we do multiple
conversions between np and torch; we can replace them with torch
operators later.
Args:
sentence: 1d tensor to be masked
mask_idx: index to use for masking the sentence
pad_idx: index to use for masking the target for tokens we aren't
predicting
dictionary_token_range: range of indices in dictionary which can
be used for random word replacement
(e.g. without special characters)
Return:
masked_sent: masked sentence
target: target with words which we are not predicting replaced
by pad_idx
"""
masked_sent = np.copy(sentence)
sent_length = len(sentence)
mask_num = math.ceil(sent_length * self.masking_ratio)
mask = np.random.choice(sent_length, mask_num, replace=False)
target = np.copy(sentence)
for i in range(sent_length):
if i in mask:
rand = np.random.random()
# replace with mask if probability is less than masking_prob
# (Eg: 0.8)
if rand < self.masking_prob:
masked_sent[i] = mask_idx
# replace with random token if probability is less than
# masking_prob + random_token_prob (Eg: 0.9)
elif rand < (self.masking_prob + self.random_token_prob):
# sample random token from dictionary
masked_sent[i] = (
np.random.randint(
dictionary_token_range[0], dictionary_token_range[1]
)
)
else:
target[i] = pad_idx
return masked_sent, target
def _collate(
self,
samples: List[Dict],
pad_idx: int,
eos_idx: int
):
"""
Does the heavy lifting for creating a batch from the input list of
examples. The logic is as follows:
1. Mask the input blocks. In case has_pair is True then we have 2
blocks to mask.
2. Prepend the first masked block tensor with the special token
used as sentence embedding. Eg: CLS in BERT. This happens
irrespective of the value of has_pair.
3. If has_pair is True, then append the first masked block with the
special separator token (eg: SEP for BERT) and compute segment
label accordingly. In this case, also append the second masked
block with this special separator token and compute its segment
label.
4. For the targets tensor, prepend and append with padding index
accordingly.
5. Concatenate all tensors.
"""
if len(samples) == 0:
return {}
# To ensure determinism, we reset the state of the PRNG after every
# batch based on the seed and the first id of the batch. This ensures
# that across epochs we get the same mask for the same example. This
# is needed for reproducibility and is how BERT does masking
# TODO: Can we add determinism without this constraint?
with data_utils.numpy_seed(self.seed + samples[0]["id"]):
for s in samples:
# token range is needed for replacing with random token during
# masking
token_range = (self.vocab.nspecial, len(self.vocab))
# mask according to specified probabilities.
masked_blk_one, masked_tgt_one = self._mask_block(
s["block_one"], self.mask_idx, self.pad_idx, token_range,
)
tokens = np.concatenate([
[self.classif_token_idx], masked_blk_one
])
targets = np.concatenate([[self.pad_idx], masked_tgt_one])
segments = np.ones(len(tokens)) * self.segment_id
# if has_pairs is True then we need to add the SEP token to both
# the blocks after masking and re-compute segments based on the new
# lengths.
if self.has_pairs:
tokens_one = np.concatenate([tokens, [self.sep_token_idx]])
targets_one = np.concatenate([targets, [self.pad_idx]])
masked_blk_two, masked_tgt_two = self._mask_block(
s["block_two"], self.mask_idx, self.pad_idx, token_range)
tokens_two = np.concatenate(
[masked_blk_two, [self.sep_token_idx]])
targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]])
# block + 1 sep + 1 special (CLS)
segments_one = np.zeros(len(tokens_one))
# block + 1 sep
segments_two = np.ones(len(tokens_two))
tokens = np.concatenate([tokens_one, tokens_two])
targets = np.concatenate([targets_one, targets_two])
segments = np.concatenate([segments_one, segments_two])
s["source"] = torch.LongTensor(tokens)
s["segment_labels"] = torch.LongTensor(segments)
s["lm_target"] = torch.LongTensor(targets)
def merge(key):
return data_utils.collate_tokens(
[s[key] for s in samples], pad_idx, eos_idx, left_pad=False
)
return {
"id": torch.LongTensor([s["id"] for s in samples]),
"ntokens": sum(len(s["source"]) for s in samples),
"net_input": {
"src_tokens": merge("source"),
"segment_labels": merge("segment_labels"),
},
"lm_target": merge("lm_target"),
"sentence_target": torch.LongTensor(
[s["sentence_target"] for s in samples]
) if self.has_pairs else None,
"nsentences": len(samples),
}
def collater(
self,
samples: List[Dict]
):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return self._collate(samples, self.vocab.pad(), self.vocab.eos())
def num_tokens(
self,
index: int
):
"""
Return the number of tokens in a sample. This value is used to
enforce max-tokens during batching.
"""
return self.sizes[index]
def size(
self,
index: int
):
"""
Return an example's size as a float or tuple. This value is used when
filtering a dataset with max-positions.
"""
return self.sizes[index]
def ordered_indices(self):
"""
Return an ordered list of indices. Batches will be constructed based
on this order.
"""
if self.shuffle:
return np.random.permutation(len(self))
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
self.dataset.prefetch(indices)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
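A self-contained sketch of the 80/10/10 rule `_mask_block` applies to each selected position: replace with the mask token with probability 0.8, with a random vocabulary token with probability 0.1, and otherwise keep the original token (which is still predicted).

```python
import numpy as np

def mask_token(token, mask_idx, token_range,
               masking_prob=0.8, random_token_prob=0.1):
    rand = np.random.random()
    if rand < masking_prob:
        return mask_idx                          # 80%: the mask token
    if rand < masking_prob + random_token_prob:
        return np.random.randint(*token_range)   # 10%: a random token
    return token  # 10%: unchanged, but still a prediction target
```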
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/data/legacy/__init__.py | fairseq/data/legacy/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary
from .block_pair_dataset import BlockPairDataset
from .masked_lm_dataset import MaskedLMDataset
__all__ = [
'BertDictionary',
'BlockPairDataset',
'MaskedLMDataset',
'MaskedLMDictionary',
]
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/scoring/__init__.py | fairseq/scoring/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
_build_scoring, register_scoring, SCORING_REGISTRY = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(args, tgt_dict):
from fairseq import utils
if args.sacrebleu:
utils.deprecation_warning(
"--sacrebleu is deprecated. Please use --scoring sacrebleu instead."
)
args.scoring = "sacrebleu"
if args.scoring == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
else:
return _build_scoring(args)
# automatically import any Python files in the current directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/scoring/wer.py | fairseq/scoring/wer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.scoring import register_scoring
@register_scoring("wer")
class WerScorer(object):
def __init__(self, *unused):
self.reset()
def reset(self):
self.distance = 0
self.ref_length = 0
def add_string(self, ref, pred):
import editdistance
ref_items = ref.split()
pred_items = pred.split()
self.distance += editdistance.eval(ref_items, pred_items)
self.ref_length += len(ref_items)
def result_string(self):
return f"WER: {self.score()}"
def score(self):
return (
100.0 * self.distance / self.ref_length if self.ref_length > 0 else 0
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
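A self-contained check of the word error rate the scorer reports (assumes `pip install editdistance`): edit distance over whitespace tokens, normalized by the reference length.

```python
import editdistance

ref, pred = "the cat sat", "the cat sat down"
wer = 100.0 * editdistance.eval(ref.split(), pred.split()) / len(ref.split())
print(f"WER: {wer:.2f}")  # one insertion over three reference words ~ 33.33
```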
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/scoring/bleu.py | fairseq/scoring/bleu.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import sys
import torch
from fairseq.scoring import register_scoring
class BleuStat(ctypes.Structure):
_fields_ = [
("reflen", ctypes.c_size_t),
("predlen", ctypes.c_size_t),
("match1", ctypes.c_size_t),
("count1", ctypes.c_size_t),
("match2", ctypes.c_size_t),
("count2", ctypes.c_size_t),
("match3", ctypes.c_size_t),
("count3", ctypes.c_size_t),
("match4", ctypes.c_size_t),
("count4", ctypes.c_size_t),
]
@register_scoring("sacrebleu")
class SacrebleuScorer(object):
def __init__(self, *unused):
import sacrebleu
self.sacrebleu = sacrebleu
self.reset()
def reset(self, one_init=False):
if one_init:
raise NotImplementedError
self.ref = []
self.sys = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.sys.append(pred)
def score(self, order=4):
return self.result_string(order).score
def result_string(self, order=4):
if order != 4:
raise NotImplementedError
return self.sacrebleu.corpus_bleu(self.sys, [self.ref]).format()
@register_scoring("bleu")
class Scorer(object):
def __init__(self, pad, eos, unk):
self.stat = BleuStat()
self.pad = pad
self.eos = eos
self.unk = unk
try:
from fairseq import libbleu
except ImportError as e:
sys.stderr.write("ERROR: missing libbleu.so. run `pip install --editable .`\n")
raise e
self.C = ctypes.cdll.LoadLibrary(libbleu.__file__)
self.reset()
def reset(self, one_init=False):
if one_init:
self.C.bleu_one_init(ctypes.byref(self.stat))
else:
self.C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref)))
if not isinstance(pred, torch.IntTensor):
raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
self.C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos),
)
def score(self, order=4):
psum = sum(
math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order]
)
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = "BLEU{} = {:2.2f}, {:2.1f}"
for _ in range(1, order):
fmt += "/{:2.1f}"
fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})"
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(
order,
self.score(order=order),
*bleup,
self.brevity(),
self.stat.predlen / self.stat.reflen,
self.stat.predlen,
self.stat.reflen
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
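A worked example of the brevity penalty computed in `brevity()` above, `BP = min(1, e^(1 - reflen/predlen))`, which penalizes hypotheses shorter than the reference:

```python
import math

reflen, predlen = 10, 8
bp = min(1, math.exp(1 - reflen / predlen))
print(f"BP = {bp:.3f}")  # ~0.779 for an 8-token hypothesis vs. 10-token reference
```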
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/train.py | bert/train.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import itertools
import os
import math
import torch
import numpy as np
import scipy.stats
from fairseq import distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
def main(args):
if args.max_tokens is None:
args.max_tokens = 10240
print(args)
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
torch.cuda.set_device(args.device_id)
torch.manual_seed(args.seed)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load dataset splits
load_dataset_splits(task, ['train', 'valid'])
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))
# Make a dummy batch to (i) warm the caching allocator and (ii) as a
# placeholder DistributedDataParallel when there's an uneven number of
# batches per worker.
max_positions = utils.resolve_max_positions(
task.max_positions(),
model.max_positions(),
)
dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)
# Build trainer
trainer = Trainer(args, task, model, criterion, dummy_batch)
print('| training on {} GPUs'.format(args.distributed_world_size))
print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
# Initialize dataloader
epoch_itr = task.get_batch_iterator(
dataset=task.dataset(args.train_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=True,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
)
# Load bert model if one is available
if hasattr(args, 'load_bert') and args.load_bert:
print('| load bert model from {}'.format(args.load_bert))
load_bert_model(args, trainer)
# Load the latest checkpoint if one is available
if not load_checkpoint(args, trainer, epoch_itr):
trainer.dummy_train_step([dummy_batch])
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_subsets = args.valid_subset.split(',')
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
# train for one epoch
if epoch_itr.epoch > 0:
epoch_itr_state = epoch_itr.state_dict()
epoch_itr = task.get_batch_iterator(
dataset=task.dataset(args.train_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=True,
required_batch_size_multiple=8,
seed=args.seed + epoch_itr.epoch,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
)
epoch_itr.load_state_dict(epoch_itr_state)
train(args, trainer, task, epoch_itr)
if epoch_itr.epoch % args.validate_interval == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args.save_interval == 0:
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Update parameters every N batches
if epoch_itr.epoch <= len(args.update_freq):
update_freq = args.update_freq[epoch_itr.epoch - 1]
else:
update_freq = args.update_freq[-1]
# Initialize data iterator
itr = epoch_itr.next_epoch_itr()
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch, no_progress_bar='simple',
)
extra_meters = collections.defaultdict(lambda: AverageMeter())
first_valid = args.valid_subset.split(',')[0]
max_update = args.max_update or math.inf
num_batches = len(epoch_itr)
for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
log_output = trainer.train_step(samples)
if log_output is None:
continue
# log mid-epoch stats
stats = get_training_stats(trainer)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size', 'x', 'y', 'tp', 'tn', 'fp', 'fn']:
continue # these are already logged above
if 'loss' in k:
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats)
# ignore the first mini-batch in words-per-second calculation
if i == 0:
trainer.get_meter('wps').reset()
num_updates = trainer.get_num_updates()
if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:
valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
# reset training meters
for k in [
'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
]:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
def get_training_stats(trainer):
stats = collections.OrderedDict()
stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss').avg
stats['nll_loss'] = '{:.3f}'.format(nll_loss)
else:
nll_loss = trainer.get_meter('train_loss').avg
stats['ppl'] = get_perplexity(nll_loss)
stats['wps'] = round(trainer.get_meter('wps').avg)
stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
stats['wpb'] = round(trainer.get_meter('wpb').avg)
stats['bsz'] = round(trainer.get_meter('bsz').avg)
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
stats['oom'] = trainer.get_meter('oom').avg
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch,
prefix='valid on \'{}\' subset'.format(subset),
no_progress_bar='simple'
)
# reset validation loss meters
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda: AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size', 'x', 'y', 'tp', 'tn', 'fp', 'fn']:
continue
extra_meters[k].update(v)
# log validation stats
stats = get_valid_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats)
valid_losses.append(stats['valid_loss'])
return valid_losses
def get_valid_stats(trainer):
stats = collections.OrderedDict()
stats['valid_loss'] = trainer.get_meter('valid_loss').avg
if trainer.get_meter('valid_nll_loss').count > 0:
nll_loss = trainer.get_meter('valid_nll_loss').avg
stats['valid_nll_loss'] = nll_loss
else:
nll_loss = trainer.get_meter('valid_loss').avg
stats['valid_ppl'] = get_perplexity(nll_loss)
stats['num_updates'] = trainer.get_num_updates()
if hasattr(save_checkpoint, 'best'):
stats['best'] = min(save_checkpoint.best, stats['valid_loss'])
if trainer.cache['valid_x'] and trainer.cache['valid_y']:
x = np.concatenate(trainer.cache['valid_x'])
y = np.concatenate(trainer.cache['valid_y'])
stats['pearson'] = scipy.stats.pearsonr(x, y)[0]
stats['spearman'] = scipy.stats.spearmanr(x, y)[0]
stats['acc'] = 0.5 * (stats['pearson'] + stats['spearman'])
trainer.cache['valid_x'], trainer.cache['valid_y'] = [], []
if trainer.cache['valid_tp'] and trainer.cache['valid_tn'] and trainer.cache['valid_fp'] and trainer.cache['valid_fn']:
tp = sum(trainer.cache['valid_tp'])
tn = sum(trainer.cache['valid_tn'])
fp = sum(trainer.cache['valid_fp'])
fn = sum(trainer.cache['valid_fn'])
tmp = 2 * tp + fp + fn
stats['f1'] = (2 * tp) / tmp if tmp else 0
tmp = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
stats['mcc'] = (tp * tn - fp * fn) / (tmp ** 0.5) if tmp else 0
trainer.cache['valid_tp'], trainer.cache['valid_tn'], trainer.cache['valid_fp'], trainer.cache['valid_fn'] = [], [], [], []
return stats
def get_perplexity(loss):
try:
return '{:.2f}'.format(math.exp(loss))
except OverflowError:
return float('inf')
def save_checkpoint(args, trainer, epoch_itr, val_loss):
if args.no_save or not distributed_utils.is_master(args):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
end_of_epoch and not args.no_epoch_checkpoints and
epoch % args.save_interval == 0
)
checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
not end_of_epoch and args.save_interval_updates > 0 and
updates % args.save_interval_updates == 0
)
checkpoint_conds['checkpoint_best.pt'] = (
val_loss is not None and
(not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
)
checkpoint_conds['checkpoint_last.pt'] = True # keep this last so that it's a symlink
prev_best = getattr(save_checkpoint, 'best', val_loss)
if val_loss is not None:
save_checkpoint.best = min(val_loss, prev_best)
extra_state = {
'best': save_checkpoint.best,
'train_iterator': epoch_itr.state_dict(),
'val_loss': val_loss,
}
checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
if len(checkpoints) > 0:
for cp in checkpoints:
trainer.save_checkpoint(cp, extra_state)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt')
for old_chk in checkpoints[args.keep_interval_updates:]:
os.remove(old_chk)
def load_checkpoint(args, trainer, epoch_itr):
"""Load a checkpoint and replay dataloader to match."""
os.makedirs(args.save_dir, exist_ok=True)
checkpoint_path = os.path.join(args.save_dir, args.restore_file)
if os.path.isfile(checkpoint_path):
extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,
eval(args.optimizer_overrides))
if extra_state is not None:
# replay train iterator to match checkpoint
epoch_itr.load_state_dict(extra_state['train_iterator'])
print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
trainer.lr_step(epoch_itr.epoch)
trainer.lr_step_update(trainer.get_num_updates())
if 'best' in extra_state:
save_checkpoint.best = extra_state['best']
return True
return False
def load_bert_model(args, trainer):
extra_state, _, _ = utils.load_bert_state(args.load_bert, trainer.get_model(), args)
def load_dataset_splits(task, splits):
for split in splits:
if split == 'train':
task.load_dataset(split, combine=True)
else:
for k in itertools.count():
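                # Probe for numbered variants of the split ('valid', 'valid1',
                # 'valid2', ...) and stop at the first missing one, re-raising
                # only if even the base split is absent.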
split_k = split + (str(k) if k > 0 else '')
try:
task.load_dataset(split_k, combine=False)
except FileNotFoundError as e:
if k > 0:
break
raise e
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
if args.distributed_port > 0 or args.distributed_init_method is not None:
from distributed_train import main as distributed_main
distributed_main(args)
elif args.distributed_world_size > 1:
from multiprocessing_train import main as multiprocessing_main
multiprocessing_main(args)
else:
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/inference.py | bert/inference.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference with a trained model (or an ensemble of models) and write one prediction per example to the output file.
"""
import os
import torch
import torch.nn.functional as F
from fairseq import options, progress_bar, tasks, utils
def main(parsed_args):
assert parsed_args.path is not None, '--path required for evaluation!'
print(parsed_args)
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
print('| loading model(s) from {}'.format(parsed_args.path))
models, args = utils.load_ensemble_for_inference(parsed_args.path.split(':'), task)
args.__dict__.update(parsed_args.__dict__)
print(args)
task.args = args
# Load dataset splits
task.load_dataset(args.gen_subset)
print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.make_generation_fast_()
if use_cuda:
model.cuda()
if args.fp16:
model.half()
assert len(models) > 0
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens or 10000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
num_shards=args.num_shards,
shard_id=args.shard_id,
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
with progress_bar.build_progress_bar(args, itr, no_progress_bar='simple') as progress:
with open(args.output, 'w', encoding='utf-8') as fo:
if args.criterion == 'mean_squared_error':
ans = torch.empty(len(task.dataset(args.gen_subset)), dtype=torch.float)
else:
ans = torch.empty(len(task.dataset(args.gen_subset)), dtype=torch.long)
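            # Ensemble predictions are averaged in probability space, then
            # thresholded at 0.5 (binary), taken as-is (regression) or
            # argmax'ed (classification); `ans` is indexed by sample id so the
            # output file ends up in dataset order.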
with torch.no_grad():
for model in models:
model.eval()
for sample in progress:
if use_cuda:
sample = utils.move_to_cuda(sample)
if args.criterion.endswith('binary'):
probs = torch.stack([F.sigmoid(model(**sample['net_input']).view(-1))
for model in models], dim=0).mean(dim=0)
preds = torch.stack([sample['id'], (probs >= 0.5).long()], dim=1)
elif args.criterion == 'mean_squared_error':
probs = torch.stack([model(**sample['net_input']).view(-1)
for model in models], dim=0).mean(dim=0)
                        preds = list(zip(sample['id'], probs))
else:
probs = torch.stack([model.get_normalized_probs(model(**sample['net_input']), log_probs=False)
for model in models], dim=0).mean(dim=0)
preds = torch.stack([sample['id'], probs.argmax(dim=-1)], dim=1)
for i, y in preds:
ans[i] = y
for y in ans:
fo.write(f"{y}\n")
if __name__ == '__main__':
parser = options.get_inference_parser()
args = options.parse_args_and_arch(parser)
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/setup.py | bert/setup.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from setuptools import setup, find_packages, Extension
import sys
if sys.version_info < (3,):
sys.exit('Sorry, Python3 is required for fairseq.')
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('requirements.txt') as f:
reqs = f.read()
bleu = Extension(
'fairseq.libbleu',
sources=[
'fairseq/clib/libbleu/libbleu.cpp',
'fairseq/clib/libbleu/module.cpp',
],
extra_compile_args=['-std=c++11'],
)
setup(
name='fairseq',
version='0.6.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
long_description=readme,
license=license,
install_requires=reqs.strip().split('\n'),
packages=find_packages(),
ext_modules=[bleu],
test_suite='tests',
)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/generate.py | bert/generate.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate pre-processed data with a trained model.
"""
import torch
from fairseq import bleu, data, options, progress_bar, tasks, tokenizer, utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_generator import SequenceGenerator
from fairseq.sequence_scorer import SequenceScorer
def main(args):
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _ = utils.load_ensemble_for_inference(args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides))
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
num_shards=args.num_shards,
shard_id=args.shard_id,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
if args.score_reference:
translator = SequenceScorer(models, task.target_dictionary)
else:
translator = SequenceGenerator(
models, task.target_dictionary, beam_size=args.beam, minlen=args.min_len,
stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen, unk_penalty=args.unkpen,
sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature,
diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength,
)
if use_cuda:
translator.cuda()
# Generate and compute BLEU score
scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
num_sentences = 0
has_target = True
with progress_bar.build_progress_bar(args, itr) as t:
if args.score_reference:
translations = translator.score_batched_itr(t, cuda=use_cuda, timer=gen_timer)
else:
translations = translator.generate_batched_itr(
t, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b,
cuda=use_cuda, timer=gen_timer, prefix_size=args.prefix_size,
)
wps_meter = TimeMeter()
for sample_id, src_tokens, target_tokens, hypos in translations:
# Process input and ground truth
has_target = target_tokens is not None
target_tokens = target_tokens.int().cpu() if has_target else None
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
src_str = src_dict.string(src_tokens, args.remove_bpe)
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if not args.quiet:
print('S-{}\t{}'.format(sample_id, src_str))
if has_target:
print('T-{}\t{}'.format(sample_id, target_str))
# Process top predictions
for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
if not args.quiet:
print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
print('P-{}\t{}'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(map(lambda x: str(utils.item(x)), alignment))
))
# Score only the top hypothesis
if has_target and i == 0:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tokenizer.Tokenizer.tokenize(
target_str, tgt_dict, add_if_not_exist=True)
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(src_tokens.size(0))
t.log({'wps': round(wps_meter.avg)})
num_sentences += 1
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
if has_target:
print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))
if __name__ == '__main__':
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/interactive.py | bert/interactive.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import numpy as np
import sys
import torch
from fairseq import data, options, tasks, tokenizer, utils
from fairseq.sequence_generator import SequenceGenerator
Batch = namedtuple('Batch', 'srcs tokens lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def buffered_read(buffer_size):
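    # Lazily read stdin and yield it in chunks of `buffer_size` lines, e.g.
    # with buffer_size=2 the lines a, b, c come out as ['a', 'b'] then ['c'].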
buffer = []
for src_str in sys.stdin:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, args, task, max_positions):
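    # Tokenize the raw input lines and batch them on the fly; each yielded
    # Batch carries the original line indices so the caller can restore the
    # input order after batching reorders the sentences by length.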
tokens = [
tokenizer.Tokenizer.tokenize(src_str, task.source_dictionary, add_if_not_exist=False).long()
for src_str in lines
]
lengths = np.array([t.numel() for t in tokens])
itr = task.get_batch_iterator(
dataset=data.LanguagePairDataset(tokens, lengths, task.source_dictionary),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
srcs=[lines[i] for i in batch['id']],
tokens=batch['net_input']['src_tokens'],
lengths=batch['net_input']['src_lengths'],
), batch['id']
def main(args):
if args.buffer_size < 1:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
'--max-sentences/--batch-size cannot be larger than --buffer-size'
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Setup task, e.g., translation
task = tasks.setup_task(args)
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
model_paths = args.path.split(':')
models, model_args = utils.load_ensemble_for_inference(model_paths, task, model_arg_overrides=eval(args.model_overrides))
# Set dictionaries
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
# Initialize generator
translator = SequenceGenerator(
models, tgt_dict, beam_size=args.beam, minlen=args.min_len,
stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen, unk_penalty=args.unkpen,
sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature,
diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength,
)
if use_cuda:
translator.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
def make_result(src_str, hypos):
result = Translation(
src_str='O\t{}'.format(src_str),
hypos=[],
pos_scores=[],
alignments=[],
)
# Process top predictions
for hypo in hypos[:min(len(hypos), args.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
result.hypos.append('H\t{}\t{}'.format(hypo['score'], hypo_str))
result.pos_scores.append('P\t{}'.format(
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
result.alignments.append(
'A\t{}'.format(' '.join(map(lambda x: str(utils.item(x)), alignment)))
if args.print_alignment else None
)
return result
def process_batch(batch):
tokens = batch.tokens
lengths = batch.lengths
if use_cuda:
tokens = tokens.cuda()
lengths = lengths.cuda()
encoder_input = {'src_tokens': tokens, 'src_lengths': lengths}
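        # Generation length is capped linearly in the (padded) source length:
        # maxlen = max_len_a * src_len + max_len_b. Purely as an illustration,
        # max_len_a=0.9 and max_len_b=10 would allow 0.9*src_len + 10 tokens.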
translations = translator.generate(
encoder_input,
maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b),
)
return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)]
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
)
if args.buffer_size > 1:
print('| Sentence buffer size:', args.buffer_size)
print('| Type the input sentence and press return:')
for inputs in buffered_read(args.buffer_size):
indices = []
results = []
for batch, batch_indices in make_batches(inputs, args, task, max_positions):
indices.extend(batch_indices)
results += process_batch(batch)
for i in np.argsort(indices):
result = results[i]
print(result.src_str)
for hypo, pos_scores, align in zip(result.hypos, result.pos_scores, result.alignments):
print(hypo)
print(pos_scores)
if align is not None:
print(align)
if __name__ == '__main__':
parser = options.get_generation_parser(interactive=True)
args = options.parse_args_and_arch(parser)
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/score.py | bert/score.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
BLEU scoring of generated translations against reference translations.
"""
import argparse
import os
import sys
from fairseq import bleu, tokenizer
from fairseq.data import dictionary
def get_parser():
parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
parser.add_argument('-s', '--sys', default='-', help='system output')
parser.add_argument('-r', '--ref', required=True, help='references')
parser.add_argument('-o', '--order', default=4, metavar='N',
type=int, help='consider ngrams up to this order')
parser.add_argument('--ignore-case', action='store_true',
help='case-insensitive scoring')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
print(args)
assert args.sys == '-' or os.path.exists(args.sys), \
"System output file {} does not exist".format(args.sys)
assert os.path.exists(args.ref), \
"Reference file {} does not exist".format(args.ref)
dict = dictionary.Dictionary()
def readlines(fd):
for line in fd.readlines():
            if args.ignore_case:
                yield line.lower()
            else:
                yield line
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
sys_tok = tokenizer.Tokenizer.tokenize(sys_tok, dict)
ref_tok = tokenizer.Tokenizer.tokenize(ref_tok, dict)
scorer.add(ref_tok, sys_tok)
print(scorer.result_string(args.order))
if args.sys == '-':
score(sys.stdin)
else:
with open(args.sys, 'r') as f:
score(f)
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/preprocess_bert.py | bert/preprocess_bert.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import argparse
from collections import Counter
from itertools import zip_longest
import os
import shutil
from fairseq.data import indexed_dataset, dictionary
from fairseq.tokenizer import Tokenizer, tokenize_line
from multiprocessing import Pool, Manager, Process
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
parser.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix')
parser.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes')
parser.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes')
parser.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
help='map words appearing less than threshold times to unknown')
parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
help='map words appearing less than threshold times to unknown')
parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')
parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')
parser.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],
help='output format (optional)')
parser.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
parser.add_argument('--only-source', action='store_true', help='Only process the source language')
parser.add_argument('--padding-factor', metavar='N', default=8, type=int,
help='Pad dictionary size to be multiple of N')
parser.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers')
return parser
def main(args):
print(args)
os.makedirs(args.destdir, exist_ok=True)
target = not args.only_source
def build_dictionary(filenames):
d = dictionary.BertDictionary()
for filename in filenames:
Tokenizer.add_file_to_dictionary(filename, d, tokenize_line, args.workers)
return d
def train_path(lang):
return '{}{}'.format(args.trainpref, ('.' + lang) if lang else '')
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += f'.{lang}'
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path('dict', lang) + '.txt'
if args.joined_dictionary:
assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'
assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'
src_dict = build_dictionary(set([
train_path(lang)
for lang in [args.source_lang, args.target_lang]
]))
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = dictionary.BertDictionary.load(args.srcdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)])
if target:
if args.tgtdict:
tgt_dict = dictionary.BertDictionary.load(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)])
src_dict.finalize(
threshold=args.thresholdsrc,
nwords=args.nwordssrc,
padding_factor=args.padding_factor,
)
src_dict.save(dict_path(args.source_lang))
if target:
if not args.joined_dictionary:
tgt_dict.finalize(
threshold=args.thresholdtgt,
nwords=args.nwordstgt,
padding_factor=args.padding_factor,
)
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(input_prefix, output_prefix, lang, num_workers):
dict = dictionary.BertDictionary.load(dict_path(lang))
print('| [{}] Dictionary: {} types'.format(lang, len(dict) - 1))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result['replaced'])
n_seq_tok[0] += worker_result['nseq']
n_seq_tok[1] += worker_result['ntok']
input_file = '{}{}'.format(input_prefix, ('.' + lang) if lang is not None else '')
offsets = Tokenizer.find_offsets(input_file, num_workers)
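        # `offsets` holds num_workers + 1 byte positions splitting the input
        # file into contiguous chunks: chunk 0 is binarized in this process,
        # while chunks 1..num_workers-1 go to pool workers that each write a
        # temporary dataset, merged back in once the pool is joined.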
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers-1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(binarize, (args, input_file, dict, prefix, lang,
offsets[worker_id],
offsets[worker_id + 1]), callback=merge_result)
pool.close()
ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_file(args, output_prefix, lang, 'bin'))
merge_result(Tokenizer.binarize(input_file, dict, lambda t: ds.add_item(t),
offset=0, end=offsets[1]))
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
lang, input_file, n_seq_tok[0], n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1], dict.unk_word))
def make_dataset(input_prefix, output_prefix, lang, num_workers=1):
if args.output_format == 'binary':
make_binary_dataset(input_prefix, output_prefix, lang, num_workers)
elif args.output_format == 'raw':
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
def make_all(lang):
if args.trainpref:
make_dataset(args.trainpref, 'train', lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(',')):
outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
make_dataset(validpref, outprefix, lang)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(',')):
outprefix = 'test{}'.format(k) if k > 0 else 'test'
make_dataset(testpref, outprefix, lang)
make_all(args.source_lang)
if target:
make_all(args.target_lang)
print('| Wrote preprocessed data to {}'.format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
src_dict = dictionary.BertDictionary.load(dict_path(args.source_lang))
tgt_dict = dictionary.BertDictionary.load(dict_path(args.target_lang))
freq_map = {}
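        # freq_map[src_token][tgt_token] counts how often the alignment file
        # pairs the two; the final align_dict keeps, for each source token,
        # the target token it is most frequently aligned to.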
with open(args.alignfile, 'r') as align_file:
with open(src_file_name, 'r') as src_file:
with open(tgt_file_name, 'r') as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split('-')), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(
args.source_lang, args.target_lang)), 'w') as f:
for k, v in align_dict.items():
print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, dict, output_prefix, lang, offset, end):
ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_file(args, output_prefix, lang, 'bin'))
def consumer(tensor):
ds.add_item(tensor)
res = Tokenizer.binarize(filename, dict, consumer, offset=offset, end=end)
ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = f'{args.destdir}/{output_prefix}'
lang_part = f'.{args.source_lang}-{args.target_lang}.{lang}' if lang is not None else ''
return f'{base}{lang_part}'
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return f'{base}.{extension}'
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/multiprocessing_train.py | bert/multiprocessing_train.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import random
import signal
import torch
from fairseq import distributed_utils, options
from train import main as single_process_main
def main(args):
# Set distributed training parameters for a single node.
args.distributed_world_size = torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_init_host = 'localhost'
args.distributed_port = port + 1
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(args.distributed_world_size):
args.distributed_rank = i
args.device_id = i
procs.append(mp.Process(target=run, args=(args, error_queue, ), daemon=True))
procs[i].start()
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, error_queue):
try:
args.distributed_rank = distributed_utils.distributed_init(args)
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.distributed_rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
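        # Flow: a child puts (rank, traceback) on error_queue; error_listener
        # wakes, re-queues it and raises SIGUSR1 in this parent process;
        # signal_handler then SIGINTs every child and re-raises the child's
        # traceback here. (The use of SIGUSR1 makes this Unix-only.)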
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/eval_lm.py | bert/eval_lm.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Evaluate the perplexity of a trained language model.
"""
import numpy as np
import torch
from fairseq import data, options, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.count = 0
def add(self, log_prob):
self.log_prob += log_prob
self.count += 1
def __str__(self):
return '{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob / self.count, self.is_bpe)
def main(parsed_args):
assert parsed_args.path is not None, '--path required for evaluation!'
print(parsed_args)
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
print('| loading model(s) from {}'.format(parsed_args.path))
models, args = utils.load_ensemble_for_inference(parsed_args.path.split(':'), task)
args.__dict__.update(parsed_args.__dict__)
print(args)
task.args = args
# Load dataset splits
task.load_dataset(args.gen_subset)
print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.make_generation_fast_()
if args.fp16:
model.half()
assert len(models) > 0
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
num_shards=args.num_shards,
shard_id=args.shard_id,
ignore_invalid_inputs=True,
).next_epoch_itr(shuffle=False)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(models, task.target_dictionary)
if use_cuda:
scorer.cuda()
score_sum = 0.
count = 0
if args.remove_bpe is not None:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = set(i for i in range(len(task.dictionary)) if task.dictionary[i].endswith(bpe_cont))
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
with progress_bar.build_progress_bar(args, itr) as t:
results = scorer.score_batched_itr(t, cuda=use_cuda, timer=gen_timer)
wps_meter = TimeMeter()
for _, src_tokens, __, hypos in results:
for hypo in hypos:
pos_scores = hypo['positional_scores']
skipped_toks = 0
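                # Fold the score of each BPE continuation token into the token
                # that follows it, so word-level log-probs are exact; merged
                # positions are zeroed and excluded from the token count.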
if bpe_toks is not None:
for i in range(len(hypo['tokens']) - 1):
if hypo['tokens'][i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
print('| Skipping tokens with inf scores:',
task.target_dictionary.string(hypo['tokens'][inf_scores.nonzero()]))
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += utils.item(pos_scores.sum())
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(hypo['tokens'])):
w_ind = hypo['tokens'][i].item()
w += task.dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item())
is_bpe = False
w = ''
if args.output_word_probs:
print('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
wps_meter.update(src_tokens.size(0))
t.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count
print('| Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, 1. / gen_timer.avg))
print('| Loss: {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, np.exp(avg_nll_loss)))
if args.output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
print(ws)
if __name__ == '__main__':
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/distributed_train.py | bert/distributed_train.py | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import socket
import subprocess
from train import main as single_process_main
from fairseq import distributed_utils, options
def main(args):
if args.distributed_init_method is None and args.distributed_port > 0:
# We can determine the init method automatically for Slurm.
node_list = os.environ.get('SLURM_JOB_NODELIST')
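        # 'scontrol show hostnames' expands a Slurm node list such as
        # 'node[1-3]' into one hostname per line; the first host is used as
        # the TCP rendezvous address, while SLURM_PROCID and SLURM_LOCALID
        # supply the global rank and the per-node device id.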
if node_list is not None:
try:
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
args.distributed_init_method = 'tcp://{host}:{port}'.format(
host=hostnames.split()[0].decode('utf-8'),
port=args.distributed_port)
args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
args.device_id = int(os.environ.get('SLURM_LOCALID'))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError as e: # Slurm is not installed
pass
if args.distributed_init_method is None and args.distributed_port is None:
raise ValueError('--distributed-init-method or --distributed-port '
'must be specified for distributed training')
args.distributed_rank = distributed_utils.distributed_init(args)
print('| initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank))
single_process_main(args)
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/preprocess.py | bert/preprocess.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import argparse
from collections import Counter
from itertools import zip_longest
import os
import shutil
from fairseq.data import indexed_dataset, dictionary
from fairseq.tokenizer import Tokenizer, tokenize_line
from multiprocessing import Pool, Manager, Process
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
parser.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix')
parser.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes')
parser.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes')
parser.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
help='map words appearing less than threshold times to unknown')
parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
help='map words appearing less than threshold times to unknown')
parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')
parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')
parser.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],
help='output format (optional)')
parser.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
parser.add_argument('--only-source', action='store_true', help='Only process the source language')
parser.add_argument('--padding-factor', metavar='N', default=8, type=int,
help='Pad dictionary size to be multiple of N')
parser.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers')
return parser
def main(args):
print(args)
os.makedirs(args.destdir, exist_ok=True)
target = not args.only_source
def build_dictionary(filenames):
d = dictionary.Dictionary()
for filename in filenames:
Tokenizer.add_file_to_dictionary(filename, d, tokenize_line, args.workers)
return d
def train_path(lang):
return '{}{}'.format(args.trainpref, ('.' + lang) if lang else '')
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += f'.{lang}'
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path('dict', lang) + '.txt'
if args.joined_dictionary:
assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'
assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'
src_dict = build_dictionary(set([
train_path(lang)
for lang in [args.source_lang, args.target_lang]
]))
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = dictionary.Dictionary.load(args.srcdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)])
if target:
if args.tgtdict:
tgt_dict = dictionary.Dictionary.load(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)])
src_dict.finalize(
threshold=args.thresholdsrc,
nwords=args.nwordssrc,
padding_factor=args.padding_factor,
)
src_dict.save(dict_path(args.source_lang))
if target:
if not args.joined_dictionary:
tgt_dict.finalize(
threshold=args.thresholdtgt,
nwords=args.nwordstgt,
padding_factor=args.padding_factor,
)
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(input_prefix, output_prefix, lang, num_workers):
dict = dictionary.Dictionary.load(dict_path(lang))
print('| [{}] Dictionary: {} types'.format(lang, len(dict) - 1))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result['replaced'])
n_seq_tok[0] += worker_result['nseq']
n_seq_tok[1] += worker_result['ntok']
input_file = '{}{}'.format(input_prefix, ('.' + lang) if lang is not None else '')
offsets = Tokenizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers-1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(binarize, (args, input_file, dict, prefix, lang,
offsets[worker_id],
offsets[worker_id + 1]), callback=merge_result)
pool.close()
ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_file(args, output_prefix, lang, 'bin'))
merge_result(Tokenizer.binarize(input_file, dict, lambda t: ds.add_item(t),
offset=0, end=offsets[1]))
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
lang, input_file, n_seq_tok[0], n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1], dict.unk_word))
def make_dataset(input_prefix, output_prefix, lang, num_workers=1):
if args.output_format == 'binary':
make_binary_dataset(input_prefix, output_prefix, lang, num_workers)
elif args.output_format == 'raw':
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
def make_all(lang):
if args.trainpref:
make_dataset(args.trainpref, 'train', lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(',')):
outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
make_dataset(validpref, outprefix, lang)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(',')):
outprefix = 'test{}'.format(k) if k > 0 else 'test'
make_dataset(testpref, outprefix, lang)
make_all(args.source_lang)
if target:
make_all(args.target_lang)
print('| Wrote preprocessed data to {}'.format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
src_dict = dictionary.Dictionary.load(dict_path(args.source_lang))
tgt_dict = dictionary.Dictionary.load(dict_path(args.target_lang))
freq_map = {}
with open(args.alignfile, 'r') as align_file:
with open(src_file_name, 'r') as src_file:
with open(tgt_file_name, 'r') as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split('-')), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(
args.source_lang, args.target_lang)), 'w') as f:
for k, v in align_dict.items():
print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, dict, output_prefix, lang, offset, end):
ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_file(args, output_prefix, lang, 'bin'))
def consumer(tensor):
ds.add_item(tensor)
res = Tokenizer.binarize(filename, dict, consumer, offset=offset, end=end)
ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = f'{args.destdir}/{output_prefix}'
lang_part = f'.{args.source_lang}-{args.target_lang}.{lang}' if lang is not None else ''
return f'{base}{lang_part}'
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return f'{base}.{extension}'
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/scripts/build_sym_alignment.py | bert/scripts/build_sym_alignment.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
    parser = argparse.ArgumentParser(description='symmetric alignment builder')
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, 'scripts', 'ems',
'support', 'symmetrize-fast-align.perl')
# create joined file
joined_file = os.path.join(args.output_dir, 'text.joined')
with open(args.source_file, 'r') as src, open(args.target_file, 'r') as tgt:
with open(joined_file, 'w') as joined:
for s, t in zip_longest(src, tgt):
print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, 'align.forward')
fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(
FASTALIGN=fast_align_bin,
JOINED=joined_file,
FWD=fwd_align_file)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, 'align.backward')
bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(
FASTALIGN=fast_align_bin,
JOINED=joined_file,
BWD=bwd_align_file)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, 'aligned')
sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin
)
assert os.system(sym_cmd) == 0
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/scripts/__init__.py | bert/scripts/__init__.py | python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false | |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/scripts/average_checkpoints.py | bert/scripts/average_checkpoints.py | #!/usr/bin/env python3
import argparse
import collections
import torch
import os
import re
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state['model']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
if k not in params_dict:
params_dict[k] = []
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
params_dict[k].append(p)
averaged_params = collections.OrderedDict()
# v should be a list of torch Tensor.
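    # Plain arithmetic mean, parameter by parameter: for two checkpoints with
    # weights w1 and w2 the result is (w1 + w2) / 2. Half-precision tensors
    # were upcast to float above so the running sum keeps full precision.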
for k, v in params_dict.items():
summed_v = None
for x in v:
summed_v = summed_v + x if summed_v is not None else x
averaged_params[k] = summed_v / len(v)
new_state['model'] = averaged_params
return new_state
def last_n_checkpoints(paths, n, update_based):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
else:
pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
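    # The captured group is the sort key: the update count for a file like
    # 'checkpoint_12_3000.pt' (-> 3000) or the epoch for 'checkpoint7.pt'
    # (-> 7); entries are sorted descending and the newest n are kept.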
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
if len(entries) < n:
        raise Exception('Found {} checkpoint files but need at least {}'.format(len(entries), n))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description='Tool to average the params of input checkpoints to '
'produce a new checkpoint',
)
parser.add_argument(
'--inputs',
required=True,
nargs='+',
help='Input checkpoint file paths.',
)
parser.add_argument(
'--output',
required=True,
metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this '
'path.',
)
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument(
'--num-epoch-checkpoints',
type=int,
        help='if set, will try to find checkpoints with names checkpointN.pt (N = epoch number) in the path specified by input, '
'and average last this many of them.',
)
num_group.add_argument(
'--num-update-checkpoints',
type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
'and average last this many of them.',
)
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
if num is not None:
args.inputs = last_n_checkpoints(args.inputs, num, is_update_based)
print('averaging checkpoints: ', args.inputs)
new_state = average_checkpoints(args.inputs)
torch.save(new_state, args.output)
print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/scripts/read_binarized.py | bert/scripts/read_binarized.py | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import argparse
from fairseq.data import dictionary
from fairseq.data import IndexedDataset
def get_parser():
parser = argparse.ArgumentParser(
description='writes text from binarized file to stdout')
parser.add_argument('--dict', metavar='FP', required=True, help='dictionary containing known words')
parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
return parser
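# Example invocation (paths hypothetical):
#   python read_binarized.py --dict data-bin/dict.en.txt --input data-bin/train.en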
def main(args):
    token_dict = dictionary.Dictionary.load(args.dict)  # avoid shadowing the builtin 'dict'
    ds = IndexedDataset(args.input, fix_lua_indexing=True)
    for tensor_line in ds:
        print(token_dict.string(tensor_line))
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_sequence_scorer.py | bert/tests/test_sequence_scorer.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import unittest
import torch
from fairseq.sequence_scorer import SequenceScorer
import tests.utils as test_utils
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
'source': torch.LongTensor([w1, w2, eos]),
'target': torch.LongTensor([w1, w2, w1, eos]),
},
{
'source': torch.LongTensor([w2, eos]),
'target': torch.LongTensor([w2, w1, eos]),
},
{
'source': torch.LongTensor([w2, eos]),
'target': torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]),
# step 1:
torch.FloatTensor([
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]),
# step 2:
torch.FloatTensor([
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]),
# step 3:
torch.FloatTensor([
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]),
]
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
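        # expected_scores reads the probability of each target token straight
        # off args.beam_probs: e.g. sentence 1's target (w1 w2 w1 eos) scores
        # 0.6 (step 0, w1), 0.7 (step 1, w2), 0.5 (step 2, w1) and 0.9 (step 3, eos).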
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer([model], task.target_dictionary)
        for sent_id, _src, _ref, hypos in scorer.score_batched_itr(data_itr):
            self.assertHypoTokens(hypos[0], data[sent_id]['target'])
            self.assertHypoScore(hypos[0], expected_scores[sent_id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()**lenpen
self.assertLess(abs(score - hypo['score']), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_convtbc.py | bert/tests/test_convtbc.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import unittest
from fairseq.modules import ConvTBC
import torch.nn as nn
class TestConvTBC(unittest.TestCase):
def test_convtbc(self):
        # ConvTBC weight layout: (kernel_size, in_channels, out_channels)
conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # nn.Conv1d weight layout: (out_channels, in_channels, kernel_size)
conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
conv_tbc.bias.data.copy_(conv1d.bias.data)
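        # ConvTBC consumes (time, batch, channel) input while nn.Conv1d
        # consumes (batch, channel, time); the transposes below map one layout
        # to the other so the two modules can be compared element-wise.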
input_tbc = torch.randn(7, 2, 4, requires_grad=True)
input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
input1d.requires_grad = True
output_tbc = conv_tbc(input_tbc)
output1d = conv1d(input1d)
self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
grad_tbc = torch.randn(output_tbc.size())
grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
output_tbc.backward(grad_tbc)
output1d.backward(grad1d)
self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_utils.py | bert/tests/test_utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
def test_convert_padding_direction(self):
pad = 1
left_pad = torch.LongTensor([
[2, 3, 4, 5, 6],
[1, 7, 8, 9, 10],
[1, 1, 1, 11, 12],
])
right_pad = torch.LongTensor([
[2, 3, 4, 5, 6],
[7, 8, 9, 10, 1],
[11, 12, 1, 1, 1],
])
self.assertAlmostEqual(
right_pad,
utils.convert_padding_direction(
left_pad,
pad,
left_to_right=True,
),
)
self.assertAlmostEqual(
left_pad,
utils.convert_padding_direction(
right_pad,
pad,
right_to_left=True,
),
)
def test_make_positions(self):
pad = 1
left_pad_input = torch.LongTensor([
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
])
left_pad_output = torch.LongTensor([
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
])
right_pad_input = torch.LongTensor([
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
])
right_pad_output = torch.LongTensor([
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
])
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad, left_pad=True),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad, left_pad=False),
)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_reproducibility.py | bert/tests/test_reproducibility.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import contextlib
from io import StringIO
import json
import os
import tempfile
import unittest
import torch
from fairseq import options
from . import test_binaries
class TestReproducibility(unittest.TestCase):
def _test_reproducibility(self, name, extra_flags=None):
if extra_flags is None:
extra_flags = []
with tempfile.TemporaryDirectory(name) as data_dir:
with contextlib.redirect_stdout(StringIO()):
test_binaries.create_dummy_data(data_dir)
test_binaries.preprocess_translation_data(data_dir)
            # train epochs 1-3 together
stdout = StringIO()
with contextlib.redirect_stdout(stdout):
test_binaries.train_translation_model(
data_dir, 'fconv_iwslt_de_en', [
'--dropout', '0.0',
'--log-format', 'json',
'--log-interval', '1',
'--max-epoch', '3',
] + extra_flags,
)
stdout = stdout.getvalue()
train_log, valid_log = map(json.loads, stdout.split('\n')[-4:-2])
            # now resume from the end-of-epoch-1 checkpoint and train epochs 2-3
os.rename(
os.path.join(data_dir, 'checkpoint1.pt'),
os.path.join(data_dir, 'checkpoint_last.pt'),
)
stdout = StringIO()
with contextlib.redirect_stdout(stdout):
test_binaries.train_translation_model(
data_dir, 'fconv_iwslt_de_en', [
'--dropout', '0.0',
'--log-format', 'json',
'--log-interval', '1',
'--max-epoch', '3',
] + extra_flags,
)
stdout = stdout.getvalue()
train_res_log, valid_res_log = map(json.loads, stdout.split('\n')[-4:-2])
def cast(s):
return round(float(s), 3)
for k in ['loss', 'ppl', 'num_updates', 'gnorm']:
self.assertEqual(cast(train_log[k]), cast(train_res_log[k]))
for k in ['valid_loss', 'valid_ppl', 'num_updates', 'best']:
self.assertEqual(cast(valid_log[k]), cast(valid_res_log[k]))
def test_reproducibility(self):
self._test_reproducibility('test_reproducibility')
def test_reproducibility_fp16(self):
self._test_reproducibility('test_reproducibility_fp16', [
'--fp16',
'--fp16-init-scale', '4096',
])
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_sequence_generator.py | bert/tests/test_sequence_generator.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import unittest
import torch
from fairseq.sequence_generator import SequenceGenerator
import tests.utils as test_utils
class TestSequenceGenerator(unittest.TestCase):
def setUp(self):
self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model = (
test_utils.sequence_generator_setup()
)
self.encoder_input = {
'src_tokens': src_tokens, 'src_lengths': src_lengths,
}
def test_with_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict)
hypos = generator.generate(self.encoder_input, beam_size=2)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
def test_without_normalization(self):
# Sentence 1: unchanged from the normalized case
# Sentence 2: beams swap order
generator = SequenceGenerator([self.model], self.tgt_dict, normalize_scores=False)
hypos = generator.generate(self.encoder_input, beam_size=2)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
def test_with_lenpen_favoring_short_hypos(self):
lenpen = 0.6
generator = SequenceGenerator([self.model], self.tgt_dict, len_penalty=lenpen)
hypos = generator.generate(self.encoder_input, beam_size=2)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
def test_with_lenpen_favoring_long_hypos(self):
lenpen = 5.0
generator = SequenceGenerator([self.model], self.tgt_dict, len_penalty=lenpen)
hypos = generator.generate(self.encoder_input, beam_size=2)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)
def test_maxlen(self):
generator = SequenceGenerator([self.model], self.tgt_dict, maxlen=2)
hypos = generator.generate(self.encoder_input, beam_size=2)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
def test_no_stop_early(self):
generator = SequenceGenerator([self.model], self.tgt_dict, stop_early=False)
hypos = generator.generate(self.encoder_input, beam_size=2)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w2, w2, w2, w2, eos])
self.assertHypoScore(hypos[1][0], [0.3, 0.9, 0.99, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()**lenpen
self.assertLess(abs(score - hypo['score']), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
class TestDiverseBeamSearch(unittest.TestCase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor([
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
])
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]),
# step 1:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[0.0, unk, 0.6, 0.4],
[0.0, unk, 0.6, 0.4],
# sentence 2:
[0.25, unk, 0.35, 0.4],
[0.25, unk, 0.35, 0.4],
]),
# step 2:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
# sentence 2:
[0.9, unk, 0.1, 0.0],
[0.9, unk, 0.1, 0.0],
]),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_diverse_beam_search(self):
generator = SequenceGenerator(
[self.model], self.tgt_dict,
beam_size=2, diverse_beam_groups=2, diverse_beam_strength=0.,
)
encoder_input = {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}
hypos = generator.generate(encoder_input)
eos, w1, w2 = self.eos, self.w1, self.w2
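        # with beam_size=2 and diverse_beam_groups=2 each group holds a single
        # beam, and diverse_beam_strength=0 applies no diversity penalty, so
        # both groups return the same best hypothesis for each sentence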
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()**lenpen
self.assertLess(abs(score - hypo['score']), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/utils.py | bert/tests/utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import torch
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqModel,
)
from fairseq.tasks import FairseqTask
def dummy_dictionary(vocab_size, prefix='token_'):
d = Dictionary()
for i in range(vocab_size):
token = prefix + str(i)
d.add_symbol(token)
d.finalize(padding_factor=1) # don't add extra padding symbols
return d
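# Note: the four special symbols occupy indices 0-3 (pad=1, eos=2, unk=3),
# so the first dummy token ('token_0') lands at index 4, which is why the
# tests refer to words w1=4 and w2=5.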
def dummy_dataloader(
samples,
padding_idx=1,
eos_idx=2,
batch_size=None,
):
if batch_size is None:
batch_size = len(samples)
# add any missing data to samples
for i, sample in enumerate(samples):
if 'id' not in sample:
sample['id'] = i
# create dataloader
dataset = TestDataset(samples)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)),
)
return iter(dataloader)
def sequence_generator_setup():
# construct dummy dictionary
d = dummy_dictionary(vocab_size=2)
eos = d.eos()
w1 = 4
w2 = 5
# construct source data
src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]),
# step 1:
torch.FloatTensor([
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0)
[0.0, unk, 0.9, 0.1], # w2: 0.1
# sentence 2:
[0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25)
[0.00, unk, 0.10, 0.9], # w2: 0.3
]),
# step 2:
torch.FloatTensor([
# eos w1 w2 prefix
# sentence 1:
[0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9
[0.6, unk, 0.2, 0.2], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6)
# sentence 2:
[0.60, unk, 0.4, 0.00], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6)
[0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9
]),
# step 3:
torch.FloatTensor([
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
[1.0, unk, 0.0, 0.0], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
# sentence 2:
[0.1, unk, 0.5, 0.4], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
[1.0, unk, 0.0, 0.0], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
]),
]
task = TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
tgt_dict = task.target_dictionary
return tgt_dict, w1, w2, src_tokens, src_lengths, model
class TestDataset(torch.utils.data.Dataset):
def __init__(self, data):
super().__init__()
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class TestTranslationTask(FairseqTask):
def __init__(self, args, src_dict, tgt_dict, model):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.model = model
@classmethod
def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
return cls(args, src_dict, tgt_dict, model)
def build_model(self, args):
return TestModel.build_model(args, self)
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
class TestModel(FairseqModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths):
return src_tokens
def reorder_encoder_out(self, encoder_out, new_order):
return encoder_out.index_select(0, new_order)
class TestIncrementalDecoder(FairseqIncrementalDecoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
assert hasattr(args, 'beam_probs') or hasattr(args, 'probs')
args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100)
self.args = args
def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bbsz = prev_output_tokens.size(0)
vocab = len(self.dictionary)
src_len = encoder_out.size(1)
tgt_len = prev_output_tokens.size(1)
# determine number of steps
if incremental_state is not None:
# cache step number
step = utils.get_incremental_state(self, incremental_state, 'step')
if step is None:
step = 0
utils.set_incremental_state(self, incremental_state, 'step', step + 1)
steps = [step]
else:
steps = list(range(tgt_len))
# define output in terms of raw probs
if hasattr(self.args, 'probs'):
assert self.args.probs.dim() == 3, \
'expected probs to have size bsz*steps*vocab'
probs = self.args.probs.index_select(1, torch.LongTensor(steps))
else:
probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
for i, step in enumerate(steps):
# args.beam_probs gives the probability for every vocab element,
# starting with eos, then unknown, and then the rest of the vocab
if step < len(self.args.beam_probs):
probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step]
else:
probs[:, i, self.dictionary.eos()] = 1.0
# random attention
attn = torch.rand(bbsz, tgt_len, src_len)
return probs, attn
def get_normalized_probs(self, net_output, log_probs, _):
# the decoder returns probabilities directly
probs = net_output[0]
if log_probs:
return probs.log()
else:
return probs
def max_positions(self):
return self.args.max_decoder_positions
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_dictionary.py | bert/tests/test_dictionary.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import tempfile
import unittest
import torch
from fairseq.data import Dictionary
from fairseq.tokenizer import Tokenizer
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
'A B C D',
'B C D',
'C D',
'D',
]
ref_ids1 = list(map(torch.IntTensor, [
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
]))
ref_ids2 = list(map(torch.IntTensor, [
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
]))
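        # finalize() reorders symbols by descending frequency, so D (4
        # occurrences) moves to id 4 while A (1 occurrence) moves to id 7;
        # ref_ids2 is therefore ref_ids1 with the token ids mirrored.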
# build dictionary
d = Dictionary()
for line in txt:
Tokenizer.tokenize(line, d, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(Tokenizer.tokenize(line, dictionary, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode='w') as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_label_smoothing.py | bert/tests/test_label_smoothing.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import copy
import unittest
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
import tests.utils as test_utils
class TestLabelSmoothing(unittest.TestCase):
def setUp(self):
# build dictionary
self.d = test_utils.dummy_dictionary(3)
vocab = len(self.d)
self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
self.assertEqual(self.d.pad(), 1)
self.assertEqual(self.d.eos(), 2)
self.assertEqual(self.d.unk(), 3)
pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
# build dataset
self.data = [
# the first batch item has padding
{'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
{'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
]
self.sample = next(test_utils.dummy_dataloader(self.data))
# build model
self.args = argparse.Namespace()
self.args.sentence_avg = False
self.args.probs = torch.FloatTensor([
            # idx0  pad   eos  unk   w1   w2   w3   (idx0 is the dictionary's placeholder symbol)
[0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
[0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
[0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
]).unsqueeze(0).expand(2, 3, 7) # add batch dimension
self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
self.model = self.task.build_model(self.args)
def test_nll_loss(self):
self.args.label_smoothing = 0.1
nll_crit = CrossEntropyCriterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)
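        # in this fairseq version the smoothed loss is
        #   (1 - eps) * nll_loss + (eps / V) * sum_v(-log p_v)
        # so the criterion still computes and logs the plain nll_loss, which
        # is what the assertions above compare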
def test_padding(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample)
def get_one_no_padding(idx):
# create a new sample with just a single batch item so that there's
# no padding
sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
args1 = copy.copy(self.args)
args1.probs = args1.probs[idx, :, :].unsqueeze(0)
model1 = self.task.build_model(args1)
loss1, _, _ = crit(model1, sample1)
return loss1
loss1 = get_one_no_padding(0)
loss2 = get_one_no_padding(1)
self.assertAlmostEqual(loss, loss1 + loss2)
def test_reduction(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample, reduce=True)
unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
self.assertAlmostEqual(loss, unreduced_loss.sum())
def test_zero_eps(self):
self.args.label_smoothing = 0.0
nll_crit = CrossEntropyCriterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
self.assertAlmostEqual(nll_loss, smooth_loss)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/__init__.py | bert/tests/__init__.py | python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false | |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_binaries.py | bert/tests/test_binaries.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import contextlib
from io import StringIO
import os
import random
import sys
import tempfile
import unittest
import torch
from fairseq import options
import preprocess
import train
import generate
import interactive
import eval_lm
class TestTranslation(unittest.TestCase):
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--output-format', 'raw'])
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--raw-text'])
generate_main(data_dir, ['--raw-text'])
def test_fp16(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fp16') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
generate_main(data_dir)
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(
data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'],
)
                self.assertTrue(
                    'skip this example with --skip-invalid-size-inputs-valid-test'
                    in str(context.exception)
                )
train_translation_model(
data_dir, 'fconv_iwslt_de_en',
['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'],
)
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_sampling') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir, [
'--sampling',
'--sampling-temperature', '2',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, [
'--sampling',
'--sampling-topk', '3',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, ['--prefix-size', '2'])
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
])
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm', [
'--encoder-layers', '2',
'--encoder-bidirectional',
'--encoder-hidden-size', '256',
'--decoder-layers', '2',
])
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en')
generate_main(data_dir)
class TestStories(unittest.TestCase):
def test_fconv_self_att_wp(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
config = [
'--encoder-layers', '[(512, 3)] * 2',
'--decoder-layers', '[(512, 3)] * 2',
'--decoder-attention', 'True',
'--encoder-attention', 'False',
'--gated-attention', 'True',
'--self-attention', 'True',
'--project-input', 'True',
]
train_translation_model(data_dir, 'fconv_self_att_wp', config)
generate_main(data_dir)
# fusion model
os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))
config.extend([
'--pretrained', 'True',
'--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'),
'--save-dir', os.path.join(data_dir, 'fusion_model'),
])
train_translation_model(data_dir, 'fconv_self_att_wp', config)
class TestLanguageModeling(unittest.TestCase):
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'fconv_lm')
eval_lm_main(data_dir)
def create_dummy_data(data_dir, num_examples=1000, maxlen=20):
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
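        # 97 == ord('a'), so every token is a single lowercase ASCII letter
        # and each example is a line of 1..maxlen space-separated letters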
with open(os.path.join(data_dir, filename), 'w') as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))
print(ex_str, file=h)
offset += ex_len
_create_dummy_data('train.in')
_create_dummy_data('train.out')
_create_dummy_data('valid.in')
_create_dummy_data('valid.out')
_create_dummy_data('test.in')
_create_dummy_data('test.out')
def preprocess_translation_data(data_dir, extra_flags=None):
preprocess_parser = preprocess.get_parser()
preprocess_args = preprocess_parser.parse_args(
[
'--source-lang', 'in',
'--target-lang', 'out',
'--trainpref', os.path.join(data_dir, 'train'),
'--validpref', os.path.join(data_dir, 'valid'),
'--testpref', os.path.join(data_dir, 'test'),
'--thresholdtgt', '0',
'--thresholdsrc', '0',
'--destdir', data_dir,
] + (extra_flags or []),
)
preprocess.main(preprocess_args)
def train_translation_model(data_dir, arch, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', 'translation',
data_dir,
'--save-dir', data_dir,
'--arch', arch,
'--optimizer', 'nag',
'--lr', '0.05',
'--max-tokens', '500',
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--source-lang', 'in',
'--target-lang', 'out',
] + (extra_flags or []),
)
train.main(train_args)
def generate_main(data_dir, extra_flags=None):
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(
generate_parser,
[
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--beam', '3',
'--batch-size', '64',
'--max-len-b', '5',
'--gen-subset', 'valid',
'--no-progress-bar',
'--print-alignment',
] + (extra_flags or []),
)
# evaluate model in batch mode
generate.main(generate_args)
# evaluate model interactively
generate_args.buffer_size = 0
generate_args.max_sentences = None
orig_stdin = sys.stdin
sys.stdin = StringIO('h e l l o\n')
interactive.main(generate_args)
sys.stdin = orig_stdin
def preprocess_lm_data(data_dir):
preprocess_parser = preprocess.get_parser()
preprocess_args = preprocess_parser.parse_args([
'--only-source',
'--trainpref', os.path.join(data_dir, 'train.out'),
'--validpref', os.path.join(data_dir, 'valid.out'),
'--testpref', os.path.join(data_dir, 'test.out'),
'--destdir', data_dir,
])
preprocess.main(preprocess_args)
def train_language_model(data_dir, arch):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', 'language_modeling',
data_dir,
'--arch', arch,
'--optimizer', 'nag',
'--lr', '1.0',
'--criterion', 'adaptive_loss',
'--adaptive-softmax-cutoff', '5,10,15',
'--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
'--decoder-embed-dim', '280',
'--max-tokens', '500',
'--tokens-per-sample', '500',
'--save-dir', data_dir,
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--ddp-backend', 'no_c10d',
],
)
train.main(train_args)
def eval_lm_main(data_dir):
eval_lm_parser = options.get_eval_lm_parser()
eval_lm_args = options.parse_args_and_arch(
eval_lm_parser,
[
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--no-progress-bar',
],
)
eval_lm.main(eval_lm_args)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_backtranslation_dataset.py | bert/tests/test_backtranslation_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import unittest
import tests.utils as test_utils
import torch
from fairseq.data.backtranslation_dataset import BacktranslationDataset
class TestBacktranslationDataset(unittest.TestCase):
def setUp(self):
self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = (
test_utils.sequence_generator_setup()
)
backtranslation_args = argparse.Namespace()
"""
Same as defaults from fairseq/options.py
"""
backtranslation_args.backtranslation_unkpen = 0
backtranslation_args.backtranslation_sampling = False
backtranslation_args.backtranslation_max_len_a = 0
backtranslation_args.backtranslation_max_len_b = 200
backtranslation_args.backtranslation_beam = 2
self.backtranslation_args = backtranslation_args
dummy_src_samples = self.src_tokens
self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
def test_backtranslation_dataset(self):
backtranslation_dataset = BacktranslationDataset(
args=self.backtranslation_args,
tgt_dataset=self.tgt_dataset,
tgt_dict=self.tgt_dict,
backtranslation_model=self.model,
)
dataloader = torch.utils.data.DataLoader(
backtranslation_dataset,
batch_size=2,
collate_fn=backtranslation_dataset.collater,
)
backtranslation_batch_result = next(iter(dataloader))
eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2
# Note that we sort by src_lengths and add left padding, so actually
# ids will look like: [1, 0]
expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
tgt_tokens = backtranslation_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_character_token_embedder.py | bert/tests/test_character_token_embedder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import unittest
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder
class TestCharacterTokenEmbedder(unittest.TestCase):
def test_character_token_embedder(self):
vocab = Dictionary()
vocab.add_symbol('hello')
vocab.add_symbol('there')
embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2)
test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']]
max_len = max(len(s) for s in test_sents)
input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
for i in range(len(test_sents)):
input[i][0] = vocab.eos()
for j in range(len(test_sents[i])):
input[i][j + 1] = vocab.index(test_sents[i][j])
input[i][j + 2] = vocab.eos()
embs = embedder(input)
assert embs.size() == (len(test_sents), max_len + 2, 5)
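        # the embedder is purely character-based, so identical tokens must
        # embed identically wherever they occur: eos vs. eos (rows 0/1 and
        # both ends of row 0), 'hello' in sentences 1 and 3, and 'there' in
        # sentences 1 and 2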
self.assertAlmostEqual(embs[0][0], embs[1][0])
self.assertAlmostEqual(embs[0][0], embs[0][-1])
self.assertAlmostEqual(embs[0][1], embs[2][1])
self.assertAlmostEqual(embs[0][3], embs[1][1])
embs.sum().backward()
assert embedder.char_embeddings.weight.grad is not None
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_average_checkpoints.py | bert/tests/test_average_checkpoints.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import collections
import os
import tempfile
import unittest
import numpy as np
import torch
from scripts.average_checkpoints import average_checkpoints
class TestAverageCheckpoints(unittest.TestCase):
def test_average_checkpoints(self):
params_0 = collections.OrderedDict(
[
('a', torch.DoubleTensor([100.0])),
('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
('c', torch.IntTensor([7, 8, 9])),
]
)
params_1 = collections.OrderedDict(
[
('a', torch.DoubleTensor([1.0])),
('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
('c', torch.IntTensor([2, 2, 2])),
]
)
params_avg = collections.OrderedDict(
[
('a', torch.DoubleTensor([50.5])),
('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
# We expect truncation for integer division
('c', torch.IntTensor([4, 5, 5])),
]
)
fd_0, path_0 = tempfile.mkstemp()
fd_1, path_1 = tempfile.mkstemp()
torch.save(collections.OrderedDict([('model', params_0)]), path_0)
torch.save(collections.OrderedDict([('model', params_1)]), path_1)
output = average_checkpoints([path_0, path_1])['model']
os.close(fd_0)
os.remove(path_0)
os.close(fd_1)
os.remove(path_1)
for (k_expected, v_expected), (k_out, v_out) in zip(
params_avg.items(), output.items()):
self.assertEqual(
k_expected, k_out, 'Key mismatch - expected {} but found {}. '
'(Expected list of keys: {} vs actual list of keys: {})'.format(
k_expected, k_out, params_avg.keys(), output.keys()
)
)
np.testing.assert_allclose(
v_expected.numpy(),
v_out.numpy(),
err_msg='Tensor value mismatch for key {}'.format(k_expected)
)
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_iterators.py | bert/tests/test_iterators.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import unittest
from fairseq.data import iterators
class TestIterators(unittest.TestCase):
def test_counting_iterator(self):
x = list(range(10))
itr = iterators.CountingIterator(x)
self.assertTrue(itr.has_next())
self.assertEqual(next(itr), 0)
self.assertEqual(next(itr), 1)
itr.skip(3)
self.assertEqual(next(itr), 5)
itr.skip(3)
self.assertEqual(next(itr), 9)
self.assertFalse(itr.has_next())
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/tests/test_train.py | bert/tests/test_train.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import contextlib
from io import StringIO
import unittest
from unittest.mock import MagicMock, patch
import torch
from fairseq import data
import train
def mock_trainer(epoch, num_updates, iterations_in_epoch):
trainer = MagicMock()
trainer.load_checkpoint.return_value = {
'train_iterator': {
'epoch': epoch,
'iterations_in_epoch': iterations_in_epoch,
'shuffle': False,
},
}
trainer.get_num_updates.return_value = num_updates
return trainer
def mock_dict():
d = MagicMock()
d.pad.return_value = 1
d.eos.return_value = 2
d.unk.return_value = 3
return d
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
tokens = torch.LongTensor(list(range(epoch_size)))
tokens_ds = data.TokenBlockDataset(tokens, sizes=[len(tokens)], block_size=1, pad=0, eos=1, include_targets=False)
trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
dataset = data.LanguagePairDataset(tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False)
epoch_itr = data.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=[[i] for i in range(epoch_size)],
)
return trainer, epoch_itr
class TestLoadCheckpoint(unittest.TestCase):
def setUp(self):
self.args_mock = MagicMock()
self.args_mock.optimizer_overrides = '{}'
self.patches = {
'os.makedirs': MagicMock(),
'os.path.join': MagicMock(),
'os.path.isfile': MagicMock(return_value=True),
}
self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
        for p in self.applied_patches:
            p.start()
def test_load_partial_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
train.load_checkpoint(self.args_mock, trainer, epoch_itr)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
self.assertEqual(epoch_itr.iterations_in_epoch, 51)
def test_load_full_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
train.load_checkpoint(self.args_mock, trainer, epoch_itr)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
def test_load_no_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(0, 150, 0, 0)
self.patches['os.path.isfile'].return_value = False
train.load_checkpoint(self.args_mock, trainer, epoch_itr)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 1)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
def tearDown(self):
patch.stopall()
if __name__ == '__main__':
unittest.main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/bert/concat_short_sentences.py | bert/macaron-scripts/bert/concat_short_sentences.py | import sys
def score(line1, line2):  # lower score = better candidates for concatenation
    s = len(line1) + len(line2)
    if s > 250:  # never merge if the merged sentence would exceed 250 tokens
        return 9999999
    if line1[-1] in ['.', '"', '!', '?']:  # penalize merging across sentence-final punctuation
        s += 5
    return s
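# Example: score(['Hi', '.'], ['Thanks', '.']) == 2 + 2 + 5 == 9 (the '.'
# ending adds the sentence-final penalty), while any pair whose combined
# length exceeds 250 tokens scores 9999999 and is never merged.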
def flush(buf):
    # greedily merge the cheapest adjacent pair until every sentence in the
    # paragraph has at least 5 tokens (or no mergeable pair remains)
    while buf and min(len(sent) for sent in buf) < 5:
        mi, best = 9999999, None
        for i in range(len(buf) - 1):
            s = score(buf[i], buf[i + 1])
            if s < mi:
                mi = s
                best = i
        if best is None:
            break
        buf[best] = buf[best] + buf[best + 1]
        buf.pop(best + 1)
    sys.stdout.write(''.join(' '.join(sent) + '\n' for sent in buf) + '\n')
def main():
    buf = []
    for line in sys.stdin:
        words = line.strip().split()
        if len(words) == 0:
            # paragraph boundary: merge and emit the buffered sentences
            # (guarding against an empty buffer, which previously crashed min())
            flush(buf)
            buf = []
        else:
            buf.append(words)
    if buf:
        # flush a trailing paragraph that is not followed by a blank line
        flush(buf)
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/bert/filter_and_cleanup_lines.py | bert/macaron-scripts/bert/filter_and_cleanup_lines.py | import re
import string
import sys
from collections import Counter
def is_valid(line):
    n = len(line)
    if n > 1000000 or n < 50:
        return False
    count = Counter(line)
    alpha_cnt = sum(count[ch] for ch in string.ascii_letters)
    if alpha_cnt < 50 or alpha_cnt / n < 0.7:
        return False
    if count['/'] / n > 0.05:  # filter hyperlinks
        return False
    if count['\\'] / n > 0.05:  # filter latex math equations
        return False
    if count['|'] / n > 0.05 or line[0] == '|':  # filter remaining tables
        return False
    return True
def post_cleanup(line):
line = re.sub(r'\\', ' ', line) # remove all backslashes
return ' '.join(line.strip().split()) # remove redundant spaces
existed = set()
pending_tail = string.ascii_letters + string.digits + ','
def write_output(line):
global existed
if is_valid(line):
line = post_cleanup(line)
if line not in existed:
existed.add(line)
sys.stdout.write(line + '\n')
def check_concat(line1, line2):
global pending_tail
if len(line1) == 0 or len(line2) == 0:
return False
return (line1[-1] in pending_tail) and (line2[0] in string.ascii_lowercase)
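# Example: check_concat('ends mid-clause,', 'and continues here') is True,
# so hard-wrapped lines are glued back together in main() before filtering;
# a following line that starts with an uppercase letter is left alone.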
def main():
buf = []
for line in sys.stdin:
line = ' '.join(line.strip().split())
if buf and (not check_concat(buf[-1], line)):
write_output(' '.join(buf) + '\n')
buf = []
buf.append(line)
if buf:
write_output(' '.join(buf) + '\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/bert/WikiExtractor.py | bert/macaron-scripts/bert/WikiExtractor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 2.75 (March 4, 2017)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
# orangain (orangain@gmail.com)
# Seth Cleveland (scleveland@turnitin.com)
# Bren Barn
#
# =============================================================================
# Copyright (c) 2011-2017. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License at <http://www.gnu.org/licenses/> for more details.
#
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" revid="" url="" title="">
...
</doc>
If the program is invoked with the --json flag, then each file will
contain several documents formatted as json objects, one per line, with
the following structure
{"id": "", "revid": "", "url":"", "title": "", "text": "..."}
Template expansion requires first preprocessing the whole dump and
collecting template definitions.
"""
from __future__ import unicode_literals, division
import argparse
import bz2
import codecs
import fileinput
import html
import json
import logging
import os.path
import re # TODO use regex when it becomes standard
import sys
import time
from html.entities import name2codepoint
from io import StringIO
from itertools import zip_longest
from multiprocessing import Queue, Process, Value, cpu_count
from timeit import default_timer
from types import SimpleNamespace
from urllib.parse import quote
text_type = str
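# A minimal sketch (hypothetical, not part of the extractor itself) of how a
# consumer might read the --json output described in the module docstring,
# one JSON object per line:
def _example_read_json_output(path):
    with open(path, encoding='utf-8') as f:
        for line in f:
            doc = json.loads(line)
            yield doc['id'], doc['title'], doc['text']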
# ===========================================================================
# Program version
version = '2.75'
## PARAMS ####################################################################
options = SimpleNamespace(
##
# Defined in <siteinfo>
# We include as default Template, when loading external template file.
knownNamespaces={'Template': 10},
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace='',
templatePrefix='',
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace='',
##
# Recognize only these namespaces in links
# w: Internal links to the Wikipedia
# wiktionary: Wiki dictionary
# wikt: shortcut for Wiktionary
#
acceptedNamespaces=['w', 'wiktionary', 'wikt'],
# This is obtained from <siteinfo>
urlbase='',
##
# Filter disambiguation pages
filter_disambig_pages=False,
##
# Drop tables from the article
keep_tables=False,
##
# Whether to preserve links in output
keepLinks=False,
##
# Whether to preserve section titles
keepSections=True,
##
# Whether to preserve lists
keepLists=False,
##
# Whether to output HTML instead of text
toHTML=False,
##
# Whether to write json instead of the xml-like default output format
write_json=False,
##
# Whether to expand templates
expand_templates=True,
##
## Whether to escape doc content
escape_doc=False,
##
# Print the wikipedia article revision
print_revision=False,
##
# Minimum expanded text length required to print document
min_text_length=0,
# Shared objects holding templates, redirects and cache
templates={},
redirects={},
# cache of parser templates
# FIXME: sharing this with a Manager slows down.
templateCache={},
# Elements to ignore/discard
ignored_tag_patterns=[],
discardElements=[
'gallery', 'timeline', 'noinclude', 'pre',
'table', 'tr', 'td', 'th', 'caption', 'div',
'form', 'input', 'select', 'option', 'textarea',
'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
'ref', 'references', 'img', 'imagemap', 'source', 'small',
'sub', 'sup', 'indicator'
],
)
##
# Keys for Template and Module namespaces
templateKeys = {'10', '828'}
##
# Regex for identifying disambig pages
filter_disambig_page_pattern = re.compile(r"{{disambig(uation)?(\|[^}]*)?}}")
##
# page filtering logic -- remove templates, undesired xml namespaces, and disambiguation pages
def keepPage(ns, page):
    if ns != '0':  # keep only articles (namespace 0)
        return False
# remove disambig pages if desired
if options.filter_disambig_pages:
for line in page:
if filter_disambig_page_pattern.match(line):
return False
return True
def get_url(uid):
return "%s?curid=%s" % (options.urlbase, uid)
# =========================================================================
#
# MediaWiki Markup Grammar
# https://www.mediawiki.org/wiki/Preprocessor_ABNF
# xml-char = %x9 / %xA / %xD / %x20-D7FF / %xE000-FFFD / %x10000-10FFFF
# sptab = SP / HTAB
# ; everything except ">" (%x3E)
# attr-char = %x9 / %xA / %xD / %x20-3D / %x3F-D7FF / %xE000-FFFD / %x10000-10FFFF
# literal = *xml-char
# title = wikitext-L3
# part-name = wikitext-L3
# part-value = wikitext-L3
# part = ( part-name "=" part-value ) / ( part-value )
# parts = [ title *( "|" part ) ]
# tplarg = "{{{" parts "}}}"
# template = "{{" parts "}}"
# link = "[[" wikitext-L3 "]]"
# comment = "<!--" literal "-->"
# unclosed-comment = "<!--" literal END
# ; the + in the line-eating-comment rule was absent between MW 1.12 and MW 1.22
# line-eating-comment = LF LINE-START *SP +( comment *SP ) LINE-END
# attr = *attr-char
# nowiki-element = "<nowiki" attr ( "/>" / ( ">" literal ( "</nowiki>" / END ) ) )
# wikitext-L2 = heading / wikitext-L3 / *wikitext-L2
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment / xmlish-element /
# *wikitext-L3
# ------------------------------------------------------------------------------
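# A minimal, hypothetical sketch of locating top-level {{...}} template spans
# per the grammar above; the extractor's own findMatchingBraces (used below)
# also handles {{{...}}} arguments and unbalanced input.
def _example_find_template_spans(text):
    spans = []
    depth = 0
    start = 0
    i = 0
    while i < len(text) - 1:
        pair = text[i:i + 2]
        if pair == '{{':
            if depth == 0:
                start = i
            depth += 1
            i += 2
        elif pair == '}}' and depth > 0:
            depth -= 1
            if depth == 0:
                spans.append((start, i + 2))
            i += 2
        else:
            i += 1
    return spans  # e.g. '{{a|{{b}}}}' -> [(0, 11)]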
selfClosingTags = ('hr', 'nobr', 'ref', 'references', 'nowiki')
placeholder_tags = {'math': 'formula', 'code': 'codice'}
def normalizeTitle(title):
"""Normalize title"""
# remove leading/trailing whitespace and underscores
title = title.strip(' _')
# replace sequences of whitespace and underscore chars with a single space
title = re.sub(r'[\s_]+', ' ', title)
m = re.match(r'([^:]*):(\s*)(\S(?:.*))', title)
if m:
prefix = m.group(1)
if m.group(2):
optionalWhitespace = ' '
else:
optionalWhitespace = ''
rest = m.group(3)
ns = normalizeNamespace(prefix)
if ns in options.knownNamespaces:
# If the prefix designates a known namespace, then it might be
# followed by optional whitespace that should be removed to get
# the canonical page name
# (e.g., "Category: Births" should become "Category:Births").
title = ns + ":" + ucfirst(rest)
else:
# No namespace, just capitalize first letter.
# If the part before the colon is not a known namespace, then we
# must not remove the space after the colon (if any), e.g.,
# "3001: The_Final_Odyssey" != "3001:The_Final_Odyssey".
            # However, to get the canonical page name we must contract multiple
            # spaces into one, because
            # "3001:   The_Final_Odyssey" != "3001: The_Final_Odyssey".
title = ucfirst(prefix) + ":" + optionalWhitespace + ucfirst(rest)
else:
# no namespace, just capitalize first letter
title = ucfirst(title)
return title
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
:param text The HTML (or XML) source text.
:return The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
code = m.group(1)
try:
if text[1] == "#": # character reference
if text[2] == "x":
return chr(int(code[1:], 16))
else:
return chr(int(code))
else: # named entity
return chr(name2codepoint[code])
        except Exception:
            return text  # leave as is
    return re.sub(r"&#?(\w+);", fixup, text)
# Match HTML comments
# The buggy template {{Template:T}} has a comment terminating with just "->"
comment = re.compile(r'<!--.*?-->', re.DOTALL)
# Match <nowiki>...</nowiki>
nowiki = re.compile(r'<nowiki>.*?</nowiki>')
def ignoreTag(tag):
left = re.compile(r'<%s\b.*?>' % tag, re.IGNORECASE | re.DOTALL) # both <ref> and <reference>
right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
options.ignored_tag_patterns.append((left, right))
# Match selfClosing HTML tags
selfClosing_tag_patterns = [
re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]
# Match HTML placeholder tags
placeholder_tag_patterns = [
(re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
repl) for tag, repl in placeholder_tags.items()
]
# Match preformatted lines
preformatted = re.compile(r'^ .*?$')
# Match external links (space separates second optional parameter)
externalLink = re.compile(r'\[\w+[^ ]*? (.*?)]')
externalLinkNoAnchor = re.compile(r'\[\w+[&\]]*\]')
# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
quote_quote = re.compile(r'""([^"]*?)""')
# Matches space
spaces = re.compile(r' {2,}')
# Matches dots
dots = re.compile(r'\.{4,}')
# ======================================================================
class Template(list):
"""
A Template is a list of TemplateText or TemplateArgs
"""
@classmethod
def parse(cls, body):
tpl = Template()
        # we must handle nesting, e.g.:
# {{{1|{{PAGENAME}}}
# {{{italics|{{{italic|}}}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|
#
start = 0
for s, e in findMatchingBraces(body, 3):
tpl.append(TemplateText(body[start:s]))
tpl.append(TemplateArg(body[s + 3:e - 3]))
start = e
tpl.append(TemplateText(body[start:])) # leftover
return tpl
def subst(self, params, extractor, depth=0):
# We perform parameter substitutions recursively.
# We also limit the maximum number of iterations to avoid too long or
# even endless loops (in case of malformed input).
# :see: http://meta.wikimedia.org/wiki/Help:Expansion#Distinction_between_variables.2C_parser_functions.2C_and_templates
#
# Parameter values are assigned to parameters in two (?) passes.
# Therefore a parameter name in a template can depend on the value of
# another parameter of the same template, regardless of the order in
# which they are specified in the template call, for example, using
# Template:ppp containing "{{{{{{p}}}}}}", {{ppp|p=q|q=r}} and even
# {{ppp|q=r|p=q}} gives r, but using Template:tvvv containing
# "{{{{{{{{{p}}}}}}}}}", {{tvvv|p=q|q=r|r=s}} gives s.
        # logging.debug('%*ssubst tpl %d %s', extractor.frame.depth, '', depth, self)
if depth > extractor.maxParameterRecursionLevels:
extractor.recursion_exceeded_3_errs += 1
return ''
return ''.join([tpl.subst(params, extractor, depth) for tpl in self])
def __str__(self):
return ''.join([text_type(x) for x in self])
class TemplateText(text_type):
"""Fixed text of template"""
def subst(self, params, extractor, depth):
return self
class TemplateArg(object):
"""
parameter to a template.
Has a name and a default value, both of which are Templates.
"""
def __init__(self, parameter):
"""
:param parameter: the parts of a tplarg.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
# 4|{{{{{subst|}}}CURRENTYEAR}}
# any parts in a tplarg after the first (the parameter default) are
# ignored, and an equals sign in the first part is treated as plain text.
# logging.debug('TemplateArg %s', parameter)
parts = splitParts(parameter)
self.name = Template.parse(parts[0])
if len(parts) > 1:
# This parameter has a default value
self.default = Template.parse(parts[1])
else:
self.default = None
def __str__(self):
if self.default:
return '{{{%s|%s}}}' % (self.name, self.default)
else:
return '{{{%s}}}' % self.name
def subst(self, params, extractor, depth):
"""
Substitute value for this argument from dict :param params:
Use :param extractor: to evaluate expressions for name and default.
        Limit substitution to the maximum :param depth:.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
paramName = self.name.subst(params, extractor, depth + 1)
paramName = extractor.transform(paramName)
res = ''
if paramName in params:
res = params[paramName] # use parameter value specified in template invocation
elif self.default: # use the default value
defaultValue = self.default.subst(params, extractor, depth + 1)
res = extractor.transform(defaultValue)
# logging.debug('subst arg %d %s -> %s' % (depth, paramName, res))
return res
class Frame(object):
    def __init__(self, title='', args=None, prev=None):
        self.title = title
        self.args = args if args is not None else []  # avoid a shared mutable default
        self.prev = prev
        self.depth = prev.depth + 1 if prev else 0
def push(self, title, args):
return Frame(title, args, self)
def pop(self):
return self.prev
def __str__(self):
res = ''
prev = self.prev
while prev:
if res: res += ', '
res += '(%s, %s)' % (prev.title, prev.args)
prev = prev.prev
return '<Frame [' + res + ']>'
# ======================================================================
substWords = 'subst:|safesubst:'
class Extractor(object):
"""
    An extraction task on an article.
"""
def __init__(self, id, revid, title, lines):
"""
:param id: id of page.
:param title: tutle of page.
:param lines: a list of lines.
"""
self.id = id
self.revid = revid
self.title = title
self.text = ''.join(lines)
self.magicWords = MagicWords()
self.frame = Frame()
self.recursion_exceeded_1_errs = 0 # template recursion within expand()
self.recursion_exceeded_2_errs = 0 # template recursion within expandTemplate()
self.recursion_exceeded_3_errs = 0 # parameter recursion
self.template_title_errs = 0
def write_output(self, out, text):
"""
:param out: a memory file
:param text: the text of the page
"""
url = get_url(self.id)
if options.write_json:
json_data = {
'id': self.id,
'url': url,
'title': self.title,
'text': "\n".join(text)
}
if options.print_revision:
json_data['revid'] = self.revid
            # json.dumps keeps each document on a single output line
            out_str = json.dumps(json_data, ensure_ascii=False)
            # sys.stdout is a text stream in Python 3, so no manual encoding is
            # needed here (writing bytes to it would raise a TypeError)
            out.write(out_str)
            out.write('\n')
        else:
            for line in text:
                out.write(line)
                out.write('\n')
def extract(self, out):
"""
:param out: a memory file.
"""
logging.info('%s\t%s', self.id, self.title)
# https://www.mediawiki.org/wiki/Help:Magic_words
colon = self.title.find(':')
if colon != -1:
ns = self.title[:colon]
pagename = self.title[colon + 1:]
else:
ns = '' # Main
pagename = self.title
self.magicWords['NAMESPACE'] = ns
self.magicWords['NAMESPACENUMBER'] = options.knownNamespaces.get(ns, '0')
self.magicWords['PAGENAME'] = pagename
self.magicWords['FULLPAGENAME'] = self.title
slash = pagename.rfind('/')
if slash != -1:
self.magicWords['BASEPAGENAME'] = pagename[:slash]
self.magicWords['SUBPAGENAME'] = pagename[slash + 1:]
else:
self.magicWords['BASEPAGENAME'] = pagename
self.magicWords['SUBPAGENAME'] = ''
slash = pagename.find('/')
if slash != -1:
self.magicWords['ROOTPAGENAME'] = pagename[:slash]
else:
self.magicWords['ROOTPAGENAME'] = pagename
self.magicWords['CURRENTYEAR'] = time.strftime('%Y')
self.magicWords['CURRENTMONTH'] = time.strftime('%m')
self.magicWords['CURRENTDAY'] = time.strftime('%d')
self.magicWords['CURRENTHOUR'] = time.strftime('%H')
self.magicWords['CURRENTTIME'] = time.strftime('%H:%M:%S')
text = self.text
self.text = '' # save memory
#
# @see https://doc.wikimedia.org/mediawiki-core/master/php/classParser.html
# This does the equivalent of internalParse():
#
# $dom = $this->preprocessToDom( $text, $flag );
# $text = $frame->expand( $dom );
#
# ############### Process HTML ###############
# turn into HTML, except for the content of <syntaxhighlight>
res = ''
cur = 0
for m in syntaxhighlight.finditer(text):
res += unescape(text[cur:m.start()]) + m.group(1)
cur = m.end()
text = res + unescape(text[cur:])
text = self.transform(text)
text = self.wiki2text(text)
text = compact(self.clean(text))
if sum(len(line) for line in text) < options.min_text_length:
return
self.write_output(out, text)
errs = (self.template_title_errs,
self.recursion_exceeded_1_errs,
self.recursion_exceeded_2_errs,
self.recursion_exceeded_3_errs)
if any(errs):
logging.warning("Template errors in article '%s' (%s): title(%d) recursion(%d, %d, %d)",
self.title, self.id, *errs)
def transform(self, wikitext):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
# look for matching <nowiki>...</nowiki>
res = ''
cur = 0
for m in nowiki.finditer(wikitext, cur):
res += self.transform1(wikitext[cur:m.start()]) + wikitext[m.start() + 8:m.end() - 9]
cur = m.end()
# leftover
res += self.transform1(wikitext[cur:])
return res
def transform1(self, text):
"""Transform text not containing <nowiki>"""
if options.expand_templates:
# expand templates
# See: http://www.mediawiki.org/wiki/Help:Templates
return self.expand(text)
else:
# Drop transclusions (template, parser functions)
return dropNested(text, r'{{', r'}}')
def wiki2text(self, text):
#
# final part of internalParse().)
#
# $text = $this->doTableStuff( $text );
# $text = preg_replace( '/(^|\n)-----*/', '\\1<hr />', $text );
# $text = $this->doDoubleUnderscore( $text );
# $text = $this->doHeadings( $text );
# $text = $this->replaceInternalLinks( $text );
# $text = $this->doAllQuotes( $text );
# $text = $this->replaceExternalLinks( $text );
# $text = str_replace( self::MARKER_PREFIX . 'NOPARSE', '', $text );
# $text = $this->doMagicLinks( $text );
# $text = $this->formatHeadings( $text, $origText, $isMain );
# Drop tables
# first drop residual templates, or else empty parameter |} might look like end of table.
if not options.keep_tables:
text = dropNested(text, r'{{', r'}}')
text = dropNested(text, r'{\|', r'\|}')
# Handle bold/italic/quote
if options.toHTML:
text = bold_italic.sub(r'<b>\1</b>', text)
text = bold.sub(r'<b>\1</b>', text)
text = italic.sub(r'<i>\1</i>', text)
else:
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
# drop MagicWords behavioral switches
text = magicWordsRE.sub('', text)
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in options.ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Replace br with \n
text = re.compile(r'(<\s*br\b[^>]*/\s*>)|(<br\b\s*>)', re.DOTALL | re.IGNORECASE).sub('\n', text)
# Drop discarded elements
for tag in options.discardElements:
text = dropNested(text, r'<\s*%s\b[^>]*>' % tag, r'<\s*/\s*%s>' % tag)
if not options.toHTML:
# Turn into text what is left (&nbsp;) and <syntaxhighlight>
text = unescape(text)
# replace internal links
text = replaceInternalLinks(text)
# assert '[[' not in text, "003 " + text
# replace external links
text = replaceExternalLinks(text)
return text
def clean(self, text):
"""
Removes irrelevant parts from :param: text.
"""
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', '«').replace('>>', '»')
#############################################
# Cleanup text
text = '\n' + text + '\n'
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
        text = re.sub(r' ([,:.)\]»])', r'\1', text)  # no space before closing punctuation
        text = re.sub(r'([\[(«]) ', r'\1', text)  # no space after opening brackets
text = text.replace(',,', ',').replace(',.', '.')
text = re.sub(r'(^[ \t]+)|([ \t]+$)', '', text, flags=re.MULTILINE)
# text = re.sub(r'\\', ' ', text)
text = re.sub(r' \*([^\s])', r' \1', text)
text = re.sub(r'(\w)\n([a-z])', r'\1 \2', text)
text = re.sub(r'^\|.*$', '', text, flags=re.MULTILINE)
text = text.strip()
if options.keep_tables:
            # the following regular expressions are used to remove the wikiml characters around table structures
            # yet keep the content. The order here is important: we remove certain markup like {| first and
            # then the html attributes such as 'style'. Finally we drop the remaining '|-' that delimits cells.
text = re.sub(r'!(?:\s)?style=\"[a-z]+:(?:\d+)%;\"', r'', text)
text = re.sub(r'!(?:\s)?style="[a-z]+:(?:\d+)%;[a-z]+:(?:#)?(?:[0-9a-z]+)?"', r'', text)
text = text.replace('|-', '')
text = text.replace('|', '')
if options.toHTML:
text = html.escape(text)
return text
# ----------------------------------------------------------------------
# Expand templates
maxTemplateRecursionLevels = 30
maxParameterRecursionLevels = 10
# check for template beginning
reOpen = re.compile('(?<!{){{(?!{)', re.DOTALL)
def expand(self, wikitext):
"""
:param wikitext: the text to be expanded.
Templates are frequently nested. Occasionally, parsing mistakes may
cause template insertion to enter an infinite loop, for instance when
trying to instantiate Template:Country
{{country_{{{1}}}|{{{2}}}|{{{2}}}|size={{{size|}}}|name={{{name|}}}}}
which is repeatedly trying to insert template 'country_', which is
again resolved to Template:Country. The straightforward solution of
keeping track of templates that were already inserted for the current
article would not work, because the same template may legally be used
more than once, with different parameters in different parts of the
article. Therefore, we limit the number of iterations of nested
template inclusion.
"""
# Test template expansion at:
# https://en.wikipedia.org/wiki/Special:ExpandTemplates
# https://it.wikipedia.org/wiki/Speciale:EspandiTemplate
res = ''
if self.frame.depth >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_1_errs += 1
return res
# logging.debug('%*s<expand', self.frame.depth, '')
cur = 0
# look for matching {{...}}
for s, e in findMatchingBraces(wikitext, 2):
res += wikitext[cur:s] + self.expandTemplate(wikitext[s + 2:e - 2])
cur = e
# leftover
res += wikitext[cur:]
# logging.debug('%*sexpand> %s', self.frame.depth, '', res)
return res
def templateParams(self, parameters):
"""
Build a dictionary with positional or name key to expanded parameters.
:param parameters: the parts[1:] of a template, i.e. all except the title.
"""
templateParams = {}
if not parameters:
return templateParams
# logging.debug('%*s<templateParams: %s', self.frame.length, '', '|'.join(parameters))
# Parameters can be either named or unnamed. In the latter case, their
# name is defined by their ordinal position (1, 2, 3, ...).
unnamedParameterCounter = 0
# It's legal for unnamed parameters to be skipped, in which case they
# will get default values (if available) during actual instantiation.
# That is {{template_name|a||c}} means parameter 1 gets
# the value 'a', parameter 2 value is not defined, and parameter 3 gets
# the value 'c'. This case is correctly handled by function 'split',
# and does not require any special handling.
for param in parameters:
# Spaces before or after a parameter value are normally ignored,
# UNLESS the parameter contains a link (to prevent possible gluing
# the link to the following text after template substitution)
# Parameter values may contain "=" symbols, hence the parameter
# name extends up to the first such symbol.
# It is legal for a parameter to be specified several times, in
# which case the last assignment takes precedence. Example:
# "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".
# Therefore, we don't check if the parameter has been assigned a
# value before, because anyway the last assignment should override
# any previous ones.
# FIXME: Don't use DOTALL here since parameters may be tags with
# attributes, e.g. <div class="templatequotecite">
# Parameters may span several lines, like:
# {{Reflist|colwidth=30em|refs=
# <ref name="Goode">Title</ref>
            # The '=' might occur within an HTML attribute:
            #   "<ref name=value"
            # but we stop at the first one.
m = re.match(' *([^=]*?) *?=(.*)', param, re.DOTALL)
if m:
# This is a named parameter. This case also handles parameter
# assignments like "2=xxx", where the number of an unnamed
# parameter ("2") is specified explicitly - this is handled
# transparently.
parameterName = m.group(1).strip()
parameterValue = m.group(2)
if ']]' not in parameterValue: # if the value does not contain a link, trim whitespace
parameterValue = parameterValue.strip()
templateParams[parameterName] = parameterValue
else:
# this is an unnamed parameter
unnamedParameterCounter += 1
if ']]' not in param: # if the value does not contain a link, trim whitespace
param = param.strip()
templateParams[str(unnamedParameterCounter)] = param
# logging.debug('%*stemplateParams> %s', self.frame.length, '', '|'.join(templateParams.values()))
return templateParams
def expandTemplate(self, body):
"""Expands template invocation.
:param body: the parts of a template.
:see http://meta.wikimedia.org/wiki/Help:Expansion for an explanation
of the process.
See in particular: Expansion of names and values
http://meta.wikimedia.org/wiki/Help:Expansion#Expansion_of_names_and_values
For most parser functions all names and values are expanded,
regardless of what is relevant for the result. The branching functions
(#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are exceptions.
All names in a template call are expanded, and the titles of the
tplargs in the template body, after which it is determined which
values must be expanded, and for which tplargs in the template body
the first part (default) [sic in the original doc page].
In the case of a tplarg, any parts beyond the first are never
expanded. The possible name and the value of the first part is
expanded if the title does not match a name in the template call.
:see code for braceSubstitution at
https://doc.wikimedia.org/mediawiki-core/master/php/html/Parser_8php_source.html#3397:
"""
# template = "{{" parts "}}"
# Templates and tplargs are decomposed in the same way, with pipes as
# separator, even though eventually any parts in a tplarg after the first
# (the parameter default) are ignored, and an equals sign in the first
# part is treated as plain text.
# Pipes inside inner templates and tplargs, or inside double rectangular
# brackets within the template or tplargs are not taken into account in
# this decomposition.
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | true |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/bert/split.py | bert/macaron-scripts/bert/split.py | import sys
def main():
    cnt = 0
    f_cnt = 0
    input_file = sys.argv[1]
    output_prefix = sys.argv[2]
    chunk_size = int(sys.argv[3])
    f_ov = open(f'{output_prefix}.valid.txt', 'w', encoding='utf-8')
    f_ot = None
    with open(input_file, 'r', encoding='utf-8') as f_in:
        for line in f_in:
            if cnt % 200 == 199:
                # every 200th line goes to the validation split
                f_ov.write(line)
            else:
                if cnt // chunk_size >= f_cnt:
                    # start a new training chunk, closing the previous one
                    if f_ot is not None:
                        f_ot.close()
                    f_ot = open(f'{output_prefix}.train.txt.{f_cnt}', 'w', encoding='utf-8')
                    f_cnt += 1
                f_ot.write(line)
            cnt += 1
    f_ov.close()
    if f_ot is not None:  # guard against empty input
        f_ot.close()
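# Hypothetical illustration of the split rule implemented in main(): with
# chunk_size=3, line indices 0..8 land in train chunks 0,0,0,1,1,1,2,2,2,
# except that every 200th line (index % 200 == 199) goes to validation.
def _example_destination(cnt, chunk_size=3):
    return 'valid' if cnt % 200 == 199 else f'train.{cnt // chunk_size}'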
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/bert/segment_sentence.py | bert/macaron-scripts/bert/segment_sentence.py | import re
import sys
from multiprocessing import Pool
import spacy
spacy.require_gpu()
nlp = None
def init():
    # each worker process loads its own spaCy pipeline
    global nlp
    nlp = spacy.load('en', disable=['tagger', 'ner', 'textcat'])
def segment(line):
    # one sentence per output line, dropping punctuation-only sentences
    return ''.join([str(sent) + '\n'
                    for sent in nlp(line).sents
                    if not re.match(r'^\W+$', str(sent))])
def main():
with Pool(4, initializer=init) as pool:
for text in pool.imap(segment, sys.stdin, chunksize=128):
sys.stdout.write(text)
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/test/generate_test_scripts.py | bert/macaron-scripts/test/generate_test_scripts.py | import os
import sys
import copy
import itertools
import inspect
def task(name, n_sentences, task, criterion, symmetric, n_classes, data_path):
return locals()
def params(*args):
keys = ["seed_list", "n_epoch_list", "batch_sz_list", "lr_list", "weight_decay_list"]
assert len(args) == len(keys)
values = itertools.product(*args)
return [{k: v for k, v in zip(keys, vs)} for vs in values]
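# Hypothetical illustration of the grid expansion above: two seed settings
# crossed with two learning rates yield four search configurations.
def _example_params_usage():
    grid = params(["100", "200"], ["3"], ["16"], ["0.00005", "0.00003"], ["0.01"])
    assert len(grid) == 4  # 2 seeds x 1 epoch x 1 batch size x 2 lrs x 1 decay
    return grid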
cola = (
task("cola", 8551, "glue_single", "cross_entropy_classify_binary", "", 1, "CoLA"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 60s / epoch, 3h / search
mrpc = (
task("mrpc", 3668, "glue_pair", "cross_entropy_classify_binary", "--symmetric", 1, "MRPC"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 50s / epoch, 3h / search
sts = (
task("sts", 5749, "glue_pair", "mean_squared_error", "--symmetric", 1, "STS-B"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 50s / epoch, 4h / search
rte = (
task("rte", 2475, "glue_pair", "cross_entropy_classify", "", 2, "RTE"),
params(["100 200 300 400 500 600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 60s / epoch, 3h / search
mnli = (
task("mnli", 392702, "glue_pair", "cross_entropy_classify", "", 3, "MNLI"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 5000s / epoch, bs 32 oom
mnlimm = (
task("mnlimm", 392702, "glue_pair", "cross_entropy_classify", "", 3, "MNLI-mm"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 5000s / epoch, bs 32 oom
qnli = (
task("qnli", 108436, "glue_pair", "cross_entropy_classify", "", 2, "QNLI-new"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 1600s / epoch, bs 32 oom
qqp = (
task("qqp", 363849, "glue_pair", "cross_entropy_classify_binary", "--symmetric", 1, "QQP"),
params(["100", "200", "300"], ["3 4 5"], ["16 24"], ["0.00005", "0.00003"], ["0.00", "0.01"])
) # 4000s / epoch, bs 32 oom
sst = (
task("sst", 67349, "glue_single", "cross_entropy_classify", "", 2, "SST-2"),
params(["100", "200", "300", "400", "500", "600"], ["3 4 5"], ["16 32"], ["0.00005 0.00003"], ["0.00 0.01"])
) # 400s / epoch, 18h / search
task_list = [cola, mrpc, sts, rte, mnli, mnlimm, qnli, qqp, sst]
bert_model_config = {
"bert_model_name": "macaron_pretrained",
"bert_model_path": "log/bert/transformer_bert_base_macaron/checkpoint_pretrained.pt",
"bert_model_arch": "transformer_classifier_base_macaron",
}
script_dir = os.path.join("generated/", bert_model_config["bert_model_name"])
env_vars = """
PROBLEM={name}
BERT_MODEL_NAME={bert_model_name}
TASK={task}
BERT_MODEL_PATH={bert_model_path}
N_CLASSES={n_classes}
ARCH={bert_model_arch}
N_SENT={n_sentences}
CRITERION={criterion}
SYMMETRIC={symmetric}
DATA_PATH=data/glue/{data_path}
SEED_LIST="{seed_list}"
N_EPOCH_LIST="{n_epoch_list}"
BATCH_SZ_LIST="{batch_sz_list}"
LR_LIST="{lr_list}"
WEIGHT_DECAY_LIST="{weight_decay_list}"
"""
script_template = r"""
CODE_PATH=.
cd $CODE_PATH
export PYTHONPATH=$CODE_PATH:$PYTHONPATH
for SEED in $SEED_LIST
do
for N_EPOCH in $N_EPOCH_LIST
do
for BATCH_SZ in $BATCH_SZ_LIST
do
SENT_PER_GPU=$(( BATCH_SZ / 1 ))
N_UPDATES=$(( ((N_SENT + BATCH_SZ - 1) / BATCH_SZ) * N_EPOCH ))
WARMUP_UPDATES=$(( (N_UPDATES + 5) / 10 ))
echo $SENT_PER_GPU $N_UPDATES $WARMUP_UPDATES
for LR in $LR_LIST
do
for WEIGHT_DECAY in $WEIGHT_DECAY_LIST
do
OUTPUT_PATH=log/bert_downstream/$BERT_MODEL_NAME/$PROBLEM/${N_EPOCH}-${BATCH_SZ}-${LR}-${WEIGHT_DECAY}-$SEED
mkdir -p $OUTPUT_PATH
python train.py $DATA_PATH --task $TASK --load-bert $BERT_MODEL_PATH --load-type no_out \
--arch $ARCH --n-classes $N_CLASSES \
--optimizer adam --adam-betas '(0.9, 0.999)' --adam-eps 1e-6 --clip-norm 0.0 --weight-decay $WEIGHT_DECAY \
--lr $LR --lr-scheduler linear --warmup-init-lr 1e-07 --warmup-updates $WARMUP_UPDATES --min-lr 1e-09 \
--criterion $CRITERION $SYMMETRIC \
--max-sentences $SENT_PER_GPU --max-update $N_UPDATES --seed $SEED \
--save-dir $OUTPUT_PATH --no-progress-bar --log-interval 100 --no-epoch-checkpoints \
| tee -a $OUTPUT_PATH/train_log.txt
done
done
done
done
done
"""
os.makedirs(script_dir, exist_ok=True)
os.system('cp {} {}'.format(__file__, script_dir))
for task_dict, params_list in task_list:
for i, param_dict in enumerate(params_list):
result_dict = {}
result_dict.update(task_dict)
result_dict.update(bert_model_config)
result_dict.update(param_dict)
this_env_var = env_vars.format(**result_dict)
script = this_env_var + script_template
script_name = os.path.join(script_dir, ".".join([task_dict["name"], "%02d" % i, "sh"]))
print(script_name)
with open(script_name, "w") as f:
f.write(script)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/common/precleanup_english.py | bert/macaron-scripts/common/precleanup_english.py | import re
import sys
def pre_cleanup(line):
    line = line.replace('\t', ' ')  # replace tabs with spaces
    line = ' '.join(line.strip().split())  # remove redundant spaces
    line = re.sub(r'\.{4,}', '...', line)  # normalize long runs of dots
    line = line.replace('<<', '«').replace('>>', '»')  # group << and >> together
    line = re.sub(r' ([,:.)\]»])', r'\1', line)  # remove space before closing punctuation
    line = re.sub(r'([\[(«]) ', r'\1', line)  # remove space after opening brackets
    line = line.replace(',,', ',').replace(',.', '.')  # remove redundant punctuation
    line = re.sub(r' \*(\S)', r' \1', line)  # drop a stray asterisk after a space
    return ' '.join(line.strip().split())  # remove redundant spaces
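# A quick, hypothetical self-check of the normalization above (relies on the
# character-class fixes in pre_cleanup):
def _example_pre_cleanup():
    assert pre_cleanup('Hello ,, world .... <<quoted>>') == 'Hello, world... «quoted»'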
def main():
for line in sys.stdin:
line = pre_cleanup(line)
sys.stdout.write(line + '\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/common/remove_non_utf8_chars.py | bert/macaron-scripts/common/remove_non_utf8_chars.py | import io
import sys
for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors='ignore'):
sys.stdout.write(line)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/common/truncate_by_token.py | bert/macaron-scripts/common/truncate_by_token.py | import sys
max_len = int(sys.argv[1])
for line in sys.stdin:
lst = line.strip().split(' ')
if len(lst) <= max_len:
sys.stdout.write(line)
else:
sys.stdout.write(" ".join(lst[:max_len]) + '\n')
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/common/length_filter_by_token.py | bert/macaron-scripts/common/length_filter_by_token.py | import sys
lo, hi = int(sys.argv[1]), int(sys.argv[2])  # inclusive token-count bounds
for line in sys.stdin:
    n = len(line.strip().split(' '))
    if lo <= n <= hi:
        sys.stdout.write(line)
    else:
        sys.stdout.write('\n')
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/common/length_filter_by_char.py | bert/macaron-scripts/common/length_filter_by_char.py | import sys
lo, hi = int(sys.argv[1]), int(sys.argv[2])  # inclusive character-count bounds
for line in sys.stdin:
    if lo <= len(line) <= hi:
        sys.stdout.write(line)
    else:
        sys.stdout.write('\n')
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_wnli.py | bert/macaron-scripts/glue/generate_wnli.py | import argparse
import csv
import os
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence1" and line[2] == 'sentence2' and line[3] == 'label':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
labels.append(int(line[3]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence1" and line[2] == 'sentence2' and line[3] == 'label':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
labels.append(int(line[3]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence1" and line[2] == 'sentence2':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_mnli.py | bert/macaron-scripts/glue/generate_mnli.py | import argparse
import csv
import os
import torch
_label_to_id = {
'neutral': 0,
'entailment': 1,
'contradiction': 2
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[8] == "sentence1" and line[9] == "sentence2" and line[10] == 'label1' and line[11] == 'gold_label':
continue
assert line[10] == line[11]
fo.write(f'{line[8]}\n{line[9]}\n')
labels.append(_label_to_id[line[10]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev_matched.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[8] == "sentence1" and line[9] == "sentence2" and line[15] == 'gold_label':
continue
fo.write(f'{line[8]}\n{line[9]}\n')
labels.append(_label_to_id[line[10]])
# with open(os.path.join(args.data, "dev_mismatched.tsv"), "r", encoding="utf-8") as fi:
# reader = csv.reader(fi, delimiter="\t", quotechar=None)
# for line in reader:
# if line[8] == "sentence1" and line[9] == "sentence2" and line[15] == 'gold_label':
# continue
# fo.write(f'{line[8]}\n{line[9]}\n')
# labels.append(_label_to_id[line[10]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test_matched.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[8] == "sentence1" and line[9] == "sentence2":
continue
fo.write(f'{line[8]}\n{line[9]}\n')
# with open(os.path.join(args.data, "test_mismatched.tsv"), "r", encoding="utf-8") as fi:
# reader = csv.reader(fi, delimiter="\t", quotechar=None)
# for line in reader:
# if line[8] == "sentence1" and line[9] == "sentence2":
# continue
# fo.write(f'{line[8]}\n{line[9]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_diagnostic.py | bert/macaron-scripts/glue/generate_diagnostic.py | import argparse
import csv
import os
_label_to_id = {
'neutral': 0,
'entailment': 1,
'contradiction': 2
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "diagnostic.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[1] == "sentence1" and line[2] == "sentence2":
continue
fo.write(f'{line[1]}\n{line[2]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_sts.py | bert/macaron-scripts/glue/generate_sts.py | import argparse
import csv
import os
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[7] == "sentence1" and line[8] == "sentence2" and line[9] == 'score':
continue
fo.write(f'{line[7]}\n{line[8]}\n')
            labels.append(0.5 * (float(line[9]) - 3))  # center and rescale the 0-5 similarity score
torch.save(torch.FloatTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[7] == "sentence1" and line[8] == "sentence2" and line[9] == 'score':
continue
fo.write(f'{line[7]}\n{line[8]}\n')
labels.append(0.5 * (float(line[9]) - 3))
torch.save(torch.FloatTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[7] == "sentence1" and line[8] == "sentence2":
continue
fo.write(f'{line[7]}\n{line[8]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/process_predictions.py | bert/macaron-scripts/glue/process_predictions.py | import argparse
import os
rte_labels = ['not_entailment', 'entailment']
mnli_labels = ['neutral', 'entailment', 'contradiction']
qnli_labels = ['not_entailment', 'entailment']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str,
help='input path of predictions')
parser.add_argument('--output', type=str,
help='output path of submissions')
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f'{args.output} is not a directory')
with open(os.path.join(args.output, 'CoLA.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_CoLA.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{line.strip()}\n')
cnt += 1
with open(os.path.join(args.output, 'MRPC.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_MRPC.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{line.strip()}\n')
cnt += 1
with open(os.path.join(args.output, 'STS-B.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_STS-B.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
            fo.write(f'{cnt}\t{float(line.strip()) * 2.0 + 3.0}\n')  # invert the 0.5 * (score - 3) rescaling used for training
cnt += 1
with open(os.path.join(args.output, 'RTE.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_RTE.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{rte_labels[int(line.strip())]}\n')
cnt += 1
with open(os.path.join(args.output, 'MNLI-m.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_MNLI.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{mnli_labels[int(line.strip())]}\n')
cnt += 1
with open(os.path.join(args.output, 'MNLI-mm.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_MNLI-mm.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{mnli_labels[int(line.strip())]}\n')
cnt += 1
with open(os.path.join(args.output, 'QNLI.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_QNLI.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{qnli_labels[int(line.strip())]}\n')
cnt += 1
with open(os.path.join(args.output, 'QQP.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_QQP.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{line.strip()}\n')
cnt += 1
with open(os.path.join(args.output, 'SST-2.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_SST-2.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{line.strip()}\n')
cnt += 1
with open(os.path.join(args.output, 'AX.tsv'), 'w', encoding='utf-8') as fo, open(os.path.join(args.input, 'prediction_diagnostic.txt'), 'r', encoding='utf-8') as fi:
fo.write('index\tprediction\n')
cnt = 0
for line in fi:
fo.write(f'{cnt}\t{mnli_labels[int(line.strip())]}\n')
cnt += 1
with open(os.path.join(args.output, 'WNLI.tsv'), 'w', encoding='utf-8') as fo:
fo.write('index\tprediction\n')
for i in range(146):
fo.write(f'{i}\t0\n')
if __name__ == '__main__':
main() | python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/align_text.py | bert/macaron-scripts/glue/align_text.py | import sys
import re
for line in sys.stdin:
    # re.sub returns a new string, so the result must be assigned back
    line = re.sub(r" n't\b", "n't", line)
    line = re.sub(r" 's\b", "'s", line)
    sys.stdout.write(line)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_cola.py | bert/macaron-scripts/glue/generate_cola.py | import argparse
import csv
import os
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
fo.write(line[3] + "\n")
labels.append(int(line[1]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
fo.write(line[3] + "\n")
labels.append(int(line[1]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence":
continue
fo.write(line[1] + "\n")
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/single_sentence.py | bert/macaron-scripts/glue/single_sentence.py | import argparse
import csv
import os
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "sentence" and line[1] == "label":
continue
fo.write(line[0] + "\n")
labels.append(int(line[1]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "sentence" and line[1] == "label":
continue
fo.write(line[0] + "\n")
labels.append(int(line[1]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence":
continue
fo.write(line[1] + "\n")
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_mrpc.py | bert/macaron-scripts/glue/generate_mrpc.py | import argparse
import csv
import os
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8-sig") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[3] == "#1 String" and line[4] == "#2 String" and line[0] == 'Quality':
continue
fo.write(f'{line[3]}\n{line[4]}\n')
labels.append(int(line[0]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8-sig") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[3] == "#1 String" and line[4] == "#2 String" and line[0] == 'Quality':
continue
fo.write(f'{line[3]}\n{line[4]}\n')
labels.append(int(line[0]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8-sig") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[3] == "#1 String" and line[4] == "#2 String":
continue
fo.write(f'{line[3]}\n{line[4]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_rte.py | bert/macaron-scripts/glue/generate_rte.py | import argparse
import csv
import os
import torch
_label_to_id = {
'not_entailment': 0,
'entailment': 1
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence1" and line[2] == 'sentence2' and line[3] == 'label':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
labels.append(_label_to_id[line[3]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence1" and line[2] == 'sentence2' and line[3] == 'label':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
labels.append(_label_to_id[line[3]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "sentence1" and line[2] == 'sentence2':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_mnli_mm.py | bert/macaron-scripts/glue/generate_mnli_mm.py | import argparse
import csv
import os
import torch
_label_to_id = {
'neutral': 0,
'entailment': 1,
'contradiction': 2
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[8] == "sentence1" and line[9] == "sentence2" and line[10] == 'label1' and line[11] == 'gold_label':
continue
assert line[10] == line[11]
fo.write(f'{line[8]}\n{line[9]}\n')
labels.append(_label_to_id[line[10]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev_mismatched.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[8] == "sentence1" and line[9] == "sentence2" and line[15] == 'gold_label':
continue
fo.write(f'{line[8]}\n{line[9]}\n')
labels.append(_label_to_id[line[10]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test_mismatched.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[8] == "sentence1" and line[9] == "sentence2":
continue
fo.write(f'{line[8]}\n{line[9]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_qqp.py | bert/macaron-scripts/glue/generate_qqp.py | import argparse
import csv
import os
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "id" and line[1] == "qid1" and line[2] == 'qid2' and line[3] == 'question1' and line[4] == 'question2' and line[5] == 'is_duplicate':
continue
if len(line) != 6:
# print(line)
continue
fo.write(f'{line[3]}\n{line[4]}\n')
labels.append(int(line[5]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "id" and line[1] == "qid1" and line[2] == 'qid2' and line[3] == 'question1' and line[4] == 'question2' and line[5] == 'is_duplicate':
continue
if len(line) != 6:
# print(line, 'valid')
continue
fo.write(f'{line[3]}\n{line[4]}\n')
labels.append(int(line[5]))
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "id" and line[1] == "question1" and line[2] == 'question2':
continue
if len(line) != 3:
# print(line, 'test')
continue
fo.write(f'{line[1]}\n{line[2]}\n')
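# Example invocation (sketch; paths are placeholders):
#   python generate_qqp.py glue_data/QQP --output qqp-processed
# This writes train.txt/valid.txt/test.txt (each pair on two consecutive
# lines) plus train_labels.pt and valid_labels.pt under --output.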
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/generate_qnli.py | bert/macaron-scripts/glue/generate_qnli.py | import argparse
import csv
import os
import torch
_label_to_id = {
'not_entailment': 0,
'entailment': 1
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("data", type=str,
help="path of data")
parser.add_argument("--output", type=str, required=True,
help="path of output")
args = parser.parse_args()
if not os.path.exists(args.output):
os.mkdir(args.output)
elif not os.path.isdir(args.output):
raise FileExistsError(f"{args.output} is not a directory")
labels = []
with open(os.path.join(args.data, "train.tsv"), "r", encoding="utf-8") as fi, open(
os.path.join(args.output, "train.txt"), "w", encoding="utf-8") as fo:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "question" and line[2] == 'sentence' and line[3] == 'label':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
labels.append(_label_to_id[line[3]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "train_labels.pt"))
labels = []
with open(os.path.join(args.output, "valid.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "dev.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "question" and line[2] == 'sentence' and line[3] == 'label':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
labels.append(_label_to_id[line[3]])
torch.save(torch.LongTensor(labels), os.path.join(args.output, "valid_labels.pt"))
with open(os.path.join(args.output, "test.txt"), "w", encoding="utf-8") as fo:
with open(os.path.join(args.data, "test.tsv"), "r", encoding="utf-8") as fi:
reader = csv.reader(fi, delimiter="\t", quotechar=None)
for line in reader:
if line[0] == "index" and line[1] == "question" and line[2] == 'sentence':
continue
fo.write(f'{line[1]}\n{line[2]}\n')
if __name__ == '__main__':
main()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/macaron-scripts/glue/download_glue_data.py | bert/macaron-scripts/glue/download_glue_data.py | ''' Script for downloading all GLUE data.
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
'''
import os
import sys
import shutil
import argparse
import tempfile
import urllib.request
import zipfile
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4',
"SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
"MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc',
"QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5',
"STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5',
"MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce',
"SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df',
"QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601',
"RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb',
"WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf',
"diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'}
MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt'
MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt'
def download_and_extract(task, data_dir):
print("Downloading and extracting %s..." % task)
data_file = "%s.zip" % task
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(data_dir)
os.remove(data_file)
print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
print("Processing MRPC...")
mrpc_dir = os.path.join(data_dir, "MRPC")
if not os.path.isdir(mrpc_dir):
os.mkdir(mrpc_dir)
if path_to_data:
mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
else:
print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
dev_ids = []
with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split('\t'))
with open(mrpc_train_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \
open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split('\t')
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
with open(mrpc_test_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh:
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split('\t')
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
print("\tCompleted!")
def download_diagnostic(data_dir):
print("Downloading and extracting diagnostic...")
if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
os.mkdir(os.path.join(data_dir, "diagnostic"))
data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
print("\tCompleted!")
return
def get_tasks(task_names):
task_names = task_names.split(',')
if "all" in task_names:
tasks = TASKS
else:
tasks = []
for task_name in task_names:
assert task_name in TASKS, "Task %s not found!" % task_name
tasks.append(task_name)
return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
parser.add_argument('--tasks', help='tasks to download data for as a comma separated string',
type=str, default='all')
    parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt',
type=str, default='')
args = parser.parse_args(arguments)
if not os.path.isdir(args.data_dir):
os.mkdir(args.data_dir)
tasks = get_tasks(args.tasks)
for task in tasks:
if task == 'MRPC':
format_mrpc(args.data_dir, args.path_to_mrpc)
elif task == 'diagnostic':
download_diagnostic(args.data_dir)
else:
download_and_extract(task, args.data_dir)
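# Example invocations (sketch; --tasks entries must appear in TASKS above):
#   python download_glue_data.py --data_dir glue_data --tasks all
#   python download_glue_data.py --data_dir glue_data --tasks MNLI,QQP,QNLI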
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/search.py | bert/fairseq/search.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
class Search(object):
def __init__(self, tgt_dict):
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.scores_buf = None
self.indices_buf = None
self.beams_buf = None
def _init_buffers(self, t):
if self.scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
    def step(self, step, lprobs, scores):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.indices_buf, self.beams_buf
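def _example_beam_search_step():
    # Minimal sketch of one BeamSearch step on toy log-probabilities.
    # `_ToyDict` is a hypothetical stand-in for a fairseq Dictionary; Search
    # only reads the pad/unk/eos indices and the vocabulary size from it.
    class _ToyDict:
        def pad(self): return 1
        def unk(self): return 3
        def eos(self): return 2
        def __len__(self): return 10
    bsz, beam_size = 2, 4
    search = BeamSearch(_ToyDict())
    lprobs = torch.randn(bsz, beam_size, 10).log_softmax(dim=-1)
    scores = lprobs.new_zeros(bsz, beam_size, 0)  # no history yet at step 0
    # returns up to 2*beam_size candidates per sentence: (scores, indices, beams)
    return search.step(0, lprobs, scores)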
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.diversity_buf = None
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
'DiverseBeamSearch requires --beam to be divisible by the number of groups'
)
group_size = beam_size // self.num_groups
# initialize diversity penalty
if self.diversity_buf is None:
self.diversity_buf = lprobs.new()
torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g::self.num_groups, :]
scores_g = scores[:, g::self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(lprobs_g, self.diversity_strength, self.diversity_buf.unsqueeze(1))
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
self.diversity_buf.scatter_add_(
1,
indices_buf,
self.diversity_buf.new_ones(indices_buf.size())
)
# interleave results from different groups
self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
return self.scores_buf, self.indices_buf, self.beams_buf
class Sampling(Search):
def __init__(self, tgt_dict, sampling_topk=-1, sampling_temperature=1.):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_temperature = sampling_temperature
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
# we exclude the first two vocab items, one of which is pad
assert self.pad == 1, 'sampling assumes the first two symbols can be ignored'
lprobs_nopad = lprobs[:, :, 2:]
# only sample from top-k candidates
if self.sampling_topk > 0:
lprobs_nopad, topk_indices = lprobs_nopad.topk(self.sampling_topk)
# sampling temperature
if self.sampling_temperature != 1.:
lprobs_nopad = lprobs_nopad.div_(self.sampling_temperature)
# sample
probs_nopad = lprobs_nopad.exp_()
if step == 0:
self.indices_buf = torch.multinomial(
probs_nopad.view(bsz, -1),
beam_size,
replacement=True,
out=self.indices_buf,
).view(bsz, beam_size)
else:
self.indices_buf = torch.multinomial(
probs_nopad.view(bsz * beam_size, -1),
1,
replacement=True,
out=self.indices_buf,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs_nopad = probs_nopad.expand(bsz, beam_size, -1)
# gather scores
torch.gather(
probs_nopad,
dim=2,
index=self.indices_buf.unsqueeze(-1),
out=self.scores_buf,
)
self.scores_buf = self.scores_buf.log_().view(bsz, -1)
# remap indices if using top-k sampling
if self.sampling_topk > 0:
self.indices_buf = torch.gather(
topk_indices.expand(bsz, beam_size, -1),
dim=2,
index=self.indices_buf.unsqueeze(-1),
).squeeze(2)
# remap indices since we excluded the first two vocab items
self.indices_buf.add_(2)
if step == 0:
self.beams_buf = self.indices_buf.new_zeros(bsz, beam_size)
else:
self.beams_buf = torch.arange(0, beam_size, out=self.beams_buf).repeat(bsz, 1)
# make scores cumulative
self.scores_buf.add_(
torch.gather(
scores[:, :, step - 1],
dim=1,
index=self.beams_buf,
)
)
return self.scores_buf, self.indices_buf, self.beams_buf
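# Example (sketch): sample from the top-10 tokens with temperature 0.8,
# mirroring generation with --sampling --sampling-topk 10
# --sampling-temperature 0.8; the assert in step() requires pad index 1,
# which holds for standard fairseq dictionaries:
#   sampler = Sampling(tgt_dict, sampling_topk=10, sampling_temperature=0.8)
#   scores, indices, beams = sampler.step(step, lprobs, scores)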
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/distributed_utils.py | bert/fairseq/distributed_utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from collections import namedtuple
import pickle
import torch
from torch import nn
from fairseq import utils
def is_master(args):
return args.distributed_rank == 0
_use_c10d = [True]
C10dStatus = namedtuple('C10dStatus', ['has_c10d', 'is_default'])
if hasattr(nn.parallel, 'deprecated'):
c10d_status = C10dStatus(has_c10d=True, is_default=True)
elif hasattr(nn.parallel, '_DistributedDataParallelC10d'):
c10d_status = C10dStatus(has_c10d=True, is_default=False)
else:
c10d_status = C10dStatus(has_c10d=False, is_default=False)
if c10d_status.is_default:
import torch.distributed as dist_c10d
import torch.distributed.deprecated as dist_no_c10d
elif c10d_status.has_c10d:
import torch.distributed.c10d as dist_c10d
import torch.distributed as dist_no_c10d
else:
import torch.distributed as dist_no_c10d
def distributed_init(args):
if args.distributed_world_size == 1:
raise ValueError('Cannot initialize distributed with distributed_world_size=1')
if args.ddp_backend == 'no_c10d':
_use_c10d[0] = False
print('| distributed init (rank {}): {}'.format(
args.distributed_rank, args.distributed_init_method), flush=True)
if _use_c10d[0]:
init_fn = dist_c10d.init_process_group
else:
init_fn = dist_no_c10d.init_process_group
init_fn(
backend=args.distributed_backend,
init_method=args.distributed_init_method,
world_size=args.distributed_world_size,
rank=args.distributed_rank,
)
if not is_master(args):
suppress_output()
return args.distributed_rank
def suppress_output():
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
if 'force' in kwargs:
force = kwargs.pop('force')
if force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def get_rank():
if _use_c10d[0]:
return dist_c10d.get_rank()
else:
return dist_no_c10d.get_rank()
def get_world_size():
if _use_c10d[0]:
return dist_c10d.get_world_size()
else:
return dist_no_c10d.get_world_size()
def get_default_group():
if _use_c10d[0]:
return dist_c10d.group.WORLD
else:
return dist_no_c10d.group.WORLD
def all_reduce(tensor, group=None):
if group is None:
group = get_default_group()
if _use_c10d[0]:
return dist_c10d.all_reduce(tensor, group=group)
else:
return dist_no_c10d.all_reduce(tensor, group=group)
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group (optional): group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
rank = get_rank()
world_size = get_world_size()
buffer_size = max_size * world_size
if not hasattr(all_gather_list, '_buffer') or \
all_gather_list._buffer.numel() < buffer_size:
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
buffer = all_gather_list._buffer
buffer.zero_()
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError('encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255*256
buffer_rank = buffer[rank * max_size : (rank + 1) * max_size]
buffer_rank[0] = enc_size // 255 # this encoding works for max_size < 65k
buffer_rank[1] = enc_size % 255
buffer_rank[2:enc_size+2] = torch.ByteTensor(list(enc))
all_reduce(buffer, group=group)
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size : (i + 1) * max_size]
size = (255 * utils.item(out_buffer[0])) + utils.item(out_buffer[1])
if size > 0:
result.append(
pickle.loads(bytes(out_buffer[2:size+2].tolist()))
)
return result
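def _example_all_gather_list():
    # Sketch: gather arbitrary picklable per-worker stats. Assumes the process
    # group has already been set up via distributed_init(args).
    stats = {'rank': get_rank(), 'ntokens': 1024}
    return all_gather_list(stats)  # list with one entry per rank, in rank order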
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/utils.py | bert/fairseq/utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from collections import defaultdict, OrderedDict
import logging
import os
import re
import torch
import traceback
from torch.serialization import default_restore_location
def torch_persistent_save(*args, **kwargs):
for i in range(3):
try:
return torch.save(*args, **kwargs)
except Exception:
if i == 2:
logging.error(traceback.format_exc())
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for k, v in state_dict.items():
cpu_dict[k] = convert_state_dict_type(v)
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
def save_state(filename, args, model, criterion, optimizer, lr_scheduler,
num_updates, optim_history=None, extra_state=None):
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
'args': args,
'model': model.state_dict() if model else {},
'optimizer_history': optim_history + [
{
'criterion_name': criterion.__class__.__name__,
'optimizer_name': optimizer.__class__.__name__,
'lr_scheduler_state': lr_scheduler.state_dict(),
'num_updates': num_updates,
}
],
'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
'extra_state': extra_state,
}
torch_persistent_save(state_dict, filename)
def load_model_state(filename, model):
if not os.path.exists(filename):
return None, [], None
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
state = _upgrade_state_dict(state)
model.upgrade_state_dict(state['model'])
# load model parameters
try:
model.load_state_dict(state['model'], strict=True)
except Exception:
raise Exception('Cannot load model parameters from checkpoint, '
'please ensure that the architectures match')
return state['extra_state'], state['optimizer_history'], state['last_optimizer_state']
def load_bert_state(filename, model, args):
assert os.path.exists(filename), f"File {filename} not found."
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
state = _upgrade_state_dict(state)
state['model'] = _upgrade_state_dict_bert(state['model'], args)
model.upgrade_state_dict(state['model'])
# load model parameters
model.load_state_dict(state['model'], strict=False)
return state['extra_state'], state['optimizer_history'], state['last_optimizer_state']
def _upgrade_state_dict_bert(state, args):
load_type = getattr(args, "load_type", "all")
if "no_fc" in load_type:
fc_pattern = re.compile(r"(encoder|decoder)\.layers\.[0-9]+\.fc[0-9]+\.(weight|bias)")
state = {k: v for k, v in state.items() if not fc_pattern.match(k)}
if "no_out" in load_type:
state['decoder.embed_tokens.weight'] = state['embed_out']
del state['embed_out']
del state['classifier.weight']
del state['classifier.bias']
return state
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
# add optimizer_history
if 'optimizer_history' not in state:
state['optimizer_history'] = [
{
'criterion_name': 'CrossEntropyCriterion',
'best_loss': state['best_loss'],
},
]
state['last_optimizer_state'] = state['optimizer']
del state['optimizer']
del state['best_loss']
# move extra_state into sub-dictionary
if 'epoch' in state and 'extra_state' not in state:
state['extra_state'] = {
'epoch': state['epoch'],
'batch_offset': state['batch_offset'],
'val_loss': state['val_loss'],
}
del state['epoch']
del state['batch_offset']
del state['val_loss']
# reduce optimizer history's memory usage (only keep the last state)
if 'optimizer' in state['optimizer_history'][-1]:
state['last_optimizer_state'] = state['optimizer_history'][-1]['optimizer']
for optim_hist in state['optimizer_history']:
del optim_hist['optimizer']
# record the optimizer class name
if 'optimizer_name' not in state['optimizer_history'][-1]:
state['optimizer_history'][-1]['optimizer_name'] = 'FairseqNAG'
# move best_loss into lr_scheduler_state
if 'lr_scheduler_state' not in state['optimizer_history'][-1]:
state['optimizer_history'][-1]['lr_scheduler_state'] = {
'best': state['optimizer_history'][-1]['best_loss'],
}
del state['optimizer_history'][-1]['best_loss']
# keep track of number of updates
if 'num_updates' not in state['optimizer_history'][-1]:
state['optimizer_history'][-1]['num_updates'] = 0
# old model checkpoints may not have separate source/target positions
if hasattr(state['args'], 'max_positions') and not hasattr(state['args'], 'max_source_positions'):
state['args'].max_source_positions = state['args'].max_positions
state['args'].max_target_positions = state['args'].max_positions
# use stateful training data iterator
if 'train_iterator' not in state['extra_state']:
state['extra_state']['train_iterator'] = {
'epoch': state['extra_state']['epoch'],
'iterations_in_epoch': state['extra_state'].get('batch_offset', 0),
}
return state
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
"""Load an ensemble of models for inference.
model_arg_overrides allows you to pass a dictionary model_arg_overrides --
{'arg_name': arg} -- to override model args that were used during model
training
"""
# load model architectures and weights
states = []
for filename in filenames:
if not os.path.exists(filename):
raise IOError('Model file not found: {}'.format(filename))
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
state = _upgrade_state_dict(state)
states.append(state)
ensemble = []
for state in states:
args = state['args']
if model_arg_overrides is not None:
args = _override_model_args(args, model_arg_overrides)
# build model for ensemble
model = task.build_model(args)
model.upgrade_state_dict(state['model'])
model.load_state_dict(state['model'], strict=True)
ensemble.append(model)
return ensemble, args
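# Example (sketch; checkpoint path and override are placeholders):
#   ensemble, args = load_ensemble_for_inference(
#       ['checkpoints/checkpoint_best.pt'], task,
#       model_arg_overrides={'dropout': 0.0},
#   )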
def _override_model_args(args, model_arg_overrides):
# Uses model_arg_overrides {'arg_name': arg} to override model args
for arg_name, arg_val in model_arg_overrides.items():
setattr(args, arg_name, arg_val)
return args
def move_to_cuda(sample):
if len(sample) == 0:
return {}
def _move_to_cuda(maybe_tensor):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.cuda()
elif isinstance(maybe_tensor, dict):
return {
key: _move_to_cuda(value)
for key, value in maybe_tensor.items()
}
elif isinstance(maybe_tensor, list):
return [_move_to_cuda(x) for x in maybe_tensor]
else:
return maybe_tensor
return _move_to_cuda(sample)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_fairseq_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
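def _example_incremental_state(module, incremental_state):
    # Sketch: cache a tensor across decoding steps for one module instance.
    # Keys are namespaced per instance, so two layers of the same class do not
    # collide. The shape below is a hypothetical (bsz, heads, head_dim).
    cached = get_incremental_state(module, incremental_state, 'prev_key')
    if cached is None:
        cached = torch.zeros(1, 8, 64)
    set_incremental_state(module, incremental_state, 'prev_key', cached)
    return cached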
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str):
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, 'r') as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
print("| Found {}/{} types in embedding file.".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor([float(weight) for weight in pieces[1:]])
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>']
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return ' '.join(hypo_tokens)
def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe):
from fairseq import tokenizer
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe)
if align_dict is not None:
hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string())
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tokenizer.Tokenizer.tokenize(hypo_str, tgt_dict, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx, left_pad, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
if onnx_trace:
range_buf = torch._dim_arange(like=tensor, dim=1) + padding_idx + 1
mask = tensor.ne(padding_idx)
positions = range_buf.expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
        return positions * mask.long() + padding_idx * (1 - mask.long())
max_pos = padding_idx + 1 + tensor.size(1)
if not hasattr(make_positions, 'range_buf'):
make_positions.range_buf = tensor.new()
make_positions.range_buf = make_positions.range_buf.type_as(tensor)
if make_positions.range_buf.numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
mask = tensor.ne(padding_idx)
positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return tensor.clone().masked_scatter_(mask, positions[mask])
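# Worked example (padding_idx=1, left_pad=False):
#   tensor:    [[5, 6, 7], [5, 6, 1]]
#   positions: [[2, 3, 4], [2, 3, 1]]  # pads keep padding_idx; others count from 2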
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, 'buf'):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
range = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
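# Worked example (padding_idx=1, left_to_right=True):
#   src_tokens: [[1, 1, 7, 8], [5, 6, 7, 8]]
#   output:     [[7, 8, 1, 1], [5, 6, 7, 8]]  # left padding rotated to the right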
def item(tensor):
if hasattr(tensor, 'item'):
return tensor.item()
if hasattr(tensor, '__getitem__'):
return tensor[0]
return tensor
def clip_grad_norm_(tensor, max_norm):
grad_norm = item(torch.norm(tensor))
if grad_norm > max_norm > 0:
clip_coef = max_norm / (grad_norm + 1e-6)
tensor.mul_(clip_coef)
return grad_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def checkpoint_paths(path, pattern=r'checkpoint(\d+)\.pt'):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = int(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
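# Example: checkpoint files under `path` matching checkpoint<N>.pt, newest first:
#   checkpoint_paths('checkpoints') -> ['checkpoints/checkpoint3.pt',
#                                       'checkpoints/checkpoint2.pt', ...]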
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
            if isinstance(arg, (float, int)):
max_positions = min(max_positions, arg)
else:
max_positions = tuple(
map(nullsafe_min, zip(max_positions, arg))
)
return max_positions
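# Examples (None means "no constraint"):
#   resolve_max_positions((1024, 1024), (512, None)) -> (512, 1024)
#   resolve_max_positions(None, 4096, 2048) -> 2048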
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/multiprocessing_pdb.py | bert/fairseq/multiprocessing_pdb.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import multiprocessing
import os
import pdb
import sys
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
_stdin_fd = sys.stdin.fileno()
_stdin = None
_stdin_lock = multiprocessing.Lock()
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with self._stdin_lock:
try:
if not self._stdin:
self._stdin = os.fdopen(self._stdin_fd)
sys.stdin = self._stdin
self.cmdloop()
finally:
sys.stdin = stdin_bak
pdb = MultiprocessingPdb()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/trainer.py | bert/fairseq/trainer.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a network across multiple GPUs.
"""
from collections import defaultdict, OrderedDict
import contextlib
from itertools import chain
import torch
from fairseq import distributed_utils, models, optim, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.optim import lr_scheduler
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, args, task, model, criterion, dummy_batch):
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
self.args = args
self.task = task
# copy model and criterion to current device
self.criterion = criterion.cuda()
if args.fp16:
self._model = model.half().cuda()
else:
self._model = model.cuda()
# initialize meters
self.meters = OrderedDict()
self.meters['train_loss'] = AverageMeter()
self.meters['train_nll_loss'] = AverageMeter()
self.meters['valid_loss'] = AverageMeter()
self.meters['valid_nll_loss'] = AverageMeter()
self.meters['train_acc'] = AverageMeter()
self.meters['valid_acc'] = AverageMeter()
self.meters['wps'] = TimeMeter() # words per second
self.meters['ups'] = TimeMeter() # updates per second
self.meters['wpb'] = AverageMeter() # words per batch
self.meters['bsz'] = AverageMeter() # sentences per batch
self.meters['gnorm'] = AverageMeter() # gradient norm
self.meters['clip'] = AverageMeter() # % of updates clipped
self.meters['oom'] = AverageMeter() # out of memory
if args.fp16:
self.meters['loss_scale'] = AverageMeter() # dynamic loss scale
self.meters['wall'] = TimeMeter() # wall time in seconds
self.meters['train_wall'] = StopwatchMeter() # train wall time in seconds
self.cache = OrderedDict()
self.cache['valid_x'] = []
self.cache['valid_y'] = []
self.cache['valid_tp'] = []
self.cache['valid_tn'] = []
self.cache['valid_fp'] = []
self.cache['valid_fn'] = []
self._dummy_batch = dummy_batch
self._num_updates = 0
self._optim_history = None
self._optimizer = None
self._wrapped_model = None
@property
def model(self):
if self._wrapped_model is None:
if self.args.distributed_world_size > 1:
self._wrapped_model = models.DistributedFairseqModel(
self.args, self._model,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
def _build_optimizer(self):
if self.args.fp16:
if torch.cuda.get_device_capability(0)[0] < 7:
print('| WARNING: your device does NOT support faster training with --fp16, '
'please switch to FP32 which is likely to be faster')
params = list(filter(lambda p: p.requires_grad, self.model.parameters()))
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if torch.cuda.get_device_capability(0)[0] >= 7:
print('| NOTICE: your device may support faster training with --fp16')
self._optimizer = optim.build_optimizer(self.args, self.model.parameters())
self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self._optimizer)
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if distributed_utils.is_master(self.args): # only save one checkpoint
extra_state['train_meters'] = self.meters
utils.save_state(
filename, self.args, self.get_model(), self.criterion, self.optimizer,
self.lr_scheduler, self._num_updates, self._optim_history, extra_state,
)
def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None):
"""Load all training state from a checkpoint file."""
extra_state, self._optim_history, last_optim_state = \
utils.load_model_state(filename, self.get_model())
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert last_optim['criterion_name'] == self.criterion.__class__.__name__, \
'criterion does not match; please reset the optimizer (--reset-optimizer)'
assert last_optim['optimizer_name'] == self.optimizer.__class__.__name__, \
'optimizer does not match; please reset the optimizer (--reset-optimizer)'
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self._num_updates = last_optim['num_updates']
if extra_state is not None and 'train_meters' in extra_state:
self.meters.update(extra_state['train_meters'])
del extra_state['train_meters']
# reset TimeMeters, since their start times don't make sense anymore
for meter in self.meters.values():
if isinstance(meter, TimeMeter):
meter.reset()
return extra_state
def train_step(self, samples, dummy_batch=False):
"""Do forward, backward and parameter update."""
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.model.train()
self.zero_grad()
if not dummy_batch:
self.meters['train_wall'].start()
# forward and backward pass
logging_outputs, sample_sizes, ooms = [], [], 0
for i, sample in enumerate(samples):
sample = self._prepare_sample(sample)
if sample is None:
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
try:
# forward
loss, sample_size, logging_output = self.task.get_loss(
self.model, self.criterion, sample,
)
if ignore_grad:
loss *= 0
if self.args.distributed_world_size > 1:
# only all-reduce gradients in the last backwards pass
if i < len(samples) - 1:
self.model.need_reduction = False
else:
self.model.need_reduction = True
# backward
self.optimizer.backward(loss)
if not ignore_grad:
logging_outputs.append(logging_output)
sample_sizes.append(sample_size)
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
ooms += 1
self.zero_grad()
else:
raise e
if dummy_batch:
return None
# gather logging outputs from all replicas
if self.args.distributed_world_size > 1:
logging_outputs, sample_sizes, ooms = zip(*distributed_utils.all_gather_list(
[logging_outputs, sample_sizes, ooms],
))
logging_outputs = list(chain.from_iterable(logging_outputs))
sample_sizes = list(chain.from_iterable(sample_sizes))
ooms = sum(ooms)
if ooms == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping update')
self.zero_grad()
return None
# aggregate logging outputs and sample sizes
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
sample_size = self.criterion.__class__.grad_denom(sample_sizes)
if not all(k in logging_output for k in ['ntokens', 'nsentences']):
raise Exception((
'Please update the {}.aggregate_logging_outputs() method to '
'return ntokens and nsentences'
).format(self.criterion.__class__.__name__))
try:
# normalize grads by sample size
self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size))
# clip grads
grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
# take an optimization step
self.optimizer.step()
self._num_updates += 1
# update learning rate
self.lr_scheduler.step_update(self._num_updates)
# update meters
ntokens = logging_output.get('ntokens', 0)
nsentences = logging_output.get('nsentences', 0)
self.meters['wps'].update(ntokens)
self.meters['ups'].update(1.)
self.meters['wpb'].update(ntokens)
self.meters['bsz'].update(nsentences)
self.meters['gnorm'].update(grad_norm)
self.meters['clip'].update(
1. if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0.
)
self.meters['oom'].update(ooms)
self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)
if 'acc' in logging_output:
self.meters['train_acc'].update(logging_output.get('acc', 0), sample_size)
elif 'nsp_acc' in logging_output:
self.meters['train_acc'].update(logging_output.get('nsp_acc', 0), nsentences)
if 'nll_loss' in logging_output:
self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
elif 'mlm_loss' in logging_output:
self.meters['train_nll_loss'].update(logging_output.get('mlm_loss', 0), sample_size)
except OverflowError as e:
print('| WARNING: overflow detected, ' + str(e))
self.zero_grad()
logging_output = None
if self.args.fp16:
self.meters['loss_scale'].reset()
self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)
self.meters['train_wall'].stop()
return logging_output
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
with torch.no_grad():
self.model.eval()
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
ignore_results = True
else:
ignore_results = False
try:
_loss, sample_size, logging_output = self.task.get_loss(
self.model, self.criterion, sample,
)
except RuntimeError as e:
if 'out of memory' in str(e) and not raise_oom:
print('| WARNING: ran out of memory, retrying batch')
for p in self.model.parameters():
if p.grad is not None:
del p.grad # free some memory
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
else:
raise e
if ignore_results:
logging_output, sample_size = {}, 0
# gather logging outputs from all replicas
if self.args.distributed_world_size > 1:
logging_output, sample_size = zip(*distributed_utils.all_gather_list(
[logging_output, sample_size],
))
logging_output = list(logging_output)
sample_size = list(sample_size)
else:
logging_output = [logging_output]
sample_size = [sample_size]
# aggregate logging outputs and sample sizes
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_output)
sample_size = self.criterion.__class__.grad_denom(sample_size)
# update meters for validation
ntokens = logging_output.get('ntokens', 0)
nsentences = logging_output.get('nsentences', 0)
self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size)
if 'acc' in logging_output:
self.meters['valid_acc'].update(logging_output.get('acc', 0), sample_size)
elif 'nsp_acc' in logging_output:
            self.meters['valid_acc'].update(logging_output.get('nsp_acc', 0), nsentences)
if 'nll_loss' in logging_output:
self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
elif 'mlm_loss' in logging_output:
            self.meters['valid_nll_loss'].update(logging_output.get('mlm_loss', 0), sample_size)
if 'x' in logging_output:
self.cache['valid_x'].append(logging_output['x'])
if 'y' in logging_output:
self.cache['valid_y'].append(logging_output['y'])
if 'tp' in logging_output:
self.cache['valid_tp'].append(logging_output['tp'])
if 'tn' in logging_output:
self.cache['valid_tn'].append(logging_output['tn'])
if 'fp' in logging_output:
self.cache['valid_fp'].append(logging_output['fp'])
if 'fn' in logging_output:
self.cache['valid_fn'].append(logging_output['fn'])
return logging_output
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
self.train_step(dummy_batch, dummy_batch=True)
self.zero_grad()
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate based on the validation loss."""
return self.lr_scheduler.step(epoch, val_loss)
def lr_step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.lr_scheduler.step_update(num_updates)
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_meter(self, name):
"""Get a specific meter by name."""
if name not in self.meters:
return None
return self.meters[name]
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def _prepare_sample(self, sample):
if sample is None or len(sample) == 0:
return None
return utils.move_to_cuda(sample)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/__init__.py | bert/fairseq/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .multiprocessing_pdb import pdb
__all__ = ['pdb']
import fairseq.criterions
import fairseq.models
import fairseq.modules
import fairseq.optim
import fairseq.optim.lr_scheduler
import fairseq.tasks
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tokenizer.py | bert/fairseq/tokenizer.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from collections import Counter
import os
import re
import torch
from multiprocessing import Pool
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
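# safe_readline exists because seeking into the middle of a multi-byte UTF-8
# character makes readline() raise UnicodeDecodeError; backing up one byte at
# a time finds the nearest character boundary. Typical use:
#   f.seek(byte_offset)
#   safe_readline(f)     # consume the (possibly partial) line at the offset
#   line = f.readline()  # first complete line after the offset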
class Tokenizer:
@staticmethod
def add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
counter = Counter()
with open(filename, 'r') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
line = f.readline()
while line:
for word in tokenize(line):
counter.update([word])
counter.update([eos_word])
if f.tell() > end:
break
line = f.readline()
return counter
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize, num_workers):
def merge_result(counter):
for w, c in counter.items():
dict.add_symbol(w, c)
if num_workers > 1:
pool = Pool(processes=num_workers)
results = []
for worker_id in range(num_workers):
results.append(pool.apply_async(
Tokenizer.add_file_to_dictionary_single_worker,
(filename, tokenize, dict.eos_word, worker_id, num_workers)
))
pool.close()
pool.join()
for r in results:
merge_result(r.get())
else:
merge_result(Tokenizer.add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))
@staticmethod
def binarize(filename, dict, consumer, tokenize=tokenize_line,
append_eos=True, reverse_order=False,
offset=0, end=-1):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(filename, 'r') as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = Tokenizer.tokenize(
line=line,
dict=dict,
tokenize=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
nseq += 1
ntok += len(ids)
consumer(ids)
line = f.readline()
return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}
@staticmethod
def find_offsets(filename, num_chunks):
with open(filename, 'r') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
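    # Example (sketch): chunk a corpus at line boundaries and binarize each
    # chunk independently. The final offset stays 0, which binarize() treats
    # as "read to end of file":
    #   offsets = Tokenizer.find_offsets(filename, num_chunks)
    #   for worker_id in range(num_chunks):
    #       Tokenizer.binarize(filename, dict, consumer,
    #                          offset=offsets[worker_id],
    #                          end=offsets[worker_id + 1])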
@staticmethod
def tokenize(line, dict, tokenize=tokenize_line, add_if_not_exist=True,
consumer=None, append_eos=True, reverse_order=False):
words = tokenize(line)
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = dict.add_symbol(word)
else:
idx = dict.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = dict.eos_index
return ids
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/sequence_generator.py | bert/fairseq/sequence_generator.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
from fairseq import search, utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
self, models, tgt_dict, beam_size=1, minlen=1, maxlen=None, stop_early=True,
normalize_scores=True, len_penalty=1, unk_penalty=0, retain_dropout=False,
sampling=False, sampling_topk=-1, sampling_temperature=1,
diverse_beam_groups=-1, diverse_beam_strength=0.5,
):
"""Generates translations of a given source sentence.
Args:
min/maxlen: The length of the generated output will be bounded by
minlen and maxlen (not including the end-of-sentence marker).
stop_early: Stop generation immediately after we finalize beam_size
hypotheses, even though longer hypotheses might have better
normalized scores.
normalize_scores: Normalize scores by the length of the output.
"""
self.models = models
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
self.minlen = minlen
max_decoder_len = min(m.max_decoder_positions() for m in self.models)
max_decoder_len -= 1 # we define maxlen not including the EOS marker
self.maxlen = max_decoder_len if maxlen is None else min(maxlen, max_decoder_len)
self.stop_early = stop_early
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
if sampling:
self.search = search.Sampling(tgt_dict, sampling_topk, sampling_temperature)
elif diverse_beam_groups > 0:
self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)
else:
self.search = search.BeamSearch(tgt_dict)
def cuda(self):
for model in self.models:
model.cuda()
return self
def generate_batched_itr(
self, data_itr, beam_size=None, maxlen_a=0.0, maxlen_b=None,
cuda=False, timer=None, prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
if maxlen_b is None:
maxlen_b = self.maxlen
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if 'net_input' not in s:
continue
input = s['net_input']
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items()
if k != 'prev_output_tokens'
}
srclen = encoder_input['src_tokens'].size(1)
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
encoder_input,
beam_size=beam_size,
maxlen=int(maxlen_a*srclen + maxlen_b),
prefix_tokens=s['target'][:, :prefix_size] if prefix_size > 0 else None,
)
if timer is not None:
timer.stop(sum(len(h[0]['tokens']) for h in hypos))
for i, id in enumerate(s['id'].data):
# remove padding
src = utils.strip_pad(input['src_tokens'].data[i, :], self.pad)
ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
yield id, src, ref, hypos[i]
def generate(self, encoder_input, beam_size=None, maxlen=None, prefix_tokens=None):
"""Generate a batch of translations.
Args:
encoder_input: dictionary containing the inputs to
model.encoder.forward
            beam_size: int overriding the beam size. Defaults to
                self.beam_size
            maxlen: maximum length of the generated sequence
prefix_tokens: force decoder to begin with these tokens
"""
with torch.no_grad():
return self._generate(encoder_input, beam_size, maxlen, prefix_tokens)
def _generate(self, encoder_input, beam_size=None, maxlen=None, prefix_tokens=None):
"""See generate"""
src_tokens = encoder_input['src_tokens']
bsz, srclen = src_tokens.size()
maxlen = min(maxlen, self.maxlen) if maxlen is not None else self.maxlen
# the max beam size is the dictionary size - 1, since we never select pad
beam_size = beam_size if beam_size is not None else self.beam_size
beam_size = min(beam_size, self.vocab_size - 1)
encoder_outs = []
incremental_states = {}
for model in self.models:
if not self.retain_dropout:
model.eval()
if isinstance(model.decoder, FairseqIncrementalDecoder):
incremental_states[model] = {}
else:
incremental_states[model] = None
# compute the encoder output for each beam
encoder_out = model.encoder(**encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, new_order)
encoder_outs.append(encoder_out)
# initialize buffers
scores = src_tokens.data.new(bsz * beam_size, maxlen + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.data.new(bsz * beam_size, maxlen + 2).fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos
attn, attn_buf = None, None
nonpad_idxs = None
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
worst_finalized = [{'idx': None, 'score': -math.inf} for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
if self.stop_early or step == maxlen or unfinalized_scores is None:
return True
# stop if the best unfinalized score is worse than the worst
# finalized one
best_unfinalized_score = unfinalized_scores[sent].max()
if self.normalize_scores:
best_unfinalized_score /= maxlen ** self.len_penalty
if worst_finalized[sent]['score'] >= best_unfinalized_score:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
unfinalized_scores: A vector containing scores for all
unfinalized hypotheses
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i][nonpad_idxs[sent]]
_, alignment = hypo_attn.max(dim=0)
else:
hypo_attn = None
alignment = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': alignment,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
elif not self.stop_early and score > worst_finalized[sent]['score']:
# replace worst hypo for this sentence with new/better one
worst_idx = worst_finalized[sent]['idx']
if worst_idx is not None:
finalized[sent][worst_idx] = get_hypo()
# find new worst finalized hypo for this sentence
idx, s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
worst_finalized[sent] = {
'score': s['score'],
'idx': idx,
}
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfinalized_scores):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
for step in range(maxlen + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
if isinstance(model.decoder, FairseqIncrementalDecoder):
model.decoder.reorder_incremental_state(incremental_states[model], reorder_state)
encoder_outs[i] = model.encoder.reorder_encoder_out(encoder_outs[i], reorder_state)
lprobs, avg_attn_scores = self._decode(tokens[:, :step + 1], encoder_outs, incremental_states)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), maxlen + 2)
attn_buf = attn.clone()
nonpad_idxs = src_tokens.ne(self.pad)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
if step < maxlen:
if prefix_tokens is not None and step < prefix_tokens.size(1):
probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :]
cand_scores = torch.gather(
probs_slice, dim=1,
index=prefix_tokens[:, step].view(-1, 1).data
).expand(-1, cand_size)
cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, cand_size).data
cand_beams = torch.zeros_like(cand_indices)
else:
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, step - 1].unsqueeze(-1))
# finalize all active hypotheses once we hit maxlen
# pick the hypothesis with the highest prob of EOS right now
torch.sort(
lprobs[:, self.eos],
descending=True,
out=(eos_scores, eos_bbsz_idx),
)
num_remaining_sent -= len(finalize_hypos(
step, eos_bbsz_idx, eos_scores))
assert num_remaining_sent == 0
break
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
eos_mask = cand_indices.eq(self.eos)
finalized_sents = set()
if step >= self.minlen:
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(
step, eos_bbsz_idx, eos_scores, cand_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < maxlen
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
active_mask = buffer('active_mask')
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, _ignore = buffer('active_hypos'), buffer('_ignore')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(_ignore, active_hypos)
)
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
def _decode(self, tokens, encoder_outs, incremental_states):
if len(self.models) == 1:
return self._decode_one(tokens, self.models[0], encoder_outs[0], incremental_states, log_probs=True)
avg_probs = None
avg_attn = None
for model, encoder_out in zip(self.models, encoder_outs):
probs, attn = self._decode_one(tokens, model, encoder_out, incremental_states, log_probs=False)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs.div_(len(self.models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(self.models))
return avg_probs, avg_attn
def _decode_one(self, tokens, model, encoder_out, incremental_states, log_probs):
with torch.no_grad():
if incremental_states[model] is not None:
decoder_out = list(model.decoder(tokens, encoder_out, incremental_state=incremental_states[model]))
else:
decoder_out = list(model.decoder(tokens, encoder_out))
decoder_out[0] = decoder_out[0][:, -1, :]
attn = decoder_out[1]
            if isinstance(attn, dict):
                attn = attn['attn']
            if attn is not None:
                attn = attn[:, -1, :]
probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
return probs, attn
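# A hedged usage sketch (not from this file): `models`, `tgt_dict` and `itr`
# are assumed to come from utils.load_ensemble_for_inference and a task's
# batch iterator elsewhere in fairseq.
#
#   generator = SequenceGenerator(models, tgt_dict, beam_size=5, len_penalty=1.0)
#   if torch.cuda.is_available():
#       generator.cuda()
#   for id, src, ref, hypos in generator.generate_batched_itr(itr, cuda=True):
#       best = hypos[0]  # hypotheses come back sorted by score, best first
#       print(id, best['score'], tgt_dict.string(best['tokens']))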
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/meters.py | bert/fairseq/meters.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
class StopwatchMeter(object):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self):
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if self.start_time is not None:
delta = time.time() - self.start_time
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
@property
def avg(self):
return self.sum / self.n
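# A minimal runnable sketch of the three meters; the values are illustrative.
if __name__ == '__main__':
    loss = AverageMeter()
    for v in [4.0, 3.0, 2.0]:
        loss.update(v, n=2)  # n weights each value in the running average
    print('avg loss: {:.2f}'.format(loss.avg))  # 3.00
    wps = TimeMeter()
    wps.update(1000)  # e.g. 1000 tokens processed since reset
    print('tokens/s: {:.0f}'.format(wps.avg))
    timer = StopwatchMeter()
    timer.start()
    time.sleep(0.01)
    timer.stop()
    print('elapsed: {:.3f}s (avg {:.3f}s over {} events)'.format(timer.sum, timer.avg, timer.n))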
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/progress_bar.py | bert/fairseq/progress_bar.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
from collections import OrderedDict
import json
from numbers import Number
import sys
import os
from tqdm import tqdm
from fairseq.meters import AverageMeter
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
if args.log_format is None:
args.log_format = no_progress_bar if args.no_progress_bar else default
if args.log_format == 'tqdm' and not sys.stderr.isatty():
args.log_format = 'simple'
if args.log_format == 'json':
bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'none':
bar = noop_progress_bar(iterator, epoch, prefix)
elif args.log_format == 'simple':
bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval, args.save_dir)
elif args.log_format == 'tqdm':
bar = tqdm_progress_bar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(args.log_format))
return bar
class progress_bar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += '| epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats):
"""Print end-of-epoch stats."""
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = '{:g}'.format(postfix[key])
# Meter: display both current and average value
elif isinstance(postfix[key], AverageMeter):
postfix[key] = '{:.2f} ({:.2f})'.format(
postfix[key].val, postfix[key].avg)
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], str):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
return postfix
class json_progress_bar(progress_bar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
def __iter__(self):
size = float(len(self.iterable))
for i, obj in enumerate(self.iterable):
yield obj
if self.stats is not None and i > 0 and \
self.log_interval is not None and i % self.log_interval == 0:
update = self.epoch - 1 + float(i / size) if self.epoch is not None else None
stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
print(json.dumps(stats), flush=True)
def log(self, stats):
"""Log intermediate stats according to log_interval."""
self.stats = stats
def print(self, stats):
"""Print end-of-epoch stats."""
self.stats = stats
stats = self._format_stats(self.stats, epoch=self.epoch)
print(json.dumps(stats), flush=True)
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = update
# Preprocess stats according to datatype
for key in stats.keys():
# Meter: display both current and average value
if isinstance(stats[key], AverageMeter):
postfix[key] = stats[key].val
postfix[key + '_avg'] = stats[key].avg
else:
postfix[key] = stats[key]
return postfix
class noop_progress_bar(progress_bar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats):
"""Print end-of-epoch stats."""
pass
class simple_progress_bar(progress_bar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000, save_dir=os.path.curdir):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.log_file = open(os.path.join(save_dir, "log.txt"), "a", encoding="utf-8")
def __iter__(self):
size = len(self.iterable)
for i, obj in enumerate(self.iterable):
yield obj
if self.stats is not None and i > 0 and \
self.log_interval is not None and i % self.log_interval == 0:
postfix = self._str_commas(self.stats)
print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix),
flush=True)
def log(self, stats):
"""Log intermediate stats according to log_interval."""
self.stats = self._format_stats(stats)
def print(self, stats):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
print('{} | {}'.format(self.prefix, postfix), flush=True)
self.log_file.write(f"{self.prefix} | {postfix}\n")
self.log_file.flush()
class tqdm_progress_bar(progress_bar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
self.tqdm = tqdm(iterable, self.prefix, leave=False)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
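# A minimal runnable sketch: wrap any iterable in one of the bars above. The
# JSON bar emits one object per log_interval batches plus an end-of-epoch line.
if __name__ == '__main__':
    with json_progress_bar(range(50), epoch=1, log_interval=10) as bar:
        for i in bar:
            bar.log({'loss': 2.0 / (i + 1)})
        bar.print({'loss': 0.04})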
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/bleu.py | bert/fairseq/bleu.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import ctypes
import math
import torch
try:
from fairseq import libbleu
except ImportError as e:
import sys
sys.stderr.write('ERROR: missing libbleu.so. run `python setup.py install`\n')
raise e
C = ctypes.cdll.LoadLibrary(libbleu.__file__)
class BleuStat(ctypes.Structure):
_fields_ = [
('reflen', ctypes.c_size_t),
('predlen', ctypes.c_size_t),
('match1', ctypes.c_size_t),
('count1', ctypes.c_size_t),
('match2', ctypes.c_size_t),
('count2', ctypes.c_size_t),
('match3', ctypes.c_size_t),
('count3', ctypes.c_size_t),
('match4', ctypes.c_size_t),
('count4', ctypes.c_size_t),
]
class Scorer(object):
def __init__(self, pad, eos, unk):
self.stat = BleuStat()
self.pad = pad
self.eos = eos
self.unk = unk
self.reset()
def reset(self, one_init=False):
if one_init:
C.bleu_one_init(ctypes.byref(self.stat))
else:
C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError('ref must be a torch.IntTensor (got {})'
.format(type(ref)))
if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor (got {})'
.format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos))
def score(self, order=4):
psum = sum(math.log(p) if p > 0 else float('-Inf')
for p in self.precision()[:order])
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
for _ in range(1, order):
fmt += '/{:2.1f}'
fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(order, self.score(order=order), *bleup,
self.brevity(), self.stat.predlen/self.stat.reflen,
self.stat.predlen, self.stat.reflen)
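# A minimal runnable sketch (assumes libbleu was built; see the import above).
# Token ids are arbitrary; pad/eos/unk indices here are illustrative choices.
if __name__ == '__main__':
    scorer = Scorer(pad=0, eos=1, unk=2)
    ref = torch.IntTensor([5, 6, 7, 8, 1])
    hyp = torch.IntTensor([5, 6, 7, 9, 1])
    scorer.add(ref, hyp)
    print(scorer.result_string())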
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/sequence_scorer.py | bert/fairseq/sequence_scorer.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(self, models, tgt_dict):
self.models = models
self.pad = tgt_dict.pad()
def cuda(self):
for model in self.models:
model.cuda()
return self
def score_batched_itr(self, data_itr, cuda=False, timer=None):
"""Iterate over a batched dataset and yield scored translations."""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if timer is not None:
timer.start()
pos_scores, attn = self.score(s)
for i, id in enumerate(s['id'].data):
# remove padding from ref
src = utils.strip_pad(s['net_input']['src_tokens'].data[i, :], self.pad)
ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
tgt_len = ref.numel()
pos_scores_i = pos_scores[i][:tgt_len]
score_i = pos_scores_i.sum() / tgt_len
if attn is not None:
attn_i = attn[i]
_, alignment = attn_i.max(dim=0)
else:
attn_i = alignment = None
hypos = [{
'tokens': ref,
'score': score_i,
'attention': attn_i,
'alignment': alignment,
'positional_scores': pos_scores_i,
}]
if timer is not None:
timer.stop(s['ntokens'])
# return results in the same format as SequenceGenerator
yield id, src, ref, hypos
def score(self, sample):
"""Score a batch of translations."""
net_input = sample['net_input']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in self.models:
with torch.no_grad():
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
probs = model.get_normalized_probs(decoder_out, log_probs=False, sample=sample).data
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs.div_(len(self.models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(self.models))
avg_probs = avg_probs.gather(
dim=2,
index=sample['target'].data.unsqueeze(-1),
)
return avg_probs.squeeze(2), avg_attn
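# A hedged usage sketch (not from this file): `models`, `tgt_dict` and `itr`
# are assumed to be loaded elsewhere (e.g. utils.load_ensemble_for_inference
# and a task's batch iterator); each hypo scores the reference itself.
#
#   scorer = SequenceScorer(models, tgt_dict)
#   for id, src, ref, hypos in scorer.score_batched_itr(itr, cuda=True):
#       print(id, hypos[0]['score'], hypos[0]['positional_scores'])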
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/options.py | bert/fairseq/options.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import torch
from fairseq.criterions import CRITERION_REGISTRY
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
from fairseq.optim import OPTIMIZER_REGISTRY
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
from fairseq.tasks import TASK_REGISTRY
def get_training_parser(default_task='translation'):
parser = get_parser('Trainer', default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task='translation'):
parser = get_parser('Generation', default_task)
add_dataset_args(parser, gen=True)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task='translation'):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task='language_modeling'):
parser = get_parser('Evaluate Language Model', default_task)
add_dataset_args(parser, gen=True)
add_eval_lm_args(parser)
return parser
def get_inference_parser(default_task='glue'):
parser = get_parser('Inference', default_task)
add_dataset_args(parser, gen=True)
add_inference_args(parser)
return parser
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
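# Examples (a hedged sketch): both helpers accept Python-literal strings, as
# passed on the command line via --lr '0.25' or --lr '[0.25, 0.1]':
#
#   >>> eval_str_list('[0.25, 0.1]', type=float)
#   [0.25, 0.1]
#   >>> eval_str_list('0.25', type=float)   # bare scalar becomes a 1-element list
#   [0.25]
#   >>> eval_bool('True')
#   True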
def parse_args_and_arch(parser, input_args=None, parse_known=False):
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, 'arch'):
model_specific_group = parser.add_argument_group(
'Model-specific configuration',
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
if hasattr(args, 'criterion'):
CRITERION_REGISTRY[args.criterion].add_args(parser)
if hasattr(args, 'optimizer'):
OPTIMIZER_REGISTRY[args.optimizer].add_args(parser)
if hasattr(args, 'lr_scheduler'):
LR_SCHEDULER_REGISTRY[args.lr_scheduler].add_args(parser)
if hasattr(args, 'task'):
TASK_REGISTRY[args.task].add_args(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, 'lr'):
args.lr = eval_str_list(args.lr, type=float)
if hasattr(args, 'update_freq'):
args.update_freq = eval_str_list(args.update_freq, type=int)
if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
# Apply architecture configuration.
if hasattr(args, 'arch'):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task='translation'):
parser = argparse.ArgumentParser()
parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
help='log progress every N batches (when progress bar is disabled)')
parser.add_argument('--log-format', default=None, help='log format to use',
choices=['json', 'none', 'simple', 'tqdm'])
parser.add_argument('--seed', default=1, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--fp16', action='store_true', help='use FP16')
parser.add_argument('--fp16-init-scale', default=2**7, type=int,
help='default FP16 loss scale')
# Task definitions can be found under fairseq/tasks/
parser.add_argument(
'--task', metavar='TASK', default=default_task,
choices=TASK_REGISTRY.keys(),
help='task',
)
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
help='ignore too long or too short lines in valid and test set')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
if train:
group.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (train, valid, valid1, test, test1)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
return group
def add_distributed_training_args(parser):
group = parser.add_argument_group('Distributed training')
group.add_argument('--distributed-world-size', type=int, metavar='N',
default=torch.cuda.device_count(),
help='total number of GPUs across all nodes (default: all visible GPUs)')
group.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
group.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
group.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
                            'establish initial connection')
group.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
group.add_argument('--device-id', default=0, type=int,
help='which GPU to use (usually configured automatically)')
group.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
group.add_argument('--bucket-cap-mb', default=150, type=int, metavar='MB',
help='bucket size for reduction')
return group
def add_optimization_args(parser):
group = parser.add_argument_group('Optimization')
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--sentence-avg', action='store_true',
help='normalize gradients by the number of sentences in a batch'
' (default is to normalize by number of tokens)')
group.add_argument('--update-freq', default='1', metavar='N',
help='update parameters every N_i batches, when in epoch i')
# Optimizer definitions can be found under fairseq/optim/
group.add_argument('--optimizer', default='nag', metavar='OPT',
choices=OPTIMIZER_REGISTRY.keys(),
help='Optimizer')
group.add_argument('--lr', '--learning-rate', default='0.25', metavar='LR_1,LR_2,...,LR_N',
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--momentum', default=0.99, type=float, metavar='M',
help='momentum factor')
group.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# Learning rate schedulers can be found under fairseq/optim/lr_scheduler/
group.add_argument('--lr-scheduler', default='reduce_lr_on_plateau',
choices=LR_SCHEDULER_REGISTRY.keys(),
help='Learning Rate Scheduler')
group.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
help='learning rate shrink factor for annealing, lr_new = (lr * lr_shrink)')
group.add_argument('--min-lr', default=1e-5, type=float, metavar='LR',
help='minimum learning rate')
group.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
help='minimum loss scale (for FP16 training)')
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group('Checkpointing')
group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
help='path to save checkpoints')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename in save-dir from which to load checkpoint')
group.add_argument('--reset-optimizer', action='store_true',
help='if set, does not load optimizer state from the checkpoint')
group.add_argument('--reset-lr-scheduler', action='store_true',
help='if set, does not load lr scheduler state from the checkpoint')
group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override optimizer args when loading a checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
help='save a checkpoint (and validate) every N updates')
group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
help='keep last N checkpoints saved with --save-interval-updates')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
group.add_argument('--load-bert', metavar='PATH', type=str,
help='pretrained bert encoder to load from file')
group.add_argument('--load-type', metavar='TYPE', default="all", type=str,
                       help='which part of the pretrained bert encoder to load ("all", "no_fc", "no_out", "no_out_no_fc")')
return group
def add_common_eval_args(group):
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring')
group.add_argument('--cpu', action='store_true', help='generate on CPU')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
def add_eval_lm_args(parser):
group = parser.add_argument_group('LM Evaluation')
add_common_eval_args(group)
group.add_argument('--output-word-probs', action='store_true',
help='if set, outputs words and their predicted log probabilities to standard output')
group.add_argument('--output-word-stats', action='store_true',
help='if set, outputs word statistics such as word count, average probability, etc')
def add_generation_args(parser):
group = parser.add_argument_group('Generation')
add_common_eval_args(group)
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
    group.add_argument('--min-len', default=1, type=int, metavar='N',
help=('minimum generation length'))
group.add_argument('--no-early-stop', action='store_true',
help=('continue searching even after finalizing k=beam '
'hypotheses; this is more correct, but increases '
'generation time by 50%%'))
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--score-reference', action='store_true',
help='just score the reference translation')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-temperature', default=1, type=float, metavar='N',
help='temperature for random sampling')
group.add_argument('--diverse-beam-groups', default=1, type=int, metavar='N',
help='number of groups for Diverse Beam Search')
group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
help='strength of diversity penalty for Diverse Beam Search')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override model args at generation that were used during model training')
return group
def add_interactive_args(parser):
group = parser.add_argument_group('Interactive')
group.add_argument('--buffer-size', default=0, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
def add_inference_args(parser):
group = parser.add_argument_group('Inference')
add_common_eval_args(group)
group.add_argument('--output', type=str, metavar='PATH',
help='path of inference output')
return group
def add_model_args(parser):
group = parser.add_argument_group('Model configuration')
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
group.add_argument(
'--arch', '-a', default='fconv', metavar='ARCH', required=True,
choices=ARCH_MODEL_REGISTRY.keys(),
help='Model Architecture',
)
# Criterion definitions can be found under fairseq/criterions/
group.add_argument(
'--criterion', default='cross_entropy', metavar='CRIT',
choices=CRITERION_REGISTRY.keys(),
help='Training Criterion',
)
return group
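# A minimal sketch of the two-pass parsing flow in parse_args_and_arch; the
# data path is a placeholder and we assume the stock 'fconv' architecture and
# 'nag' optimizer are registered.
if __name__ == '__main__':
    parser = get_training_parser()
    args = parse_args_and_arch(parser, input_args=[
        'data-bin/placeholder', '--arch', 'fconv',
        '--optimizer', 'nag', '--lr', '0.25', '--max-tokens', '4000',
    ])
    print(args.arch, args.lr, args.max_tokens)  # fconv [0.25] 4000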
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tasks/bert.py | bert/fairseq/tasks/bert.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import os
import numpy as np
from torch.utils.data import ConcatDataset
from fairseq import options
from fairseq.data import (
Dictionary, BertDictionary, IndexedInMemoryDataset,
IndexedRawTextDataset, BertDataset
)
from . import FairseqTask, register_task
@register_task('bert')
class BertTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (Dictionary): dictionary for the source language
tgt_dict (Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`train.py <train>`,
:mod:`generate.py <generate>` and :mod:`interactive.py <interactive>`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
        parser.add_argument('data', nargs='+', help='path(s) to data directory(ies)')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad', default='False', type=str, metavar='BOOL',
help='pad the sentence on the left')
parser.add_argument('--max-positions', default=384, type=int, metavar='N',
help='max number of tokens in the sequence')
parser.add_argument('--masked-lm-prob', default=0.15, type=float, metavar='P',
help='masked LM probability')
parser.add_argument('--mlm-mask-prob', default=0.8, type=float, metavar='P',
help='probability to mask a word in MLM')
parser.add_argument('--mlm-random-prob', default=0.1, type=float, metavar='P',
help='probability to replace with a random word')
def __init__(self, args, dictionary):
super().__init__(args)
self.dict = dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad = options.eval_bool(args.left_pad)
# load dictionaries
dictionary = BertDictionary.load(os.path.join(args.data[0], 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def split_exists(split, data_path):
filename = os.path.join(data_path, split)
if self.args.raw_text and IndexedRawTextDataset.exists(filename):
return True
elif not self.args.raw_text and IndexedInMemoryDataset.exists(filename):
return True
return False
def indexed_dataset(path, dictionary):
if self.args.raw_text:
return IndexedRawTextDataset(path, dictionary)
elif IndexedInMemoryDataset.exists(path):
return IndexedInMemoryDataset(path, fix_lua_indexing=True)
return None
datasets = []
data_paths = self.args.data
for data_path in data_paths:
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
if split_exists(split_k, data_path):
prefix = os.path.join(data_path, split_k)
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
datasets.append(indexed_dataset(prefix, self.dict))
print('| {} {} {} examples'.format(data_path, split_k, len(datasets[-1])))
if not combine:
break
if len(datasets) == 1:
dataset = datasets[0]
sizes = dataset.sizes
else:
if self.args.upsample_primary > 1:
datasets.extend([datasets[0]] * (self.args.upsample_primary - 1))
dataset = ConcatDataset(datasets)
sizes = np.concatenate([ds.sizes for ds in datasets])
self.datasets[split] = BertDataset(
dataset, sizes, self.dict, self.args.left_pad, self.args.max_positions
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return self.args.max_positions
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dict
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tasks/fairseq_task.py | bert/fairseq/tasks/fairseq_task.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq.data import data_utils, FairseqDataset, iterators
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
def __init__(self, args):
self.args = args
self.datasets = {}
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args)
def load_dataset(self, split, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
if split not in self.datasets:
raise KeyError('Dataset not loaded: ' + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError('Datasets are expected to be of type FairseqDataset')
return self.datasets[split]
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch.
Default: ``None``
max_sentences (int, optional): max number of sentences in each
batch. Default: ``None``
max_positions (optional): max sentence length supported by the
model. Default: ``None``
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long. Default: ``False``
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N. Default: ``1``
seed (int, optional): seed for random number generator for
reproducibility. Default: ``1``
num_shards (int, optional): shard the data iterator into N
shards. Default: ``1``
shard_id (int, optional): which shard of the data iterator to
return. Default: ``0``
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
assert isinstance(dataset, FairseqDataset)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
indices = data_utils.filter_by_size(
indices, dataset.size, max_positions, raise_exception=(not ignore_invalid_inputs),
)
# create mini-batches with given size constraints
batch_sampler = data_utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
)
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models
return models.build_model(args, self)
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def get_loss(self, model, criterion, sample):
"""
Return the loss as computed by *criterion* for the given *model* and
*sample*.
Args:
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
"""
return criterion(model, sample)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
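# A hedged sketch of the flow a concrete subclass supports; `args` and `task`
# are assumed to come from options parsing and SomeTask.setup_task(args):
#
#   task.load_dataset('train')
#   epoch_itr = task.get_batch_iterator(
#       dataset=task.dataset('train'),
#       max_tokens=4000,
#       max_positions=task.max_positions(),
#   )
#   model = task.build_model(args)
#   criterion = task.build_criterion(args)
#   for sample in epoch_itr.next_epoch_itr(shuffle=True):
#       loss, sample_size, logging_output = task.get_loss(model, criterion, sample)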
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tasks/translation.py | bert/fairseq/tasks/translation.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import numpy as np
import os
from torch.utils.data import ConcatDataset
from fairseq import options
from fairseq.data import (
data_utils, Dictionary, LanguagePairDataset, IndexedInMemoryDataset,
IndexedRawTextDataset,
)
from . import FairseqTask, register_task
@register_task('translation')
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (Dictionary): dictionary for the source language
tgt_dict (Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`train.py <train>`,
:mod:`generate.py <generate>` and :mod:`interactive.py <interactive>`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
        parser.add_argument('data', nargs='+', help='path(s) to data directory(ies)')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data[0])
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = Dictionary.load(os.path.join(args.data[0], 'dict.{}.txt'.format(args.source_lang)))
tgt_dict = Dictionary.load(os.path.join(args.data[0], 'dict.{}.txt'.format(args.target_lang)))
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
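    # Illustrative sketch (not part of the original file): a typical invocation
    # of this task; the data path and batch size are placeholders.
    #
    #   python train.py data-bin/iwslt14.tokenized.de-en \
    #       --task translation --source-lang de --target-lang en \
    #       --arch fconv_iwslt_de_en --max-tokens 4000
    #
    # When --source-lang/--target-lang are omitted, setup_task() tries to infer
    # them from the filenames in the first data directory.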
def load_dataset(self, split, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
if self.args.raw_text and IndexedRawTextDataset.exists(filename):
return True
elif not self.args.raw_text and IndexedInMemoryDataset.exists(filename):
return True
return False
def indexed_dataset(path, dictionary):
if self.args.raw_text:
return IndexedRawTextDataset(path, dictionary)
elif IndexedInMemoryDataset.exists(path):
return IndexedInMemoryDataset(path, fix_lua_indexing=True)
return None
src_datasets = []
tgt_datasets = []
data_paths = self.args.data
for data_path in data_paths:
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_datasets.append(indexed_dataset(prefix + src, self.src_dict))
tgt_datasets.append(indexed_dataset(prefix + tgt, self.tgt_dict))
print('| {} {} {} examples'.format(data_path, split_k, len(src_datasets[-1])))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
src_sizes = src_dataset.sizes
tgt_sizes = tgt_dataset.sizes
else:
if self.args.upsample_primary > 1:
src_datasets.extend([src_datasets[0]] * (self.args.upsample_primary - 1))
tgt_datasets.extend([tgt_datasets[0]] * (self.args.upsample_primary - 1))
src_dataset = ConcatDataset(src_datasets)
tgt_dataset = ConcatDataset(tgt_datasets)
src_sizes = np.concatenate([ds.sizes for ds in src_datasets])
tgt_sizes = np.concatenate([ds.sizes for ds in tgt_datasets])
self.datasets[split] = LanguagePairDataset(
src_dataset, src_sizes, self.src_dict,
tgt_dataset, tgt_sizes, self.tgt_dict,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
)
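    # Note (added): load_dataset() above expects binarized files following the
    # fairseq naming convention; e.g. for split='train', src='de', tgt='en':
    #
    #   <data_path>/train.de-en.de.{bin,idx}   # source side
    #   <data_path>/train.de-en.en.{bin,idx}   # target side
    #
    # With combine=True, additional shards named train1.*, train2.*, ... are
    # loaded and concatenated onto the first one.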
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tasks/glue.py | bert/fairseq/tasks/glue.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import os
import torch
import numpy as np
from torch.utils.data import ConcatDataset
from fairseq import options
from fairseq.data import (
BertDictionary, IndexedInMemoryDataset,
IndexedRawTextDataset, GlueSingleDataset, GluePairDataset
)
from . import FairseqTask, register_task
@register_task('glue_single')
class GlueSingleTask(FairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
        parser.add_argument('data', nargs='+', help='path(s) to data director(ies)')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad', default='False', type=str, metavar='BOOL',
help='pad the sentence on the left')
parser.add_argument('--max-positions', default=384, type=int, metavar='N',
help='max number of tokens in the sequence')
def __init__(self, args, dictionary):
super().__init__(args)
self.dict = dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad = options.eval_bool(args.left_pad)
# load dictionaries
dictionary = BertDictionary.load(os.path.join(args.data[0], 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def split_exists(split, data_path):
filename = os.path.join(data_path, split)
if self.args.raw_text and IndexedRawTextDataset.exists(filename):
return True
elif not self.args.raw_text and IndexedInMemoryDataset.exists(filename):
return True
return False
def indexed_dataset(path, dictionary):
if self.args.raw_text:
return IndexedRawTextDataset(path, dictionary)
elif IndexedInMemoryDataset.exists(path):
return IndexedInMemoryDataset(path, fix_lua_indexing=True)
return None
datasets, labels = [], []
data_paths = self.args.data
for data_path in data_paths:
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
if split_exists(split_k, data_path) and (os.path.exists(os.path.join(data_path, split_k + "_labels.pt"))
or split == 'test'):
prefix = os.path.join(data_path, split_k)
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
datasets.append(indexed_dataset(prefix, self.dict))
if split == 'test':
                    # TODO: clean this up; the test split is unlabeled, so fill in dummy labels
labels.append(torch.zeros(1000000))
else:
labels.append(torch.load(os.path.join(data_path, split + "_labels.pt")))
print('| {} {} {} examples'.format(data_path, split_k, len(datasets[-1])))
if not combine:
break
if len(datasets) == 1:
dataset = datasets[0]
label = labels[0]
sizes = dataset.sizes
else:
            if getattr(self.args, 'upsample_primary', 1) > 1:
                # upsample inputs and labels together so they stay aligned
                datasets.extend([datasets[0]] * (self.args.upsample_primary - 1))
                labels.extend([labels[0]] * (self.args.upsample_primary - 1))
dataset = ConcatDataset(datasets)
label = torch.cat(labels)
sizes = np.concatenate([ds.sizes for ds in datasets])
self.datasets[split] = GlueSingleDataset(
dataset, sizes, self.dict, label, self.args.left_pad, self.args.max_positions
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return self.args.max_positions
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dict
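# Illustrative sketch (not part of the original file): load_dataset() above
# expects a '<split>_labels.pt' tensor saved next to the binarized inputs; it
# could be produced along these lines (path and label values are placeholders):
#
#   import torch
#   torch.save(torch.tensor([1, 0, 1, 1]), 'data-bin/SST-2/train_labels.pt')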
@register_task('glue_pair')
class GluePairTask(FairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
        parser.add_argument('data', nargs='+', help='path(s) to data director(ies)')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad', default='False', type=str, metavar='BOOL',
help='pad the sentence on the left')
parser.add_argument('--max-positions', default=384, type=int, metavar='N',
help='max number of tokens in the sequence')
parser.add_argument('--symmetric', action='store_true',
help='whether or not the sentence pair is symmetric')
def __init__(self, args, dictionary):
super().__init__(args)
self.dict = dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad = options.eval_bool(args.left_pad)
# load dictionaries
dictionary = BertDictionary.load(os.path.join(args.data[0], 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def split_exists(split, data_path):
filename = os.path.join(data_path, split)
if self.args.raw_text and IndexedRawTextDataset.exists(filename):
return True
elif not self.args.raw_text and IndexedInMemoryDataset.exists(filename):
return True
return False
def indexed_dataset(path, dictionary):
if self.args.raw_text:
return IndexedRawTextDataset(path, dictionary)
elif IndexedInMemoryDataset.exists(path):
return IndexedInMemoryDataset(path, fix_lua_indexing=True)
return None
datasets, labels = [], []
data_paths = self.args.data
for data_path in data_paths:
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
if split_exists(split_k, data_path) and (os.path.exists(os.path.join(data_path, split_k + "_labels.pt"))
or split == 'test'):
prefix = os.path.join(data_path, split_k)
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
datasets.append(indexed_dataset(prefix, self.dict))
if split == 'test':
                    # TODO: clean this up; the test split is unlabeled, so fill in dummy labels
labels.append(torch.zeros(1000000))
else:
labels.append(torch.load(os.path.join(data_path, split + "_labels.pt")))
print('| {} {} {} examples'.format(data_path, split_k, len(datasets[-1])))
if not combine:
break
if len(datasets) == 1:
dataset = datasets[0]
label = labels[0]
sizes = dataset.sizes
else:
            if getattr(self.args, 'upsample_primary', 1) > 1:
                # upsample inputs and labels together so they stay aligned
                datasets.extend([datasets[0]] * (self.args.upsample_primary - 1))
                labels.extend([labels[0]] * (self.args.upsample_primary - 1))
dataset = ConcatDataset(datasets)
label = torch.cat(labels)
sizes = np.concatenate([ds.sizes for ds in datasets])
self.datasets[split] = GluePairDataset(
dataset, sizes, self.dict, label, self.args.left_pad, self.args.max_positions,
            symmetric=(split == 'train' and self.args.symmetric)
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return self.args.max_positions
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dict | python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tasks/__init__.py | bert/fairseq/tasks/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(args):
return TASK_REGISTRY[args.task].setup_task(args)
def register_task(name):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError('Cannot register duplicate task ({})'.format(name))
if not issubclass(cls, FairseqTask):
raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls
# automatically import any Python files in the tasks/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('fairseq.tasks.' + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group('Task name')
group_task.add_argument(
'--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``'
)
group_args = parser.add_argument_group('Additional command-line arguments')
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + '_parser'] = parser
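# Illustrative sketch (not part of the original file): registering a custom
# task with the decorator above; 'my_task' and MyTask are placeholder names.
#
#   @register_task('my_task')
#   class MyTask(FairseqTask):
#       @staticmethod
#       def add_args(parser):
#           parser.add_argument('--my-option', type=int, default=1)
#
#       @classmethod
#       def setup_task(cls, args, **kwargs):
#           return cls(args)
#
# Once registered, the task can be selected with --task my_task.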
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/tasks/language_modeling.py | bert/fairseq/tasks/language_modeling.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import numpy as np
import os
from torch.utils.data import ConcatDataset
from fairseq.data import (
Dictionary, IndexedInMemoryDataset, IndexedRawTextDataset,
MonolingualDataset, TokenBlockDataset, TruncatedDictionary
)
from . import FairseqTask, register_task
@register_task('language_modeling')
class LanguageModelingTask(FairseqTask):
"""
Train a language model.
Args:
dictionary (Dictionary): the dictionary for the input of the language model
output_dictionary (Dictionary): the dictionary for the output of the language model.
In most cases it will be the same as dictionary, but could possibly be a more limited
version of the dictionary (if --output-dictionary-size is used).
targets (List[str]): list of the target types that the language model should predict.
Can be one of "self", "future", and "past". Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`train.py <train>`,
:mod:`generate.py <generate>`, :mod:`interactive.py <interactive>` and
:mod:`eval_lm.py <eval_lm>`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-break-mode',
choices=['none', 'complete', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=1024, type=int,
help='max number of tokens per sample for LM dataset')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--output-dictionary-size', default=-1, type=int,
help='limit the size of output dictionary')
parser.add_argument('--self-target', action='store_true',
help='include self target')
parser.add_argument('--future-target', action='store_true',
help='include future target')
parser.add_argument('--past-target', action='store_true',
help='include past target')
def __init__(self, args, dictionary, output_dictionary, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary
if targets is None:
targets = ['future']
self.targets = targets
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(dictionary, args.output_dictionary_size)
# upgrade old checkpoints
if hasattr(args, 'exclude_self_target'):
args.self_target = not args.exclude_self_target
targets = []
if args.self_target:
targets.append('self')
if args.future_target:
targets.append('future')
if args.past_target:
targets.append('past')
if len(targets) == 0:
# standard language modeling
targets = ['future']
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args):
model = super().build_model(args)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError('Unsupported language modeling target: {}'.format(target))
return model
def load_dataset(self, split, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
loaded_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
path = os.path.join(self.args.data, split_k)
if self.args.raw_text and IndexedRawTextDataset.exists(path):
ds = IndexedRawTextDataset(path, self.dictionary)
tokens = [t for l in ds.tokens_list for t in l]
elif not self.args.raw_text and IndexedInMemoryDataset.exists(path):
ds = IndexedInMemoryDataset(path, fix_lua_indexing=True)
tokens = ds.buffer
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, self.args.data))
loaded_datasets.append(
TokenBlockDataset(
tokens, ds.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode, include_targets=True,
))
print('| {} {} {} examples'.format(self.args.data, split_k, len(loaded_datasets[-1])))
if not combine:
break
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
add_eos_for_other_targets = self.args.sample_break_mode is not None and self.args.sample_break_mode != 'none'
self.datasets[split] = MonolingualDataset(
dataset, sizes, self.dictionary, self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets, shuffle=False,
targets=self.targets,
)
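    # Worked example (added): with tokens_per_sample=4 and
    # sample_break_mode='none', a token stream [a, b, c, d, e, f] is cut into
    # fixed-size blocks [a, b, c, d] and [e, f]; with 'eos' every sentence
    # becomes its own sample; with 'complete', blocks only break at sentence
    # ends but may pack several sentences each.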
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/models/distributed_fairseq_model.py | bert/fairseq/models/distributed_fairseq_model.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from torch.nn import parallel
from fairseq.distributed_utils import c10d_status
from . import BaseFairseqModel
class DistributedFairseqModel(BaseFairseqModel):
"""
A wrapper around a :class:`BaseFairseqModel` instance that adds support for
distributed training.
    Whenever a method or attribute is accessed on this class, we first try to
    forward it to the underlying DistributedDataParallel instance; otherwise we
    forward it to the original :class:`BaseFairseqModel` instance.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
"""
def __init__(self, args, model):
super().__init__()
assert isinstance(model, BaseFairseqModel)
if args.ddp_backend == 'c10d':
if c10d_status.is_default:
ddp_class = parallel.DistributedDataParallel
elif c10d_status.has_c10d:
ddp_class = parallel._DistributedDataParallelC10d
else:
raise Exception(
'Can\'t find c10d version of DistributedDataParallel. '
'Please update PyTorch.'
)
self.ddp_model = ddp_class(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=False,
bucket_cap_mb=args.bucket_cap_mb,
)
elif args.ddp_backend == 'no_c10d':
if c10d_status.is_default:
ddp_class = parallel.deprecated.DistributedDataParallel
else:
ddp_class = parallel.DistributedDataParallel
self.ddp_model = ddp_class(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=False,
)
else:
raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)
def __call__(self, *args, **kwargs):
return self.ddp_model(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.ddp_model.forward(*args, **kwargs)
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
pass
try:
return self.ddp_model.__getattr__(name)
except AttributeError:
pass
return self.ddp_model.module.__getattr__(name)
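# Illustrative sketch (not part of the original file): wrapping a model for
# multi-GPU training; `args` must provide ddp_backend, device_id and, for the
# c10d backend, bucket_cap_mb. `task`, `criterion` and `sample` are
# placeholders.
#
#   model = task.build_model(args).cuda()
#   model = DistributedFairseqModel(args, model)
#   loss, sample_size, logging_output = criterion(model, sample)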
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/models/fconv.py | bert/fairseq/models/fconv.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import (
AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding,
LinearizedConvolution,
)
from . import (
FairseqEncoder, FairseqIncrementalDecoder, FairseqModel,
FairseqLanguageModel, register_model, register_model_architecture,
)
@register_model('fconv')
class FConvModel(FairseqModel):
"""
A fully convolutional model, i.e. a convolutional encoder and a
convolutional decoder, as described in `"Convolutional Sequence to Sequence
Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_.
Args:
encoder (FConvEncoder): the encoder
decoder (FConvDecoder): the decoder
The Convolutional model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.fconv_parser
:prog:
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--share-input-output-embed', action='store_true',
help='share input and output embeddings (requires'
' --decoder-out-embed-dim and --decoder-embed-dim'
' to be equal)')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
encoder_embed_dict = None
if args.encoder_embed_path:
encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path)
utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary)
decoder_embed_dict = None
if args.decoder_embed_path:
decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)
encoder = FConvEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
embed_dict=encoder_embed_dict,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
)
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
embed_dict=decoder_embed_dict,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
share_embed=args.share_input_output_embed,
)
return FConvModel(encoder, decoder)
@register_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if hasattr(args, 'max_target_positions'):
args.tokens_per_sample = args.max_target_positions
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.tokens_per_sample,
share_embed=False,
positional_embeddings=False,
adaptive_softmax_cutoff=(
options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == 'adaptive_loss' else None
),
adaptive_softmax_dropout=args.adaptive_softmax_dropout,
)
return FConvLanguageModel(decoder)
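# Note (added): --adaptive-softmax-cutoff takes a comma-separated list of
# vocabulary cutoffs and is only honored together with --criterion
# adaptive_loss (see build_model above), e.g.:
#
#   --criterion adaptive_loss --adaptive-softmax-cutoff 10000,20000,200000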
class FConvEncoder(FairseqEncoder):
"""
Convolutional encoder consisting of `len(convolutions)` layers.
Args:
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_dim (int, optional): embedding dimension
        embed_dict (dict, optional): mapping of tokens to pre-trained embedding
            vectors, as parsed from a file by the caller
max_positions (int, optional): maximum supported input sequence length
convolutions (list, optional): the convolutional layer structure. Each
list item `i` corresponds to convolutional layer `i`. Layers are
given as ``(out_channels, kernel_width, [residual])``. Residual
connections are added between layers when ``residual=1`` (which is
the default behavior).
dropout (float, optional): dropout to be applied before each conv layer
left_pad (bool, optional): whether the input is left-padded. Default:
``True``
"""
def __init__(
self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024,
convolutions=((512, 3),) * 20, dropout=0.1, left_pad=True,
):
super().__init__(dictionary)
self.dropout = dropout
self.left_pad = left_pad
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
left_pad=self.left_pad,
)
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(Linear(residual_dim, out_channels)
if residual_dim != out_channels else None)
if kernel_size % 2 == 1:
padding = kernel_size // 2
else:
padding = 0
self.convolutions.append(
ConvTBC(in_channels, out_channels * 2, kernel_size,
dropout=dropout, padding=padding)
)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
Returns:
dict:
- **encoder_out** (tuple): a tuple with two elements, where the
first element is the last encoder layer's output and the
second element is the same quantity summed with the input
embedding (used for attention). The shape of both tensors is
`(batch, src_len, embed_dim)`.
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
input_embedding = x
# project to size of convolution
x = self.fc1(x)
# used to mask padding in input
encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B
if not encoder_padding_mask.any():
encoder_padding_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
residuals = [x]
# temporal convolutions
for proj, conv, res_layer in zip(self.projections, self.convolutions, self.residuals):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
x = F.dropout(x, p=self.dropout, training=self.training)
if conv.kernel_size[0] % 2 == 1:
# padding is implicit in the conv
x = conv(x)
else:
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.t() # -> B x T
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding) * math.sqrt(0.5)
return {
'encoder_out': (x, y),
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = (
encoder_out['encoder_out'][0].index_select(0, new_order),
encoder_out['encoder_out'][1].index_select(0, new_order),
)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions()
class AttentionLayer(nn.Module):
def __init__(self, conv_channels, embed_dim, bmm=None):
super().__init__()
# projects from output of convolution to embedding dimension
self.in_projection = Linear(conv_channels, embed_dim)
# projects from embedding dimension to convolution size
self.out_projection = Linear(embed_dim, conv_channels)
self.bmm = bmm if bmm is not None else torch.bmm
def forward(self, x, target_embedding, encoder_out, encoder_padding_mask):
residual = x
# attention
x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)
x = self.bmm(x, encoder_out[0])
# don't attend over padding
if encoder_padding_mask is not None:
x = x.float().masked_fill(
encoder_padding_mask.unsqueeze(1),
float('-inf')
).type_as(x) # FP16 support: cast to float and back
# softmax over last dim
sz = x.size()
x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
x = x.view(sz)
attn_scores = x
x = self.bmm(x, encoder_out[1])
# scale attention output (respecting potentially different lengths)
s = encoder_out[1].size(1)
if encoder_padding_mask is None:
x = x * (s * math.sqrt(1.0 / s))
else:
s = s - encoder_padding_mask.type_as(x).sum(dim=1, keepdim=True) # exclude padding
s = s.unsqueeze(-1)
x = x * (s * s.rsqrt())
# project back
x = (self.out_projection(x) + residual) * math.sqrt(0.5)
return x, attn_scores
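    # Note (added): the rescaling above multiplies by s * sqrt(1/s) = sqrt(s)
    # (computed per row from the unpadded length when a padding mask is given),
    # compensating for the attention's averaging over s source positions.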
def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
"""Replace torch.bmm with BeamableMM."""
if beamable_mm_beam_size is not None:
del self.bmm
self.add_module('bmm', BeamableMM(beamable_mm_beam_size))
class FConvDecoder(FairseqIncrementalDecoder):
"""Convolutional decoder"""
def __init__(
self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256,
max_positions=1024, convolutions=((512, 3),) * 20, attention=True,
dropout=0.1, share_embed=False, positional_embeddings=True,
adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0,
left_pad=False,
):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([2]))
self.dropout = dropout
self.left_pad = left_pad
self.need_attn = True
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
if isinstance(attention, bool):
# expand True into [True, True, ...] and do the same with False
attention = [attention] * len(convolutions)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError('Attention is expected to be a list of booleans of '
'length equal to the number of layers.')
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
left_pad=self.left_pad,
) if positional_embeddings else None
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(Linear(residual_dim, out_channels)
if residual_dim != out_channels else None)
self.convolutions.append(
LinearizedConv1d(in_channels, out_channels * 2, kernel_size,
padding=(kernel_size - 1), dropout=dropout)
)
self.attention.append(AttentionLayer(out_channels, embed_dim)
if attention[i] else None)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.adaptive_softmax = None
self.fc2 = self.fc3 = None
if adaptive_softmax_cutoff is not None:
assert not share_embed
self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff,
dropout=adaptive_softmax_dropout)
else:
self.fc2 = Linear(in_channels, out_embed_dim)
if share_embed:
assert out_embed_dim == embed_dim, \
"Shared embed weights implies same dimensions " \
" out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim)
self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
self.fc3.weight = self.embed_tokens.weight
else:
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
def forward(self, prev_output_tokens, encoder_out_dict=None, incremental_state=None):
if encoder_out_dict is not None:
encoder_out = encoder_out_dict['encoder_out']
encoder_padding_mask = encoder_out_dict['encoder_padding_mask']
# split and transpose encoder outputs
encoder_a, encoder_b = self._split_encoder_out(encoder_out, incremental_state)
if self.embed_positions is not None:
pos_embed = self.embed_positions(prev_output_tokens, incremental_state)
else:
pos_embed = 0
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
x = self._embed_tokens(prev_output_tokens, incremental_state)
# embed tokens and combine with positional embeddings
x += pos_embed
x = F.dropout(x, p=self.dropout, training=self.training)
target_embedding = x
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
# temporal convolutions
avg_attn_scores = None
num_attn_layers = len(self.attention)
residuals = [x]
for proj, conv, attention, res_layer in zip(self.projections, self.convolutions, self.attention,
self.residuals):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
x = F.dropout(x, p=self.dropout, training=self.training)
x = conv(x, incremental_state)
x = F.glu(x, dim=2)
# attention
if attention is not None:
x = self._transpose_if_training(x, incremental_state)
x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask)
if not self.training and self.need_attn:
attn_scores = attn_scores / num_attn_layers
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
x = self._transpose_if_training(x, incremental_state)
# residual
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = self._transpose_if_training(x, incremental_state)
# project back to size of vocabulary if not using adaptive softmax
if self.fc2 is not None and self.fc3 is not None:
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.fc3(x)
return x, avg_attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
encoder_out = utils.get_incremental_state(self, incremental_state, 'encoder_out')
if encoder_out is not None:
encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out)
utils.set_incremental_state(self, incremental_state, 'encoder_out', encoder_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.embed_positions.max_positions() if self.embed_positions is not None else float('inf')
def upgrade_state_dict(self, state_dict):
if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2:
# old models use incorrect weight norm dimension
for i, conv in enumerate(self.convolutions):
# reconfigure weight norm
nn.utils.remove_weight_norm(conv)
self.convolutions[i] = nn.utils.weight_norm(conv, dim=0)
            # mark the checkpoint as upgraded so this is not re-applied
            state_dict['decoder.version'] = torch.Tensor([2])
return state_dict
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _embed_tokens(self, tokens, incremental_state):
if incremental_state is not None:
# keep only the last token for incremental forward pass
tokens = tokens[:, -1:]
return self.embed_tokens(tokens)
def _split_encoder_out(self, encoder_out, incremental_state):
"""Split and transpose encoder outputs.
This is cached when doing incremental inference.
"""
cached_result = utils.get_incremental_state(self, incremental_state, 'encoder_out')
if cached_result is not None:
return cached_result
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(1, 2).contiguous()
result = (encoder_a, encoder_b)
if incremental_state is not None:
utils.set_incremental_state(self, incremental_state, 'encoder_out', result)
return result
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def extend_conv_spec(convolutions):
"""
    Extends a convolutional spec, given as a list of tuples of 2 or 3
    parameters ``(out_channels, kernel_size[, residual])``, by defaulting the
    residual parameter (how many layers back to take the residual from) to 1
    when it is not specified.
"""
extended = []
for spec in convolutions:
if len(spec) == 3:
extended.append(spec)
elif len(spec) == 2:
extended.append(spec + (1,))
else:
raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3')
return tuple(extended)
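# Worked example (added):
#
#   extend_conv_spec(((512, 3), (768, 3, 2)))
#   # -> ((512, 3, 1), (768, 3, 2))
#
# i.e. 2-tuples gain the default residual distance of 1 (take the residual from
# the previous layer), while 3-tuples pass through unchanged.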
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features))
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m)
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
@register_model_architecture('fconv_lm', 'fconv_lm')
def base_lm_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
args.decoder_layers = getattr(args, 'decoder_layers', '[(1268, 4)] * 13')
args.decoder_attention = getattr(args, 'decoder_attention', 'False')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103')
def fconv_lm_dauphin_wikitext103(args):
layers = '[(850, 6)] * 3'
layers += ' + [(850, 1)] * 1'
layers += ' + [(850, 5)] * 4'
layers += ' + [(850, 1)] * 1'
layers += ' + [(850, 4)] * 3'
layers += ' + [(1024, 4)] * 1'
layers += ' + [(2048, 4)] * 1'
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280)
args.decoder_layers = getattr(args, 'decoder_layers', layers)
args.decoder_attention = getattr(args, 'decoder_attention', 'False')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000')
base_lm_architecture(args)
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw')
def fconv_lm_dauphin_gbw(args):
layers = '[(512, 5)]'
layers += ' + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3'
layers += ' + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3'
layers += ' + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6'
layers += ' + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]'
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
args.decoder_layers = getattr(args, 'decoder_layers', layers)
args.decoder_attention = getattr(args, 'decoder_attention', 'False')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')
base_lm_architecture(args)
@register_model_architecture('fconv', 'fconv')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.decoder_attention = getattr(args, 'decoder_attention', 'True')
args.share_input_output_embed = getattr(args, 'share_input_output_embed', False)
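# Note (added): the named architectures below are command-line presets, e.g.
# `--arch fconv_iwslt_de_en`; each overrides a few defaults and then falls
# through to base_architecture() for everything else.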
@register_model_architecture('fconv', 'fconv_iwslt_de_en')
def fconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_layers = getattr(args, 'encoder_layers', '[(256, 3)] * 4')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_layers = getattr(args, 'decoder_layers', '[(256, 3)] * 3')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_ro')
def fconv_wmt_en_ro(args):
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_de')
def fconv_wmt_en_de(args):
convs = '[(512, 3)] * 9' # first 9 layers have 512 units
convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units
convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_layers = getattr(args, 'encoder_layers', convs)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_layers = getattr(args, 'decoder_layers', convs)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_fr')
def fconv_wmt_en_fr(args):
convs = '[(512, 3)] * 6' # first 6 layers have 512 units
convs += ' + [(768, 3)] * 4' # next 4 layers have 768 units
convs += ' + [(1024, 3)] * 3' # next 3 layers have 1024 units
convs += ' + [(2048, 1)] * 1' # next 1 layer uses 1x1 convolutions
convs += ' + [(4096, 1)] * 1' # final 1 layer uses 1x1 convolutions
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_layers = getattr(args, 'encoder_layers', convs)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_layers = getattr(args, 'decoder_layers', convs)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
base_architecture(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/models/fconv_self_att.py | bert/fairseq/models/fconv_self_att.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import (
DownsampledMultiHeadAttention, GradMultiply, LearnedPositionalEmbedding,
LinearizedConvolution,
)
from fairseq import utils
from . import (
FairseqEncoder, CompositeEncoder, FairseqDecoder, FairseqModel,
register_model, register_model_architecture,
)
@register_model('fconv_self_att')
class FConvModelSelfAtt(FairseqModel):
def __init__(self, encoder, decoder, pretrained_encoder=None):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)
self.pretrained_encoder = pretrained_encoder
if self.pretrained_encoder is None:
encoders = {'encoder': encoder}
else:
encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder}
# for fusion model, CompositeEncoder contains both pretrained and training encoders
# these are forwarded and then combined in the decoder
self.encoder = CompositeEncoder(encoders)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--self-attention', type=str, metavar='EXPR',
help='decoder self-attention layers, ex: [True] + [False]*5')
parser.add_argument('--multihead-attention-nheads', type=int,
help='Number of heads to use in attention')
parser.add_argument('--multihead-self-attention-nheads', type=int,
help='Number of heads to use in self-attention')
parser.add_argument('--encoder-attention', type=str, metavar='EXPR',
help='encoder attention [True, ...]')
parser.add_argument('--encoder-attention-nheads', type=int,
help='Number of heads to use in encoder attention')
parser.add_argument('--project-input', type=str, metavar='EXPR',
help='Use projections in self-attention [True, ...]')
parser.add_argument('--gated-attention', type=str, metavar='EXPR',
help='Use GLU layers in self-attention projections [True, ...]')
parser.add_argument('--downsample', type=str, metavar='EXPR',
help='Use downsampling in self-attention [True, ...]')
parser.add_argument('--pretrained-checkpoint', metavar='DIR',
help='path to load checkpoint from pretrained model')
parser.add_argument('--pretrained', type=str, metavar='EXPR',
help='use pretrained model when training [True, ...]')
@classmethod
def build_model(cls, args, task):
        """Build a new model instance."""
        trained_encoder, trained_decoder = None, None
pretrained = eval(args.pretrained)
if pretrained:
print("| loading pretrained model")
trained_model = utils.load_ensemble_for_inference(
# not actually for inference, but loads pretrained model parameters
filenames=[args.pretrained_checkpoint],
task=task,
)[0][0]
trained_decoder = list(trained_model.children())[1]
trained_encoder = list(trained_model.children())[0]
# freeze pretrained model
for param in trained_decoder.parameters():
param.requires_grad = False
for param in trained_encoder.parameters():
param.requires_grad = False
"""Build a new model instance."""
encoder = FConvEncoder(
task.source_dictionary,
embed_dim=args.encoder_embed_dim,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
attention=eval(args.encoder_attention),
attention_nheads=args.encoder_attention_nheads
)
decoder = FConvDecoder(
task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
selfattention=eval(args.self_attention),
attention_nheads=args.multihead_attention_nheads,
selfattention_nheads=args.multihead_self_attention_nheads,
project_input=eval(args.project_input),
gated_attention=eval(args.gated_attention),
downsample=eval(args.downsample),
pretrained=pretrained,
trained_decoder=trained_decoder
)
model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
return model
@property
def pretrained(self):
return self.pretrained_encoder is not None
class FConvEncoder(FairseqEncoder):
"""Convolutional encoder"""
def __init__(
self, dictionary, embed_dim=512, max_positions=1024,
convolutions=((512, 3),) * 20, dropout=0.1, attention=False,
attention_nheads=1, left_pad=True,
):
super().__init__(dictionary)
self.dropout = dropout
self.num_attention_layers = None
self.left_pad = left_pad
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
left_pad=self.left_pad,
)
def expand_bool_array(val):
if isinstance(val, bool):
# expand True into [True, True, ...] and do the same with False
return [val] * len(convolutions)
return val
attention = expand_bool_array(attention)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.attproj = nn.ModuleList()
for i, (out_channels, kernel_size) in enumerate(convolutions):
self.projections.append(
Linear(in_channels, out_channels) if in_channels != out_channels else None
)
self.convolutions.append(
ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout)
)
self.attention.append(
SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None
)
in_channels = out_channels
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
input_embedding = x.transpose(0, 1)
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# temporal convolutions
for proj, conv, attention in zip(self.projections, self.convolutions, self.attention):
residual = x if proj is None else proj(x)
x = F.dropout(x, p=self.dropout, training=self.training)
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if attention is not None:
x = attention(x)
x = (x + residual) * math.sqrt(0.5)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5)
return {
'encoder_out': (x, y),
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out['encoder_out'] = tuple(
eo.index_select(0, new_order) for eo in encoder_out['encoder_out']
)
if 'pretrained' in encoder_out:
encoder_out['pretrained']['encoder_out'] = tuple(
eo.index_select(0, new_order)
for eo in encoder_out['pretrained']['encoder_out']
)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions()
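# The GradMultiply.apply call in FConvEncoder.forward scales gradients flowing
# back from the decoder's attention layers by 1/(2 * num_attention_layers),
# which keeps encoder gradients stable when several attention layers all
# backpropagate into it. A minimal, self-contained sketch of such an op
# (ScaleGrad is a hypothetical name for illustration; fairseq's GradMultiply
# behaves equivalently): identity on the forward pass, gradient multiplied by
# a constant on the backward pass.
import torch

class ScaleGrad(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        return x.clone()  # forward output is unchanged

    @staticmethod
    def backward(ctx, grad_output):
        # rescale only the gradient; no gradient flows to `scale`
        return grad_output * ctx.scale, None

x = torch.randn(4, requires_grad=True)
ScaleGrad.apply(x, 0.25).sum().backward()
assert torch.allclose(x.grad, torch.full((4,), 0.25))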
class FConvDecoder(FairseqDecoder):
"""Convolutional decoder"""
def __init__(
self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024,
convolutions=((512, 3),) * 8, attention=True, dropout=0.1,
selfattention=False, attention_nheads=1, selfattention_nheads=1,
project_input=False, gated_attention=False, downsample=False,
pretrained=False, trained_decoder=None, left_pad=False,
):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([2]))
self.pretrained = pretrained
self.pretrained_decoder = trained_decoder
self.dropout = dropout
self.left_pad = left_pad
self.need_attn = True
in_channels = convolutions[0][0]
def expand_bool_array(val):
if isinstance(val, bool):
# expand True into [True, True, ...] and do the same with False
return [val] * len(convolutions)
return val
attention = expand_bool_array(attention)
selfattention = expand_bool_array(selfattention)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError('Attention is expected to be a list of booleans of '
'length equal to the number of layers.')
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
left_pad=self.left_pad,
)
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.selfattention = nn.ModuleList()
self.attproj = nn.ModuleList()
for i, (out_channels, kernel_size) in enumerate(convolutions):
self.projections.append(
Linear(in_channels, out_channels) if in_channels != out_channels else None
)
self.convolutions.append(
LinearizedConv1d(
in_channels, out_channels * 2, kernel_size,
padding=(kernel_size - 1), dropout=dropout,
)
)
self.attention.append(
DownsampledMultiHeadAttention(
out_channels, embed_dim, attention_nheads,
project_input=project_input, gated=False, downsample=False,
) if attention[i] else None
)
self.attproj.append(
Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None
)
self.selfattention.append(
SelfAttention(
out_channels, embed_dim, selfattention_nheads,
project_input=project_input, gated=gated_attention,
downsample=downsample,
) if selfattention[i] else None
)
in_channels = out_channels
self.fc2 = Linear(in_channels, out_embed_dim)
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
# model fusion
if self.pretrained:
# independent gates are learned from the concatenated input
self.gate1 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid())
self.gate2 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid())
# pretrained and trained models are joined
self.joining = nn.Sequential(
Linear(out_embed_dim*2, out_embed_dim*2),
nn.LayerNorm(out_embed_dim*2),
nn.GLU(),
Linear(out_embed_dim, out_embed_dim*2),
nn.LayerNorm(out_embed_dim*2),
nn.GLU(),
Linear(out_embed_dim, out_embed_dim),
nn.LayerNorm(out_embed_dim)
)
            # the pretrained decoder's own output layer maps nhid -> vocab size,
            # but the two models are fused in hidden-state space, so a forward
            # hook captures the pretrained decoder's fc2 output for fusion
self.pretrained_outputs = {}
def save_output():
            def hook(module, inputs, output):
self.pretrained_outputs["out"] = output
return hook
self.pretrained_decoder.fc2.register_forward_hook(save_output())
def forward(self, prev_output_tokens, encoder_out_dict):
encoder_out = encoder_out_dict['encoder']['encoder_out']
trained_encoder_out = encoder_out_dict['pretrained'] if self.pretrained else None
encoder_a, encoder_b = self._split_encoder_out(encoder_out)
# embed positions
positions = self.embed_positions(prev_output_tokens)
# embed tokens and positions
x = self.embed_tokens(prev_output_tokens) + positions
x = F.dropout(x, p=self.dropout, training=self.training)
target_embedding = x.transpose(0, 1)
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# temporal convolutions
avg_attn_scores = None
for proj, conv, attention, selfattention, attproj in zip(
self.projections, self.convolutions, self.attention, self.selfattention, self.attproj
):
residual = x if proj is None else proj(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = conv(x)
x = F.glu(x, dim=2)
# attention
if attention is not None:
r = x
x, attn_scores = attention(attproj(x) + target_embedding, encoder_a, encoder_b)
x = x + r
if not self.training and self.need_attn:
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
if selfattention is not None:
x = selfattention(x)
x = (x + residual) * math.sqrt(0.5)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
# project back to size of vocabulary
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
if not self.pretrained:
x = self.fc3(x)
# fusion gating
if self.pretrained:
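            # run the pretrained decoder so the forward hook registered in
            # __init__ fills self.pretrained_outputs["out"] with its fc2 output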
trained_x, _ = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1)
gate1 = self.gate1(y)
gate2 = self.gate2(y)
gated_x1 = gate1 * x
gated_x2 = gate2 * self.pretrained_outputs["out"]
fusion = torch.cat([gated_x1, gated_x2], dim=-1)
fusion = self.joining(fusion)
fusion_output = self.fc3(fusion)
return fusion_output, avg_attn_scores
else:
return x, avg_attn_scores
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.embed_positions.max_positions()
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _split_encoder_out(self, encoder_out):
"""Split and transpose encoder outputs."""
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(0, 1).contiguous()
encoder_b = encoder_b.transpose(0, 1).contiguous()
result = (encoder_a, encoder_b)
return result
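# A toy sketch of the fusion path taken in FConvDecoder.forward when
# self.pretrained is True: two sigmoid gates are computed from the
# concatenation of the trained and pretrained hidden states, each stream is
# gated, and the result is concatenated for the joining MLP. Shapes and
# module names below are illustrative, not fairseq API.
import torch
import torch.nn as nn

dim = 8
gate1 = nn.Sequential(nn.Linear(2 * dim, dim), nn.Sigmoid())
gate2 = nn.Sequential(nn.Linear(2 * dim, dim), nn.Sigmoid())

x = torch.randn(2, 5, dim)           # trained decoder hidden states (B x T x C)
pretrained = torch.randn(2, 5, dim)  # pretrained decoder hidden states
y = torch.cat([x, pretrained], dim=-1)
fusion = torch.cat([gate1(y) * x, gate2(y) * pretrained], dim=-1)
print(fusion.shape)  # torch.Size([2, 5, 16]), input to the joining network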
class SelfAttention(nn.Module):
def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False):
super().__init__()
self.attention = DownsampledMultiHeadAttention(
out_channels, embed_dim, num_heads, dropout=0, bias=True,
project_input=project_input, gated=gated, downsample=downsample,
)
self.in_proj_q = Linear(out_channels, embed_dim)
self.in_proj_k = Linear(out_channels, embed_dim)
self.in_proj_v = Linear(out_channels, embed_dim)
self.ln = nn.LayerNorm(out_channels)
def forward(self, x):
residual = x
query = self.in_proj_q(x)
key = self.in_proj_k(x)
value = self.in_proj_v(x)
x, _ = self.attention(query, key, value, mask_future_timesteps=True, use_scalar_bias=True)
return self.ln(x + residual)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
m.weight.data.normal_(0, 0.1)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)
m.weight.data.normal_(0, 0.1)
return m
def Linear(in_features, out_features, dropout=0.):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
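# The 4 * (1 - dropout) factor in LinearizedConv1d and ConvTBC above
# compensates for GLU (which halves and gates the channels) and for dropout,
# so activation variance stays roughly constant across layers -- the scheme
# from Gehring et al. (2017), "Convolutional Sequence to Sequence Learning".
# Quick numeric check of the formula:
import math

in_channels, kernel_size, dropout = 512, 3, 0.1
std = math.sqrt((4 * (1.0 - dropout)) / (kernel_size * in_channels))
print(f"init std = {std:.4f}")  # ~0.0484 for this configuration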
@register_model_architecture('fconv_self_att', 'fconv_self_att')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 3')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 8')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.decoder_attention = getattr(args, 'decoder_attention', 'True')
args.self_attention = getattr(args, 'self_attention', 'False')
args.encoder_attention = getattr(args, 'encoder_attention', 'False')
args.multihead_attention_nheads = getattr(args, 'multihead_attention_nheads', 1)
args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 1)
args.encoder_attention_nheads = getattr(args, 'encoder_attention_nheads', 1)
args.project_input = getattr(args, 'project_input', 'False')
args.gated_attention = getattr(args, 'gated_attention', 'False')
args.downsample = getattr(args, 'downsample', 'False')
args.pretrained_checkpoint = getattr(args, 'pretrained_checkpoint', '')
args.pretrained = getattr(args, 'pretrained', 'False')
@register_model_architecture('fconv_self_att', 'fconv_self_att_wp')
def fconv_self_att_wp(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_layers = getattr(args, 'encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.self_attention = getattr(args, 'self_attention', 'True')
args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 4)
args.project_input = getattr(args, 'project_input', 'True')
args.gated_attention = getattr(args, 'gated_attention', 'True')
args.downsample = getattr(args, 'downsample', 'True')
base_architecture(args)
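# The *_layers and boolean-ish architecture arguments above are strings that
# the model builder expands with eval(); a layer spec becomes a list of
# (out_channels, kernel_size) tuples, one per convolution. For example, the
# fconv_self_att_wp encoder spec:
layers = eval('[(128, 3)] * 2 + [(512,3)] * 1')
print(layers)  # [(128, 3), (128, 3), (512, 3)]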
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/models/fairseq_encoder.py | bert/fairseq/models/fairseq_encoder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.nn as nn
class FairseqEncoder(nn.Module):
"""Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths, segment):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
            segment (LongTensor): segment label for each token (1 for the first segment, 0 for the second)
"""
raise NotImplementedError
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
"""Maximum input length supported by the encoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict(self, state_dict):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
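# A minimal sketch of what a concrete FairseqEncoder subclass must provide:
# a forward() whose output can later be reordered, and reorder_encoder_out()
# for beam search. ToyEncoder and its random "encoding" are illustrative
# stand-ins, not part of fairseq.
import torch

class ToyEncoder(FairseqEncoder):
    def forward(self, src_tokens, src_lengths, segment):
        # pretend-encode: one random state vector per batch element
        return {'encoder_out': torch.randn(src_tokens.size(0), 4)}

    def reorder_encoder_out(self, encoder_out, new_order):
        encoder_out['encoder_out'] = \
            encoder_out['encoder_out'].index_select(0, new_order)
        return encoder_out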
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/models/fairseq_incremental_decoder.py | bert/fairseq/models/fairseq_incremental_decoder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqDecoder
class FairseqIncrementalDecoder(FairseqDecoder):
"""Base class for incremental decoders.
Incremental decoding is a special mode at inference time where the Model
only receives a single timestep of input corresponding to the immediately
previous output token (for input feeding) and must produce the next output
*incrementally*. Thus the model must cache any long-term state that is
needed about the sequence, e.g., hidden states, convolutional states, etc.
Compared to the standard :class:`FairseqDecoder` interface, the incremental
decoder interface allows :func:`forward` functions to take an extra keyword
argument (*incremental_state*) that can be used to cache state across
time-steps.
The :class:`FairseqIncrementalDecoder` interface also defines the
:func:`reorder_incremental_state` method, which is used during beam search
to select and reorder the incremental state based on the selection of beams.
"""
def __init__(self, dictionary):
super().__init__(dictionary)
def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
raise NotImplementedError
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder incremental state.
This should be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
def apply_reorder_incremental_state(module):
if module != self and hasattr(module, 'reorder_incremental_state'):
module.reorder_incremental_state(
incremental_state,
new_order,
)
self.apply(apply_reorder_incremental_state)
def set_beam_size(self, beam_size):
"""Sets the beam size in the decoder and all children."""
if getattr(self, '_beam_size', -1) != beam_size:
def apply_set_beam_size(module):
if module != self and hasattr(module, 'set_beam_size'):
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
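# A minimal sketch of the incremental decoding loop this interface enables:
# the same dict is threaded through successive forward() calls so the decoder
# can cache hidden/convolutional state instead of re-encoding the whole prefix
# at each step. `decoder`, `encoder_out`, `bos_idx` and `eos_idx` are
# hypothetical stand-ins; with beam search, reorder_incremental_state() would
# additionally be called whenever the beam hypotheses are re-ranked.
import torch

def greedy_decode(decoder, encoder_out, bos_idx, eos_idx, max_len=50):
    incremental_state = {}  # persists (and grows) across time steps
    tokens = torch.full((1, 1), bos_idx, dtype=torch.long)
    for _ in range(max_len):
        logits, _ = decoder(tokens, encoder_out,
                            incremental_state=incremental_state)
        next_tok = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        tokens = torch.cat([tokens, next_tok], dim=1)
        if next_tok.item() == eos_idx:
            break
    return tokens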
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/bert/fairseq/models/lstm.py | bert/fairseq/models/lstm.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import AdaptiveSoftmax
from . import (
FairseqEncoder, FairseqIncrementalDecoder, FairseqModel, register_model,
register_model_architecture,
)
@register_model('lstm')
class LSTMModel(FairseqModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
help='encoder hidden size')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='number of encoder layers')
parser.add_argument('--encoder-bidirectional', action='store_true',
help='make all layers of encoder bidirectional')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
help='decoder hidden size')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='number of decoder layers')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
help='decoder attention')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
help='dropout probability for encoder input embedding')
parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
help='dropout probability for encoder output')
parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
help='dropout probability for decoder input embedding')
parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
help='dropout probability for decoder output')
parser.add_argument('--share-decoder-input-output-embed', default=False,
action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', default=False, action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
if args.encoder_embed_path:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim)
else:
num_embeddings = len(task.source_dictionary)
pretrained_encoder_embed = Embedding(
num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad()
)
if args.share_all_embeddings:
            # double-check that all parameter combinations are valid
if task.source_dictionary != task.target_dictionary:
raise RuntimeError('--share-all-embeddings requires a joint dictionary')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
                raise RuntimeError(
                    '--share-all-embeddings not compatible with --decoder-embed-path'
                )
if args.encoder_embed_dim != args.decoder_embed_dim:
raise RuntimeError(
'--share-all-embeddings requires --encoder-embed-dim to '
'match --decoder-embed-dim'
)
pretrained_decoder_embed = pretrained_encoder_embed
args.share_decoder_input_output_embed = True
else:
# separate decoder input embeddings
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path,
task.target_dictionary,
args.decoder_embed_dim
)
# one last double check of parameter combinations
if args.share_decoder_input_output_embed and (
args.decoder_embed_dim != args.decoder_out_embed_dim):
            raise RuntimeError(
                '--share-decoder-input-output-embed requires '
                '--decoder-embed-dim to match --decoder-out-embed-dim'
            )
encoder = LSTMEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
hidden_size=args.encoder_hidden_size,
num_layers=args.encoder_layers,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
bidirectional=args.encoder_bidirectional,
pretrained_embed=pretrained_encoder_embed,
)
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
attention=options.eval_bool(args.decoder_attention),
encoder_embed_dim=args.encoder_embed_dim,
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
share_input_output_embed=args.share_decoder_input_output_embed,
adaptive_softmax_cutoff=(
options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == 'adaptive_loss' else None
),
)
return cls(encoder, decoder)
class LSTMEncoder(FairseqEncoder):
"""LSTM encoder."""
def __init__(
self, dictionary, embed_dim=512, hidden_size=512, num_layers=1,
dropout_in=0.1, dropout_out=0.1, bidirectional=False,
left_pad=True, pretrained_embed=None, padding_value=0.,
):
super().__init__(dictionary)
self.num_layers = num_layers
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.bidirectional = bidirectional
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
else:
self.embed_tokens = pretrained_embed
self.lstm = LSTM(
input_size=embed_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout_out if num_layers > 1 else 0.,
bidirectional=bidirectional,
)
self.left_pad = left_pad
self.padding_value = padding_value
self.output_units = hidden_size
if bidirectional:
self.output_units *= 2
def forward(self, src_tokens, src_lengths):
if self.left_pad:
# convert left-padding to right-padding
src_tokens = utils.convert_padding_direction(
src_tokens,
self.padding_idx,
left_to_right=True,
)
bsz, seqlen = src_tokens.size()
# embed tokens
x = self.embed_tokens(src_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# pack embedded source tokens into a PackedSequence
packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())
# apply LSTM
if self.bidirectional:
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
h0 = x.data.new(*state_size).zero_()
c0 = x.data.new(*state_size).zero_()
packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_value)
x = F.dropout(x, p=self.dropout_out, training=self.training)
assert list(x.size()) == [seqlen, bsz, self.output_units]
if self.bidirectional:
def combine_bidir(outs):
return outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous().view(self.num_layers, bsz, -1)
final_hiddens = combine_bidir(final_hiddens)
final_cells = combine_bidir(final_cells)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
return {
'encoder_out': (x, final_hiddens, final_cells),
'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out['encoder_out'] = tuple(
eo.index_select(1, new_order)
for eo in encoder_out['encoder_out']
)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return int(1e5) # an arbitrary large number
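# Shape walkthrough for combine_bidir above: nn.LSTM returns final states as
# (num_layers * num_directions, bsz, hidden) with the two directions of each
# layer adjacent, and the view/transpose/view chain concatenates the forward
# and backward states per layer into (num_layers, bsz, 2 * hidden).
import torch

num_layers, bsz, hidden = 2, 3, 4
h_n = torch.arange(num_layers * 2 * bsz * hidden, dtype=torch.float)
h_n = h_n.view(num_layers * 2, bsz, hidden)
combined = (
    h_n.view(num_layers, 2, bsz, hidden)
       .transpose(1, 2).contiguous()
       .view(num_layers, bsz, 2 * hidden)
)
print(combined.shape)  # torch.Size([2, 3, 8])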
class AttentionLayer(nn.Module):
def __init__(self, input_embed_dim, output_embed_dim):
super().__init__()
self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
self.output_proj = Linear(input_embed_dim + output_embed_dim, output_embed_dim, bias=False)
def forward(self, input, source_hids, encoder_padding_mask):
# input: bsz x input_embed_dim
# source_hids: srclen x bsz x output_embed_dim
# x: bsz x output_embed_dim
x = self.input_proj(input)
# compute attention
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
# don't attend over padding
if encoder_padding_mask is not None:
attn_scores = attn_scores.float().masked_fill_(
encoder_padding_mask,
float('-inf')
).type_as(attn_scores) # FP16 support: cast to float and back
attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz
# sum weighted sources
x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
        x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1)))
return x, attn_scores
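# A small numeric sketch of the masking trick in AttentionLayer.forward:
# padded source positions are filled with -inf before the softmax over the
# source length, so they receive exactly zero attention weight.
import torch
import torch.nn.functional as F

attn_scores = torch.tensor([1.0, 2.0, 3.0])        # srclen = 3, one batch elem
padding_mask = torch.tensor([False, False, True])  # last position is padding
attn_scores = attn_scores.masked_fill(padding_mask, float('-inf'))
print(F.softmax(attn_scores, dim=0))  # tensor([0.2689, 0.7311, 0.0000])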
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
def __init__(
self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512,
num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True,
encoder_embed_dim=512, encoder_output_units=512, pretrained_embed=None,
share_input_output_embed=False, adaptive_softmax_cutoff=None,
):
super().__init__(dictionary)
self.dropout_in = dropout_in
self.dropout_out = dropout_out
self.hidden_size = hidden_size
self.share_input_output_embed = share_input_output_embed
self.need_attn = True
self.adaptive_softmax = None
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
else:
self.embed_tokens = pretrained_embed
self.encoder_output_units = encoder_output_units
assert encoder_output_units == hidden_size, \
'encoder_output_units ({}) != hidden_size ({})'.format(encoder_output_units, hidden_size)
        # TODO: add a projection layer here to support encoder_output_units != hidden_size
self.layers = nn.ModuleList([
LSTMCell(
input_size=encoder_output_units + embed_dim if layer == 0 else hidden_size,
hidden_size=hidden_size,
)
for layer in range(num_layers)
])
self.attention = AttentionLayer(encoder_output_units, hidden_size) if attention else None
if hidden_size != out_embed_dim:
self.additional_fc = Linear(hidden_size, out_embed_dim)
if adaptive_softmax_cutoff is not None:
            # use dropout_out for the adaptive softmax for now; this could be made configurable
self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, embed_dim, adaptive_softmax_cutoff,
dropout=dropout_out)
elif not self.share_input_output_embed:
self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
def forward(self, prev_output_tokens, encoder_out_dict, incremental_state=None):
encoder_out = encoder_out_dict['encoder_out']
encoder_padding_mask = encoder_out_dict['encoder_padding_mask']
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# get outputs from encoder
encoder_outs, _, _ = encoder_out[:3]
srclen = encoder_outs.size(0)
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')
if cached_state is not None:
prev_hiddens, prev_cells, input_feed = cached_state
else:
_, encoder_hiddens, encoder_cells = encoder_out[:3]
num_layers = len(self.layers)
prev_hiddens = [encoder_hiddens[i] for i in range(num_layers)]
prev_cells = [encoder_cells[i] for i in range(num_layers)]
input_feed = x.data.new(bsz, self.encoder_output_units).zero_()
attn_scores = x.data.new(srclen, seqlen, bsz).zero_()
outs = []
for j in range(seqlen):
# input feeding: concatenate context vector from previous time step
input = torch.cat((x[j, :, :], input_feed), dim=1)
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = F.dropout(hidden, p=self.dropout_out, training=self.training)
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
# apply attention using the last layer's hidden state
if self.attention is not None:
out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs, encoder_padding_mask)
else:
out = hidden
out = F.dropout(out, p=self.dropout_out, training=self.training)
# input feeding
input_feed = out
# save final output
outs.append(out)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self, incremental_state, 'cached_state', (prev_hiddens, prev_cells, input_feed))
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
if not self.training and self.need_attn:
attn_scores = attn_scores.transpose(0, 2)
else:
attn_scores = None
# project back to size of vocabulary
if self.adaptive_softmax is None:
if hasattr(self, 'additional_fc'):
x = self.additional_fc(x)
x = F.dropout(x, p=self.dropout_out, training=self.training)
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
else:
x = self.fc_out(x)
return x, attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, 'cached_state', new_state)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return int(1e5) # an arbitrary large number
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.uniform_(m.weight, -0.1, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def LSTM(input_size, hidden_size, **kwargs):
m = nn.LSTM(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if 'weight' in name or 'bias' in name:
param.data.uniform_(-0.1, 0.1)
return m
def LSTMCell(input_size, hidden_size, **kwargs):
m = nn.LSTMCell(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if 'weight' in name or 'bias' in name:
param.data.uniform_(-0.1, 0.1)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
@register_model_architecture('lstm', 'lstm')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', args.encoder_embed_dim)
args.encoder_layers = getattr(args, 'encoder_layers', 1)
args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', False)
args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout)
args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', args.decoder_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 1)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
args.decoder_attention = getattr(args, 'decoder_attention', '1')
args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout)
args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')
@register_model_architecture('lstm', 'lstm_wiseman_iwslt_de_en')
def lstm_wiseman_iwslt_de_en(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', 0)
args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', 0)
args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout)
base_architecture(args)
@register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
def lstm_luong_wmt_en_de(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000)
args.encoder_layers = getattr(args, 'encoder_layers', 4)
args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1000)
args.decoder_layers = getattr(args, 'decoder_layers', 4)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 1000)
args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', 0)
base_architecture(args)
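# The architecture functions above use getattr-with-default so that any value
# already set on args (e.g. from the command line) wins and everything else is
# filled in. A minimal illustration, assuming this module has been imported so
# the functions are defined:
from argparse import Namespace

args = Namespace(dropout=0.3)
lstm_luong_wmt_en_de(args)
print(args.dropout)            # 0.3  -- user-specified value kept
print(args.encoder_embed_dim)  # 1000 -- filled in by the architecture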
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |