| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, ⌀ = null) |
|---|---|---|
183,889 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
class IndexedDataset(FairseqDataset):
def __init__(self, path, fix_lua_indexing=False):
def read_index(self, path):
def read_data(self, path):
def check_index(self, i):
def __del__(self):
def __getitem__(self, i) -> torch.Tensor:
def __len__(self):
def num_tokens(self, index):
def size(self, index):
def exists(path):
def supports_prefetch(self):
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
def supports_prefetch(self):
def prefetch(self, indices):
def __getitem__(self, i):
class IndexedRawTextDataset(FairseqDataset):
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
def read_data(self, path, dictionary):
def check_index(self, i):
def __getitem__(self, i):
def get_original_text(self, i):
def __del__(self):
def __len__(self):
def num_tokens(self, index):
def size(self, index):
def exists(path):
class MMapIndexedDataset(torch.utils.data.Dataset):
def writer(cls, path, dtype):
def __enter__(self):
def _get_pointers(sizes):
def write(self, sizes):
def __exit__(self, exc_type, exc_val, exc_tb):
def __init__(self, path):
def __del__(self):
def dtype(self):
def sizes(self):
def __getitem__(self, i):
def __len__(self):
def __init__(self, path):
def __getstate__(self):
def __setstate__(self, state):
def _do_init(self, path):
def __del__(self):
def __len__(self):
def __getitem__(self, i):
def sizes(self):
def supports_prefetch(self):
def exists(path):
class FastaDataset(torch.utils.data.Dataset):
def __init__(self, path: str, cache_indices=False):
def _get_file(self):
def __getitem__(self, idx):
def __len__(self):
def _build_index(self, path: str):
def __setstate__(self, state):
def __getstate__(self):
def __del__(self):
def exists(path):
class EncodedFastaDataset(FastaDataset):
def __init__(self, path, dictionary):
def __getitem__(self, idx):
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == "raw" and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
elif impl == "fasta" and FastaDataset.exists(path):
from fairseq.data.fasta_dataset import EncodedFastaDataset
return EncodedFastaDataset(path, dictionary)
return None | null |
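`make_dataset` dispatches on the `impl` string. A minimal usage sketch, assuming fairseq is installed and that `data-bin/train.de-en.de` is a hypothetical prefix with the `.bin`/`.idx` pair produced by `fairseq-preprocess`:

```python
from fairseq.data import indexed_dataset

prefix = "data-bin/train.de-en.de"  # hypothetical binarized-data prefix
if indexed_dataset.dataset_exists(prefix, impl="mmap"):
    ds = indexed_dataset.make_dataset(prefix, impl="mmap")
    print(len(ds), ds[0])  # sentence count and the first token tensor
```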
183,890 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
class IndexedDataset(FairseqDataset):
def __init__(self, path, fix_lua_indexing=False):
def read_index(self, path):
def read_data(self, path):
def check_index(self, i):
def __del__(self):
def __getitem__(self, i) -> torch.Tensor:
def __len__(self):
def num_tokens(self, index):
def size(self, index):
def exists(path):
def supports_prefetch(self):
class IndexedRawTextDataset(FairseqDataset):
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
def read_data(self, path, dictionary):
def check_index(self, i):
def __getitem__(self, i):
def get_original_text(self, i):
def __del__(self):
def __len__(self):
def num_tokens(self, index):
def size(self, index):
def exists(path):
class MMapIndexedDataset(torch.utils.data.Dataset):
def writer(cls, path, dtype):
def __enter__(self):
def _get_pointers(sizes):
def write(self, sizes):
def __exit__(self, exc_type, exc_val, exc_tb):
def __init__(self, path):
def __del__(self):
def dtype(self):
def sizes(self):
def __getitem__(self, i):
def __len__(self):
def __init__(self, path):
def __getstate__(self):
def __setstate__(self, state):
def _do_init(self, path):
def __del__(self):
def __len__(self):
def __getitem__(self, i):
def sizes(self):
def supports_prefetch(self):
def exists(path):
def dataset_exists(path, impl):
if impl == "raw":
return IndexedRawTextDataset.exists(path)
elif impl == "mmap":
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path) | null |
183,891 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a | null |
183,892 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64)) | null |
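`write_longs` and `read_longs` are inverses: both treat the stream as raw native-endian int64 words with no framing, so the reader must already know `n`. A self-contained roundtrip check against an in-memory buffer:

```python
import io

import numpy as np

def write_longs(f, a):
    # ndarray exposes the buffer protocol, so write() takes it directly
    f.write(np.array(a, dtype=np.int64))

def read_longs(f, n):
    a = np.empty(n, dtype=np.int64)
    f.readinto(a)  # fills the array's buffer in place
    return a

buf = io.BytesIO()
write_longs(buf, [3, 1, 4, 1, 5])
buf.seek(0)
print(read_longs(buf, 5))  # [3 1 4 1 5]
```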
183,893 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
_code_to_dtype = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float64,  # np.float was removed in NumPy >= 1.24
7: np.double,
8: np.uint16,
9: np.uint32,
10: np.uint64,
}
def _dtype_header_code(dtype) -> int:
for code, dt in _code_to_dtype.items():
if dt == dtype:
return code
raise ValueError(dtype) | null |
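A quick self-contained roundtrip check of the dtype header codes (the table below mirrors the one above). Note that `np.double` is an alias of `np.float64`, so codes 6 and 7 name the same type and the reverse lookup always resolves that type to 6, the first match:

```python
import numpy as np

_code_to_dtype = {
    1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64,
    6: np.float64, 7: np.double, 8: np.uint16, 9: np.uint32, 10: np.uint64,
}

def _dtype_header_code(dtype) -> int:
    for code, dt in _code_to_dtype.items():
        if dt == dtype:
            return code
    raise ValueError(dtype)

assert _code_to_dtype[_dtype_header_code(np.int32)] is np.int32
assert _dtype_header_code(np.double) == 6  # float64/double alias collapses to 6
```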
183,894 | import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import Union
def _warmup_mmap_file(path):
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass | null |
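The warmup simply streams the file once in 100 MiB chunks so the OS page cache is hot before the memory map takes random reads. A small self-contained demo, with `_warmup_mmap_file` from the snippet above in scope:

```python
import tempfile

import numpy as np

# Write a small binary file, warm it, then memory-map it.
with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f:
    np.arange(1000, dtype=np.int64).tofile(f)
    path = f.name

_warmup_mmap_file(path)  # sequential read fills the page cache
arr = np.memmap(path, dtype=np.int64, mode="r")
print(arr[123])  # random access now hits warm pages -> 123
```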
183,895 | import shutil
import logging
import os
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.dataclass.constants import DATASET_IMPL_CHOICES
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
from typing import List, Optional, Union
IOPathManager = None  # replaced by iopath's global path manager when available
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
iopath's PathManager abstraction (for transparently handling various
internal backends).
"""
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if IOPathManager:
return IOPathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if IOPathManager:
return IOPathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
def get_local_path(path: str, **kwargs) -> str:
if IOPathManager:
return IOPathManager.get_local_path(path, **kwargs)
return path
def exists(path: str) -> bool:
if IOPathManager:
return IOPathManager.exists(path)
return os.path.exists(path)
def isfile(path: str) -> bool:
if IOPathManager:
return IOPathManager.isfile(path)
return os.path.isfile(path)
def ls(path: str) -> List[str]:
if IOPathManager:
return IOPathManager.ls(path)
return os.listdir(path)
def mkdirs(path: str) -> None:
if IOPathManager:
return IOPathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
def rm(path: str) -> None:
if IOPathManager:
return IOPathManager.rm(path)
os.remove(path)
def chmod(path: str, mode: int) -> None:
if not PathManager.path_requires_pathmanager(path):
os.chmod(path, mode)
def register_handler(handler) -> None:
if IOPathManager:
return IOPathManager.register_handler(handler=handler)
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if IOPathManager:
return IOPathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
def path_requires_pathmanager(path: str) -> bool:
"""Do we require PathManager to access given path?"""
if IOPathManager:
for p in IOPathManager._path_handlers.keys():
if path.startswith(p):
return True
return False
def supports_rename(path: str) -> bool:
# PathManager doesn't yet support renames
return not PathManager.path_requires_pathmanager(path)
def rename(src: str, dst: str):
os.rename(src, dst)
"""
ioPath async PathManager methods:
"""
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
"""
Return file descriptor with asynchronous write operations.
"""
global IOPathManager
if not IOPathManager:
logging.info("ioPath is initializing PathManager.")
try:
from iopath.common.file_io import PathManager
IOPathManager = PathManager()
except Exception:
logging.exception("Failed to initialize ioPath PathManager object.")
return IOPathManager.opena(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
def async_close() -> bool:
"""
Wait for files to be written and clean up asynchronous PathManager.
NOTE: `PathManager.async_close()` must be called at the end of any
script that uses `PathManager.opena(...)`.
"""
global IOPathManager
if IOPathManager:
return IOPathManager.async_close()
return False
def get_indexed_dataset_to_local(path) -> str:
local_index_path = PathManager.get_local_path(index_file_path(path))
local_data_path = PathManager.get_local_path(data_file_path(path))
assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), (
"PathManager.get_local_path does not return files with expected patterns: "
f"{local_index_path} and {local_data_path}"
)
local_path = local_data_path[:-4]  # strip the ".bin" suffix
assert local_path == local_index_path[:-4]  # strip the ".idx" suffix
return local_path | null |
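When no iopath backend is registered, every `PathManager` method falls through to the local-filesystem branch, so `get_indexed_dataset_to_local` reduces to an identity on the prefix (plus the suffix sanity asserts). A sketch with the helpers above in scope and a hypothetical prefix:

```python
prefix = "data-bin/train.de-en.de"      # hypothetical dataset prefix
print(index_file_path(prefix))          # data-bin/train.de-en.de.idx
print(data_file_path(prefix))           # data-bin/train.de-en.de.bin

local = get_indexed_dataset_to_local(prefix)
assert local == prefix                  # pure pass-through without iopath
```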
183,896 | from argparse import Namespace
from typing import Union
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import populate_dataclass, merge_with_parent
from hydra.core.config_store import ConfigStore
from dataclasses import is_dataclass
from omegaconf import DictConfig, OmegaConf, open_dict
REGISTRIES = {}
def populate_dataclass(
dataclass: FairseqDataclass,
args: Namespace,
) -> FairseqDataclass:
for k in dataclass.__dataclass_fields__.keys():
if k.startswith("_"):
# private member, skip
continue
if hasattr(args, k):
setattr(dataclass, k, getattr(args, k))
return dataclass
def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True):
if remove_missing:
if is_dataclass(dc):
target_keys = set(dc.__dataclass_fields__.keys())
else:
target_keys = set(dc.keys())
with open_dict(cfg):
for k in list(cfg.keys()):
if k not in target_keys:
del cfg[k]
merged_cfg = OmegaConf.merge(dc, cfg)
merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"]
OmegaConf.set_struct(merged_cfg, True)
return merged_cfg
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(DATACLASS_REGISTRY[choice](), cfg)
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group=registry_name, node=node, provider="fairseq")
REGISTRY[name] = cls
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY | null |
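A hedged sketch of how `setup_registry` is typically consumed (assumes fairseq is installed; the registry name and class here are hypothetical). Registering without a dataclass skips the hydra `ConfigStore` branch entirely:

```python
from fairseq.registry import setup_registry

build_scorer, register_scorer, SCORER_REGISTRY, _ = setup_registry(
    "--my-scorer", default=None
)

@register_scorer("identity")
class IdentityScorer:
    def __init__(self, cfg):  # build_x falls back to builder = cls
        self.cfg = cfg

scorer = build_scorer("identity")  # a plain string choice resolves via REGISTRY
print(type(scorer).__name__)       # IdentityScorer
```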
183,899 | from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
) | null |
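The loader probes file names of the form `{split}.{src}-{tgt}.{lang}` in both language orders, then keeps appending shard suffixes `train1`, `train2`, ... while `combine` is set. A sketch of the on-disk layout it expects for the mmap implementation (paths hypothetical):

```python
import os

# Expected files for split="train", src="de", tgt="en", dataset_impl="mmap":
#   data-bin/train.de-en.de.bin   data-bin/train.de-en.de.idx
#   data-bin/train.de-en.en.bin   data-bin/train.de-en.en.idx
# With combine=True, shards train1.de-en.*, train2.de-en.*, ... are appended.

def langpair_prefix(data_path, split, src, tgt):
    # mirrors the probe inside split_exists()
    return os.path.join(data_path, "{}.{}-{}.".format(split, src, tgt))

print(langpair_prefix("data-bin", "train", "de", "en") + "de")  # .../train.de-en.de
```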
183,900 | import contextlib
import logging
import os
from collections import OrderedDict
import torch
from fairseq import metrics, options, utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import LegacyFairseqTask, register_task
def _lang_token(lang: str):
return "__{}__".format(lang)
The provided code snippet includes necessary dependencies for implementing the `_lang_token_index` function. Write a Python function `def _lang_token_index(dic: Dictionary, lang: str)` to solve the following problem:
Return language token index.
Here is the function:
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang)
return idx | Return language token index. |
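A short usage sketch, assuming fairseq is installed and with `_lang_token`/`_lang_token_index` from above in scope; the language token must be added to the dictionary first, otherwise the assert fires:

```python
from fairseq.data import Dictionary

d = Dictionary()
d.add_symbol(_lang_token("en"))    # registers "__en__"
print(_lang_token_index(d, "en"))  # its index, guaranteed != d.unk_index
```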
183,905 | import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix (also used to build dictionaries)")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser | null |
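Driving the preprocessing parser programmatically looks like the sketch below (assumes fairseq is installed; all paths are hypothetical). These are the same flags `fairseq-preprocess` accepts on the command line:

```python
parser = get_preprocessing_parser()
args = parser.parse_args([
    "--source-lang", "de", "--target-lang", "en",
    "--trainpref", "corpus/train",   # reads corpus/train.de / corpus/train.en
    "--destdir", "data-bin/deen",
    "--dataset-impl", "mmap",
    "--workers", "4",
])
print(args.source_lang, args.dataset_impl)  # de mmap
```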
183,906 | import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_parser(desc, default_task="translation"):
def add_dataset_args(parser, train=False, gen=False):
def add_distributed_training_args(parser, default_world_size=None):
def add_optimization_args(parser):
def add_checkpoint_args(parser):
def add_model_args(parser):
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser | null |
183,907 | import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_generation_parser(interactive=False, default_task="translation"):
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task) | null |
183,908 | import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser | null |
183,909 | import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_parser(desc, default_task="translation"):
def add_dataset_args(parser, train=False, gen=False):
def add_distributed_training_args(parser, default_world_size=None):
class CommonEvalConfig(FairseqDataclass):
def gen_parser_from_dataclass(
parser: ArgumentParser,
dataclass_instance: FairseqDataclass,
delete_default: bool = False,
with_prefix: Optional[str] = None,
) -> None:
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser | null |
183,910 | import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def gen_parser_from_dataclass(
parser: ArgumentParser,
dataclass_instance: FairseqDataclass,
delete_default: bool = False,
with_prefix: Optional[str] = None,
) -> None:
"""
convert a dataclass instance to trailing parser arguments.
If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are
building a flat namespace from a structured dataclass (see transformer_config.py for example).
"""
def argparse_name(name: str):
if name == "data" and (with_prefix is None or with_prefix == ''):
# normally data is positional args, so we don't add the -- nor the prefix
return name
if name == "_name":
# private member, skip
return None
full_name = "--" + name.replace("_", "-")
if with_prefix is not None and with_prefix != '':
# if a prefix is specified, construct the prefixed arg name
full_name = with_prefix + "-" + full_name[2:] # strip -- when composing
return full_name
def get_kwargs_from_dc(
dataclass_instance: FairseqDataclass, k: str
) -> Dict[str, Any]:
"""k: dataclass attributes"""
kwargs = {}
field_type = dataclass_instance._get_type(k)
inter_type = interpret_dc_type(field_type)
field_default = dataclass_instance._get_default(k)
if isinstance(inter_type, type) and issubclass(inter_type, Enum):
field_choices = [t.value for t in list(inter_type)]
else:
field_choices = None
field_help = dataclass_instance._get_help(k)
field_const = dataclass_instance._get_argparse_const(k)
if isinstance(field_default, str) and field_default.startswith("${"):
kwargs["default"] = field_default
else:
if field_default is MISSING:
kwargs["required"] = True
if field_choices is not None:
kwargs["choices"] = field_choices
if (
isinstance(inter_type, type)
and (issubclass(inter_type, List) or issubclass(inter_type, Tuple))
) or ("List" in str(inter_type) or "Tuple" in str(inter_type)):
if "int" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, int)
elif "float" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, float)
elif "str" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, str)
else:
raise NotImplementedError(
"parsing of type " + str(inter_type) + " is not implemented"
)
if field_default is not MISSING:
kwargs["default"] = (
",".join(map(str, field_default))
if field_default is not None
else None
)
elif (
isinstance(inter_type, type) and issubclass(inter_type, Enum)
) or "Enum" in str(inter_type):
kwargs["type"] = str
if field_default is not MISSING:
if isinstance(field_default, Enum):
kwargs["default"] = field_default.value
else:
kwargs["default"] = field_default
elif inter_type is bool:
kwargs["action"] = (
"store_false" if field_default is True else "store_true"
)
kwargs["default"] = field_default
else:
kwargs["type"] = inter_type
if field_default is not MISSING:
kwargs["default"] = field_default
# build the help with the hierarchical prefix
if with_prefix is not None and with_prefix != '' and field_help is not None:
field_help = with_prefix[2:] + ': ' + field_help
kwargs["help"] = field_help
if field_const is not None:
kwargs["const"] = field_const
kwargs["nargs"] = "?"
return kwargs
for k in dataclass_instance._get_all_attributes():
field_name = argparse_name(dataclass_instance._get_name(k))
field_type = dataclass_instance._get_type(k)
if field_name is None:
continue
elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass):
# for fields that are of type FairseqDataclass, we can recursively
# add their fields to the namespace (so we add the args from model, task, etc. to the root namespace)
prefix = None
if with_prefix is not None:
# if a prefix is specified, then we don't want to copy the subfields directly to the root namespace
# but we prefix them with the name of the current field.
prefix = field_name
gen_parser_from_dataclass(parser, field_type(), delete_default, prefix)
continue
kwargs = get_kwargs_from_dc(dataclass_instance, k)
field_args = [field_name]
alias = dataclass_instance._get_argparse_alias(k)
if alias is not None:
field_args.append(alias)
if "default" in kwargs:
if isinstance(kwargs["default"], str) and kwargs["default"].startswith(
"${"
):
if kwargs["help"] is None:
# this is a field with a name that will be added elsewhere
continue
else:
del kwargs["default"]
if delete_default and "default" in kwargs:
del kwargs["default"]
try:
parser.add_argument(*field_args, **kwargs)
except ArgumentError:
pass
MODEL_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
TASK_REGISTRY = {}
class FairseqBMUF(FairseqOptimizer):
"""
Implements incremental block distributed data parallelism similar to
https://ieeexplore.ieee.org/document/7472805
Paper title: Scalable training of deep learning machines by incremental
block training with intra-block parallel optimization and blockwise
model-update filtering
"""
def __init__(self, cfg: FairseqBMUFConfig, optimizer):
super().__init__(cfg)
self._optimizer = optimizer
self._num_updates = 0
self.sync_iter = cfg.global_sync_iter
self.block_momentum = cfg.block_momentum
self.block_lr = cfg.block_lr
self._reset_local_data()
self.warmup_iteration = cfg.warmup_iterations
self.use_nbm = cfg.use_nbm
self.initial_state = self._optimizer.state_dict()
self.average_sync = self.cfg.average_sync
self.world_size = self.cfg.distributed_world_size
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
gen_parser_from_dataclass(parser, FairseqBMUFConfig())
def optimizer(self):
return self._optimizer.optimizer
def optimizer_config(self):
return self._optimizer.optimizer_config
def get_lr(self):
return self._optimizer.get_lr()
def set_lr(self, lr):
self._optimizer.set_lr(lr)
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
self._optimizer.load_state_dict(state_dict, optimizer_overrides)
self.initial_state = self._optimizer.state_dict()
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm."""
return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)
def average_params(self):
self._optimizer.average_params()
def _block_sync(self):
if self.world_size <= 1:
return
# Update the global model using local models from all GPUs
# (Step-1) Calculate grad between previously synced model and
# current local model
if self.block_momentum != 0:
self._calc_grad()
# (Step-2) Average gradient from all GPUs
self._avg_grad_from_all_gpus()
# (Step-3) Calculate global momentum and update the global model
if self.block_momentum != 0:
self._update_global_model()
# (Step-4) Average local optimizer params
if self.average_sync:
self.average_params()
def _is_warmup_end(self):
# Check whether the update count has reached the warmup iteration count
if self.get_num_updates() == self.warmup_iteration:
return True
return False
def _is_bmuf_iter(self):
# Check whether the update count has hit a BMUF sync iteration
if (self.get_num_updates() > self.warmup_iteration) and (
self.get_num_updates() % self.sync_iter == 0
):
return True
return False
def _warmup_sync(self, root_rank=0):
if self.world_size <= 1:
return
# Broadcast the local model to all gpus
for param in self.params:
dist.broadcast(param.data, src=root_rank)
# Update local optimizer state
if self.average_sync:
self._optimizer.average_params()
else:
self._optimizer.load_state_dict(self.initial_state)
self._reset_local_data()
def step(self, closure=None):
"""Performs a single optimization step."""
self._optimizer.step(closure)
self.set_num_updates(self.get_num_updates() + 1)
if self._is_warmup_end():
self._warmup_sync()
elif self._is_bmuf_iter():
self._block_sync()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self._optimizer.zero_grad()
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
def _reset_local_data(self):
# (Step-0) Initialize global momentum parameters and store global copy on each gpu
self.global_params = [torch.zeros_like(p.data) for p in self.params]
self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
# saving the global model locally for calculating gradient during bmuf sync
for param, global_param in zip(self.params, self.global_params):
global_param.copy_(param.data)
def _calc_grad(self):
# global_params is basically the global copy from the previously finished
# synchronisation. param.data is the local parameter after block_sync_freq
# updates on the local gpu, so grad is the difference between the previously
# synced model and the current local model.
for index, (param, global_param) in enumerate(
zip(self.params, self.global_params)
):
self.grads[index] = global_param - param.data
def _avg_grad_from_all_gpus(self):
for index, param in enumerate(self.params):
sync_para = param.data if self.block_momentum == 0 else self.grads[index]
sync_para /= float(dist.get_world_size())
dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)
def _update_global_model(self):
for index, (param, global_param, smoothed_grad, grad) in enumerate(
zip(
self.params,
self.global_params,
self.smoothed_grads,
# all gpus would share the same value of smoothed_grad, since it is
# always computed on synchronized gradients.
self.grads,
)
):
# global_param is basically the last synchronized parameter. though
# smoothed_grad is local, all processes will have same value of
# smoothed_grad and hence param is globally synchronized copy.
# smoothed_grad(t) = BM * smoothed_grad(t-1) + BM_lr * grad(t)
smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad
param.data.copy_(global_param - smoothed_grad)
# A Nesterov momentum here is to do a partial weight update before
# calculating the gradient
if self.use_nbm:
param.data.copy_(param.data - self.block_momentum * smoothed_grad)
# backup for the next synchronization.
self.smoothed_grads[index] = smoothed_grad
global_param.copy_(param.data)
REGISTRIES = {}
The provided code snippet includes necessary dependencies for implementing the `parse_args_and_arch` function. Write a Python function `def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, )` to solve the following problem:
Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values
Here is the function:
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args | Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values |
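A typical call pattern for the two-pass parse, assuming fairseq is installed (the data directory is hypothetical): the first pass discovers `--arch`/`--task`/`--optimizer`, registers their specific flags, and the second pass parses everything:

```python
from fairseq import options

parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, input_args=[
    "data-bin/deen",                     # positional data arg from the task
    "--arch", "transformer_iwslt_de_en",
    "--task", "translation",
    "--optimizer", "adam",
    "--lr", "0.0005",
])
print(args.arch, args.optimizer, args.lr)  # transformer_iwslt_de_en adam [0.0005]
```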
183,924 | import logging
from hydra.core.config_store import ConfigStore
from fairseq.dataclass.configs import FairseqConfig
from omegaconf import DictConfig, OmegaConf
logger = logging.getLogger(__name__)
class FairseqConfig(FairseqDataclass):
def hydra_init(cfg_name="config") -> None:
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=FairseqConfig)
for k in FairseqConfig.__dataclass_fields__:
v = FairseqConfig.__dataclass_fields__[k].default
try:
cs.store(name=k, node=v)
except BaseException:
logger.error(f"{k} - {v}")
raise | null |
183,925 | import logging
from hydra.core.config_store import ConfigStore
from fairseq.dataclass.configs import FairseqConfig
from dataclasses import MISSING, is_dataclass
from typing import Any
from omegaconf import DictConfig, OmegaConf, open_dict
class FairseqConfig(FairseqDataclass):
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
optimization: OptimizationConfig = OptimizationConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
generation: GenerationConfig = GenerationConfig()
eval_lm: EvalLMConfig = EvalLMConfig()
interactive: InteractiveConfig = InteractiveConfig()
model: Any = MISSING
task: Any = None
criterion: Any = None
optimizer: Any = None
lr_scheduler: Any = None
scoring: Any = None
bpe: Any = None
tokenizer: Any = None
ema: EMAConfig = EMAConfig()
REGISTRIES = {}
TASK_DATACLASS_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True):
if remove_missing:
if is_dataclass(dc):
target_keys = set(dc.__dataclass_fields__.keys())
else:
target_keys = set(dc.keys())
with open_dict(cfg):
for k in list(cfg.keys()):
if k not in target_keys:
del cfg[k]
merged_cfg = OmegaConf.merge(dc, cfg)
merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"]
OmegaConf.set_struct(merged_cfg, True)
return merged_cfg
The provided code snippet includes necessary dependencies for implementing the `add_defaults` function. Write a Python function `def add_defaults(cfg: DictConfig) -> None` to solve the following problem:
This function adds default values that are stored in dataclasses that hydra doesn't know about
Here is the function:
def add_defaults(cfg: DictConfig) -> None:
"""This function adds default values that are stored in dataclasses that hydra doesn't know about """
from fairseq.registry import REGISTRIES
from fairseq.tasks import TASK_DATACLASS_REGISTRY
from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY
from fairseq.dataclass.utils import merge_with_parent
from typing import Any
OmegaConf.set_struct(cfg, False)
for k, v in FairseqConfig.__dataclass_fields__.items():
field_cfg = cfg.get(k)
if field_cfg is not None and v.type == Any:
dc = None
if isinstance(field_cfg, str):
field_cfg = DictConfig({"_name": field_cfg})
field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"]
name = field_cfg.get("_name")
if k == "task":
dc = TASK_DATACLASS_REGISTRY.get(name)
elif k == "model":
name = ARCH_MODEL_NAME_REGISTRY.get(name, name)
dc = MODEL_DATACLASS_REGISTRY.get(name)
elif k in REGISTRIES:
dc = REGISTRIES[k]["dataclass_registry"].get(name)
if dc is not None:
cfg[k] = merge_with_parent(dc, field_cfg) | This function adds default values that are stored in dataclasses that hydra doesn't know about |
183,926 | import ast
import inspect
import logging
import os
import re
from argparse import ArgumentError, ArgumentParser, Namespace
from dataclasses import _MISSING_TYPE, MISSING
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import FairseqConfig
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import compose, initialize
from omegaconf import DictConfig, OmegaConf, open_dict
def eval_str_list(x, x_type=float):
if x is None:
return None
if isinstance(x, str):
if len(x) == 0:
return []
x = ast.literal_eval(x)
try:
return list(map(x_type, x))
except TypeError:
return [x_type(x)]
def interpret_dc_type(field_type):
if isinstance(field_type, str):
raise RuntimeError("field should be a type")
if field_type == Any:
return str
typestring = str(field_type)
if re.match(r"(typing.|^)Union\[(.*), NoneType\]$", typestring) or typestring.startswith("typing.Optional"):
return field_type.__args__[0]
return field_type
The provided code snippet includes necessary dependencies for implementing the `gen_parser_from_dataclass` function. Write a Python function `def gen_parser_from_dataclass( parser: ArgumentParser, dataclass_instance: FairseqDataclass, delete_default: bool = False, ) -> None` to solve the following problem:
convert a dataclass instance to trailing parser arguments
Here is the function:
def gen_parser_from_dataclass(
parser: ArgumentParser,
dataclass_instance: FairseqDataclass,
delete_default: bool = False,
) -> None:
"""convert a dataclass instance to tailing parser arguments"""
def argparse_name(name: str):
if name == "data":
# normally data is positional args
return name
if name == "_name":
# private member, skip
return None
return "--" + name.replace("_", "-")
def get_kwargs_from_dc(
dataclass_instance: FairseqDataclass, k: str
) -> Dict[str, Any]:
"""k: dataclass attributes"""
kwargs = {}
field_type = dataclass_instance._get_type(k)
inter_type = interpret_dc_type(field_type)
field_default = dataclass_instance._get_default(k)
if isinstance(inter_type, type) and issubclass(inter_type, Enum):
field_choices = [t.value for t in list(inter_type)]
else:
field_choices = None
field_help = dataclass_instance._get_help(k)
field_const = dataclass_instance._get_argparse_const(k)
if isinstance(field_default, str) and field_default.startswith("${"):
kwargs["default"] = field_default
else:
if field_default is MISSING:
kwargs["required"] = True
if field_choices is not None:
kwargs["choices"] = field_choices
if (
isinstance(inter_type, type)
and (issubclass(inter_type, List) or issubclass(inter_type, Tuple))
) or ("List" in str(inter_type) or "Tuple" in str(inter_type)):
if "int" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, int)
elif "float" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, float)
elif "str" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, str)
else:
raise NotImplementedError(
"parsing of type " + str(inter_type) + " is not implemented"
)
if field_default is not MISSING:
kwargs["default"] = (
",".join(map(str, field_default))
if field_default is not None
else None
)
elif (
isinstance(inter_type, type) and issubclass(inter_type, Enum)
) or "Enum" in str(inter_type):
kwargs["type"] = str
if field_default is not MISSING:
if isinstance(field_default, Enum):
kwargs["default"] = field_default.value
else:
kwargs["default"] = field_default
elif inter_type is bool:
kwargs["action"] = (
"store_false" if field_default is True else "store_true"
)
kwargs["default"] = field_default
else:
kwargs["type"] = inter_type
if field_default is not MISSING:
kwargs["default"] = field_default
kwargs["help"] = field_help
if field_const is not None:
kwargs["const"] = field_const
kwargs["nargs"] = "?"
return kwargs
for k in dataclass_instance._get_all_attributes():
field_name = argparse_name(dataclass_instance._get_name(k))
field_type = dataclass_instance._get_type(k)
if field_name is None:
continue
elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass):
gen_parser_from_dataclass(parser, field_type(), delete_default)
continue
kwargs = get_kwargs_from_dc(dataclass_instance, k)
field_args = [field_name]
alias = dataclass_instance._get_argparse_alias(k)
if alias is not None:
field_args.append(alias)
if "default" in kwargs:
if isinstance(kwargs["default"], str) and kwargs["default"].startswith(
"${"
):
if kwargs["help"] is None:
# this is a field with a name that will be added elsewhere
continue
else:
del kwargs["default"]
if delete_default and "default" in kwargs:
del kwargs["default"]
try:
parser.add_argument(*field_args, **kwargs)
except ArgumentError:
pass | convert a dataclass instance to trailing parser arguments |
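The same idea in a self-contained, heavily simplified form (an illustration, not the fairseq implementation): map each dataclass field to one argparse flag, using the field's type and default:

```python
import argparse
from dataclasses import dataclass, fields

@dataclass
class TrainCfg:
    lr: float = 0.001
    max_epoch: int = 10
    save_dir: str = "checkpoints"

def parser_from_dataclass(dc_type):
    p = argparse.ArgumentParser()
    for f in fields(dc_type):
        # e.g. field "max_epoch" becomes flag "--max-epoch"
        p.add_argument("--" + f.name.replace("_", "-"),
                       type=f.type, default=f.default)
    return p

args = parser_from_dataclass(TrainCfg).parse_args(["--lr", "0.01"])
print(args.lr, args.max_epoch)  # 0.01 10
```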
183,927 | import ast
import inspect
import logging
import os
import re
from argparse import ArgumentError, ArgumentParser, Namespace
from dataclasses import _MISSING_TYPE, MISSING
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import FairseqConfig
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import compose, initialize
from omegaconf import DictConfig, OmegaConf, open_dict
logger = logging.getLogger(__name__)
def _set_legacy_defaults(args, cls):
"""Helper to set default arguments based on *add_args*."""
if not hasattr(cls, "add_args"):
return
import argparse
parser = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS, allow_abbrev=False
)
cls.add_args(parser)
# copied from argparse.py:
defaults = argparse.Namespace()
for action in parser._actions:
if action.dest is not argparse.SUPPRESS:
if not hasattr(defaults, action.dest):
if action.default is not argparse.SUPPRESS:
setattr(defaults, action.dest, action.default)
for key, default_value in vars(defaults).items():
if not hasattr(args, key):
setattr(args, key, default_value)
def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]:
"""use the field in args to overrides those in cfg"""
overrides = []
deletes = []
for k in FairseqConfig.__dataclass_fields__.keys():
overrides.extend(
_override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args)
)
if args is not None:
if hasattr(args, "task"):
from fairseq.tasks import TASK_DATACLASS_REGISTRY
migrate_registry(
"task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes
)
else:
deletes.append("task")
# these options will be set to "None" if they have not yet been migrated
# so we can populate them with the entire flat args
CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"}
from fairseq.registry import REGISTRIES
for k, v in REGISTRIES.items():
if hasattr(args, k):
migrate_registry(
k,
getattr(args, k),
v["dataclass_registry"],
args,
overrides,
deletes,
use_name_as_val=k not in CORE_REGISTRIES,
)
else:
deletes.append(k)
no_dc = True
if hasattr(args, "arch"):
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY
if args.arch in ARCH_MODEL_REGISTRY:
m_cls = ARCH_MODEL_REGISTRY[args.arch]
dc = getattr(m_cls, "__dataclass", None)
if dc is not None:
m_name = ARCH_MODEL_NAME_REGISTRY[args.arch]
overrides.append("model={}".format(m_name))
overrides.append("model._name={}".format(args.arch))
# override model params with those exist in args
overrides.extend(_override_attr("model", dc, args))
no_dc = False
if no_dc:
deletes.append("model")
return overrides, deletes
ARCH_MODEL_REGISTRY = {}
TASK_REGISTRY = {}
The provided code snippet includes necessary dependencies for implementing the `convert_namespace_to_omegaconf` function. Write a Python function `def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig` to solve the following problem:
Convert a flat argparse.Namespace to a structured DictConfig.
Here is the function:
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
"""Convert a flat argparse.Namespace to a structured DictConfig."""
# Here we are using field values provided in args to override counterparts inside config object
overrides, deletes = override_module_args(args)
# configs will be in fairseq/config after installation
config_path = os.path.join("..", "config")
GlobalHydra.instance().clear()
with initialize(config_path=config_path):
try:
composed_cfg = compose("config", overrides=overrides, strict=False)
except:
logger.error("Error when composing. Overrides: " + str(overrides))
raise
for k in deletes:
composed_cfg[k] = None
cfg = OmegaConf.create(
OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)
)
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = lambda _: True
if cfg.task is None and getattr(args, "task", None):
cfg.task = Namespace(**vars(args))
from fairseq.tasks import TASK_REGISTRY
_set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
cfg.task._name = args.task
if cfg.model is None and getattr(args, "arch", None):
cfg.model = Namespace(**vars(args))
from fairseq.models import ARCH_MODEL_REGISTRY
_set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
cfg.model._name = args.arch
if cfg.optimizer is None and getattr(args, "optimizer", None):
cfg.optimizer = Namespace(**vars(args))
from fairseq.optim import OPTIMIZER_REGISTRY
_set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
cfg.optimizer._name = args.optimizer
if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
cfg.lr_scheduler = Namespace(**vars(args))
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
_set_legacy_defaults(cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler])
cfg.lr_scheduler._name = args.lr_scheduler
if cfg.criterion is None and getattr(args, "criterion", None):
cfg.criterion = Namespace(**vars(args))
from fairseq.criterions import CRITERION_REGISTRY
_set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
cfg.criterion._name = args.criterion
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(cfg, True)
return cfg | Convert a flat argparse.Namespace to a structured DictConfig. |
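A hedged usage sketch, assuming fairseq is installed so that the Hydra config directory and the task/arch registries resolve (argument values below are illustrative):

from argparse import Namespace

args = Namespace(
    task="translation",
    arch="transformer",
    optimizer="adam",
    lr_scheduler="inverse_sqrt",
    criterion="cross_entropy",
)
cfg = convert_namespace_to_omegaconf(args)
print(type(cfg).__name__)  # DictConfig
print(cfg.model._name)     # "transformer"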
183,928 | import ast
import inspect
import logging
import os
import re
from argparse import ArgumentError, ArgumentParser, Namespace
from dataclasses import _MISSING_TYPE, MISSING
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import FairseqConfig
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import compose, initialize
from omegaconf import DictConfig, OmegaConf, open_dict
REGISTRIES = {}
def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]):
# this will be deprecated when we get rid of argparse and model_overrides logic
from fairseq.registry import REGISTRIES
with open_dict(cfg):
for k in cfg.keys():
# "k in cfg" will return false if its a "mandatory value (e.g. ???)"
if k in cfg and isinstance(cfg[k], DictConfig):
if k in overrides and isinstance(overrides[k], dict):
for ok, ov in overrides[k].items():
if isinstance(ov, dict):
overwrite_args_by_name(cfg[k][ok], ov)
else:
cfg[k][ok] = ov
else:
overwrite_args_by_name(cfg[k], overrides)
elif k in cfg and isinstance(cfg[k], Namespace):
for override_key, val in overrides.items():
setattr(cfg[k], override_key, val)
elif k in overrides:
if (
k in REGISTRIES
and overrides[k] in REGISTRIES[k]["dataclass_registry"]
):
cfg[k] = DictConfig(
REGISTRIES[k]["dataclass_registry"][overrides[k]]
)
overwrite_args_by_name(cfg[k], overrides)
cfg[k]._name = overrides[k]
else:
cfg[k] = overrides[k] | null |
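A minimal sketch of the recursive override logic above: nested dict overrides descend into nested DictConfigs, while scalar values are assigned directly (assumes fairseq is importable for the registry lookup inside the function):

from omegaconf import OmegaConf

cfg = OmegaConf.create({"model": {"dropout": 0.1, "encoder": {"layers": 6}}})
overwrite_args_by_name(cfg, {"model": {"dropout": 0.3, "encoder": {"layers": 12}}})
print(cfg.model.dropout, cfg.model.encoder.layers)  # 0.3 12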
183,935 | import io
import logging
import os
import pickle
import random
import socket
import struct
import subprocess
import warnings
from argparse import Namespace
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional
import torch
import torch.distributed as dist
from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig
from omegaconf import open_dict
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None or cfg.tpu:
return
num_pipelines_per_node = None
if cfg.pipeline_model_parallel:
num_pipeline_devices, num_pipelines_per_node = _pipeline_parallel_pre_init(cfg)
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
# support torch.distributed.launch
_infer_torch_distributed_launch_init(cfg)
elif cfg.distributed_port > 0:
# we can determine the init method automatically for Slurm
_infer_slurm_init(cfg, num_pipelines_per_node)
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
_infer_single_node_init(cfg)
if cfg.pipeline_model_parallel:
_pipeline_parallel_post_init(cfg, num_pipeline_devices, num_pipelines_per_node)
elif not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
def distributed_main(i, main, cfg: FairseqConfig, kwargs):
cfg.distributed_training.device_id = i
if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn
cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i
cfg.distributed_training.distributed_rank = distributed_init(cfg)
after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None)
if after_distributed_init_fn:
cfg = after_distributed_init_fn(cfg)
main(cfg, **kwargs)
if torch.distributed.is_initialized():
torch.distributed.barrier(get_global_group())
class FairseqConfig(FairseqDataclass):
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
optimization: OptimizationConfig = OptimizationConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
generation: GenerationConfig = GenerationConfig()
eval_lm: EvalLMConfig = EvalLMConfig()
interactive: InteractiveConfig = InteractiveConfig()
model: Any = MISSING
task: Any = None
criterion: Any = None
optimizer: Any = None
lr_scheduler: Any = None
scoring: Any = None
bpe: Any = None
tokenizer: Any = None
ema: EMAConfig = EMAConfig()
def call_main(cfg: FairseqConfig, main, **kwargs):
if cfg.distributed_training.distributed_init_method is None:
infer_init_method(cfg.distributed_training)
if cfg.distributed_training.distributed_init_method is not None:
# distributed training
if not cfg.distributed_training.distributed_no_spawn:
start_rank = cfg.distributed_training.distributed_rank
cfg.distributed_training.distributed_rank = None # assign automatically
kwargs["start_rank"] = start_rank
torch.multiprocessing.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=min(
torch.cuda.device_count(),
cfg.distributed_training.distributed_world_size,
),
join=True,
)
else:
distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs)
elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1:
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy("file_system")
xmp.spawn(
fn=distributed_main,
args=(main, cfg, kwargs),
nprocs=8, # use all 8 TPU cores
)
else:
# single GPU main
main(cfg, **kwargs) | null |
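A hedged entry-point sketch: `main` is a hypothetical training function, and `args` is assumed to be a parsed fairseq Namespace; call_main then picks single-process execution, CUDA multiprocessing, or TPU spawning based on the config:

def main(cfg):
    print("running on rank", cfg.distributed_training.distributed_rank)

cfg = convert_namespace_to_omegaconf(args)  # args: parsed fairseq Namespace (assumed)
call_main(cfg, main)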
183,943 | import io
import logging
import os
import pickle
import random
import socket
import struct
import subprocess
import warnings
from argparse import Namespace
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional
import torch
import torch.distributed as dist
from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig
from omegaconf import open_dict
def get_rank(group):
if use_xla():
assert group[0] == "tpu"
my_group = _find_my_group(group[1])
return my_group.index(get_global_rank())
else:
return dist.get_rank(group=group)
def broadcast_tensors(
tensors: Optional[List[torch.Tensor]],
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
"""
Broadcasts a list of tensors without other (non-src) ranks needing to know
the dtypes/shapes of the tensors.
"""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
# share metadata first to simplify transfer
is_src_rank = (get_rank(group) == src_rank)
if is_src_rank:
metadata = [
{"size": t.size(), "dtype": t.dtype, "device": t.device} for t in tensors
]
metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device)
else:
metadata = _broadcast_object_slow(None, src_rank, group, dist_device)
out_tensors = []
for i, meta in enumerate(metadata):
if is_src_rank:
tensor = tensors[i]
broadcast(tensors[i].to(dist_device), src=src_rank, group=group)
else:
tensor = torch.zeros(
[meta["size"].numel()], dtype=meta["dtype"], device=dist_device
)
broadcast(tensor, src=src_rank, group=group)
tensor = tensor.view(meta["size"]).to(meta["device"])
out_tensors.append(tensor)
return out_tensors
def _broadcast_object_slow(
obj: Any, src_rank: int, group: object, dist_device: torch.device,
) -> Any:
if get_rank(group) == src_rank:
# Emit data
buffer = io.BytesIO()
torch.save(obj, buffer)
buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device)
length = torch.LongTensor([len(buffer)]).to(dist_device)
broadcast(length, src=src_rank, group=group)
broadcast(buffer, src=src_rank, group=group)
else:
# Fetch from the source
length = torch.LongTensor([0]).to(dist_device)
broadcast(length, src=src_rank, group=group)
buffer = torch.ByteTensor(int(length.item())).to(dist_device)
broadcast(buffer, src=src_rank, group=group)
buffer = io.BytesIO(buffer.cpu().numpy())
obj = torch.load(buffer, map_location="cpu")
return obj
def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if torch.is_tensor(obj):
placeholder = _TensorPlaceholder(index=len(tensors))
tensors.append(obj)
return placeholder
elif isinstance(obj, dict):
return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_split_tensors_from_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_split_tensors_from_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_split_tensors_from_obj(v, tensors) for v in obj}
else:
return obj
def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any:
if isinstance(obj, _TensorPlaceholder):
return tensors[obj.index]
elif isinstance(obj, dict):
return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()}
elif isinstance(obj, list):
return [_put_tensors_in_obj(v, tensors) for v in obj]
elif isinstance(obj, tuple):
return tuple(_put_tensors_in_obj(v, tensors) for v in obj)
elif isinstance(obj, set):
return {_put_tensors_in_obj(v, tensors) for v in obj}
else:
return obj
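A roundtrip sketch of the two helpers above: tensors are swapped for index placeholders on the way out and restored on the way back, leaving the container structure untouched:

payload = {"a": torch.ones(2), "b": [1, torch.zeros(3)]}
tensors = []
skeleton = _split_tensors_from_obj(payload, tensors)  # tensors now holds 2 entries
restored = _put_tensors_in_obj(skeleton, tensors)
assert torch.equal(restored["a"], payload["a"])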
The provided code snippet includes the necessary dependencies for implementing the `broadcast_object` function. Write a Python function `def broadcast_object( obj: Any, src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> Any` to solve the following problem:
Broadcast an arbitrary Python object to other workers.
Here is the function:
def broadcast_object(
obj: Any,
src_rank: int,
group: object,
dist_device: Optional[torch.device] = None,
) -> Any:
"""Broadcast an arbitrary Python object to other workers."""
if dist_device is None:
if torch.distributed.get_backend(group) == "nccl":
dist_device = torch.device("cuda")
else:
dist_device = torch.device("cpu")
if get_rank(group) == src_rank:
# split the tensors from the non-tensors so we can broadcast them
# directly, avoiding unnecessary serialization/deserialization
tensors = []
obj = _split_tensors_from_obj(obj, tensors)
obj = _broadcast_object_slow(obj, src_rank, group, dist_device)
tensors = broadcast_tensors(tensors, src_rank, group, dist_device)
else:
obj = _broadcast_object_slow(None, src_rank, group, dist_device)
tensors = broadcast_tensors(None, src_rank, group, dist_device)
return _put_tensors_in_obj(obj, tensors) | Broadcast an arbitrary Python object to other workers. |
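A hedged usage sketch, assuming torch.distributed has already been initialized: rank 0 supplies the object, every other rank passes None and receives a copy:

group = dist.group.WORLD
payload = {"step": 10, "weights": torch.randn(4)} if dist.get_rank() == 0 else None
payload = broadcast_object(payload, src_rank=0, group=group)
# all ranks now hold the same dict; embedded tensors were broadcast directly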
183,946 | import types
import torch
class FusedAdamV1(torch.optim.Optimizer):
"""
Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.0,
max_grad_norm=0.0,
amsgrad=False,
):
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
if amsgrad:
raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"max_grad_norm": max_grad_norm,
}
super().__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
def supports_memory_efficient_fp16(self):
return True
def supports_flat_params(self):
return True
def supports_step_with_scale(self):
return True
def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradients to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be of type torch.float. (default: None)
output_params (list of tensors, optional): a reduced-precision copy
of the updated weights, written out in addition to the regular
updated weights. These must be of the same type as the gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility:
# assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
for group, grads_this_group, grad_norm in zip(
self.param_groups, grads_group, grad_norms
):
if grads_this_group is None:
grads_this_group = [None] * len(group["params"])
# compute combined scale factor for this group
combined_scale = scale
if group.get("max_grad_norm", 0) > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"]
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group.get("bias_correction", 1) else 0
for p, grad in zip(group["params"], grads_this_group):
# note: p.grad should never be set for correct operation of the
# mixed-precision optimizer, which sometimes sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
p_data_fp32 = p.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
out_p = p.data
with torch.cuda.device(p.device):
fused_adam_cuda.adam(
p_data_fp32,
out_p,
exp_avg,
exp_avg_sq,
grad,
group["lr"],
beta1,
beta2,
group["eps"],
combined_scale,
state["step"],
self.eps_mode,
bias_correction,
group["weight_decay"],
)
return loss
try:
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdamV2(FusedAdam):
"""
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(self, "multi_tensor_adam"):
raise Exception(
"Apex installation is outdated. Please install an updated version of apex."
)
def supports_memory_efficient_fp16(self):
return True
def supports_flat_params(self):
return True
def step(
self,
closure=None,
grads=None,
output_params=None,
scale=None,
grad_norms=None,
):
"""Performs a single optimization step."""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group["bias_correction"] else 0
beta1, beta2 = group["betas"]
# assume the same step across the group for now to simplify things;
# a per-parameter step could easily be supported by making it a tensor, or by passing a list into the kernel
if "step" in group:
group["step"] += 1
else:
group["step"] = 1
# create lists for multi-tensor apply
g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group["params"]:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=torch.float
)
else:
state["exp_avg"] = state["exp_avg"].to(
device=p.data.device, dtype=torch.float
)
state["exp_avg_sq"] = state["exp_avg_sq"].to(
device=p.data.device, dtype=torch.float
)
if p.dtype == torch.float16:
g_16.append(p.grad.data.float())
p_16.append(p.data.float())
orig_p_16.append(p.data)
m_16.append(state["exp_avg"])
v_16.append(state["exp_avg_sq"])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state["exp_avg"])
v_32.append(state["exp_avg_sq"])
else:
raise RuntimeError("FusedAdam only support fp16 and fp32.")
with torch.cuda.device(p.device):
if len(g_16) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
for orig_p, p in zip(orig_p_16, p_16):
orig_p.copy_(p.data)
if len(g_32) > 0:
multi_tensor_applier(
self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group["lr"],
beta1,
beta2,
group["eps"],
group["step"],
self.adam_w_mode,
bias_correction,
group["weight_decay"],
)
return loss
except ImportError:
pass
The provided code snippet includes the necessary dependencies for implementing the `get_fused_adam_class` function. Write a Python function `def get_fused_adam_class()` to solve the following problem:
Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated.
Here is the function:
def get_fused_adam_class():
"""
Look for the FusedAdam optimizer from apex. We first try to load the
"contrib" interface, which is a bit faster than the main interface,
but is technically deprecated.
"""
try:
# The "deprecated" interface in recent versions of apex is a bit
# faster than the main interface, since we don't use the apex
# optimizer. This can be installed by passing the
# `--deprecated_fused_adam` option when building apex.
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
return FusedAdamV1
except ImportError:
try:
# fallback to the newer interface
from apex.optimizers import FusedAdam as _FusedAdam # noqa
from apex.multi_tensor_apply import multi_tensor_applier
if multi_tensor_applier.available:
return FusedAdamV2
except ImportError:
pass
return None | Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. |
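A hedged selection sketch: prefer the fused CUDA kernel when apex provides one, otherwise fall back to the stock optimizer (`model` below is a hypothetical stand-in; fused Adam is GPU-only):

model = torch.nn.Linear(8, 8).cuda()
fused_adam_cls = get_fused_adam_class()
if fused_adam_cls is not None:
    optimizer = fused_adam_cls(model.parameters(), lr=1e-3)
else:
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)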
183,947 | from collections import OrderedDict
from fairseq import utils
from fairseq.models import (
FairseqMultiModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
Embedding,
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
)
def base_multilingual_architecture(args):
base_architecture(args)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False)
args.share_encoders = getattr(args, "share_encoders", False)
args.share_decoders = getattr(args, "share_decoders", False)
"multilingual_transformer", "multilingual_transformer_iwslt_de_en"
def multilingual_transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_multilingual_architecture(args) | null |
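A minimal sketch of the getattr-default idiom used throughout these architecture presets (assuming fairseq is importable): user-supplied values always win, and defaults only fill the gaps:

from argparse import Namespace

args = Namespace(encoder_layers=12)  # pretend CLI override
multilingual_transformer_iwslt_de_en(args)
print(args.encoder_layers)     # 12 (kept)
print(args.encoder_embed_dim)  # 512 (filled in)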
183,950 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m | null |
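A small sketch of the initialization convention above: normally distributed weights scaled by embedding_dim ** -0.5, with the padding row zeroed so padded positions contribute nothing:

emb = Embedding(num_embeddings=100, embedding_dim=16, padding_idx=0)
assert emb.weight[0].abs().sum().item() == 0.0  # padding row is all zeros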
183,951 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m | null |
183,952 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 7)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_conv_dim = getattr(args, "encoder_conv_dim", args.encoder_embed_dim)
args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim)
args.encoder_kernel_size_list = getattr(
args, "encoder_kernel_size_list", [3, 7, 15, 31, 31, 31, 31]
)
args.decoder_kernel_size_list = getattr(
args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31]
)
if len(args.encoder_kernel_size_list) == 1:
args.encoder_kernel_size_list = (
args.encoder_kernel_size_list * args.encoder_layers
)
if len(args.decoder_kernel_size_list) == 1:
args.decoder_kernel_size_list = (
args.decoder_kernel_size_list * args.decoder_layers
)
assert (
len(args.encoder_kernel_size_list) == args.encoder_layers
), "encoder_kernel_size_list doesn't match encoder_layers"
assert (
len(args.decoder_kernel_size_list) == args.decoder_layers
), "decoder_kernel_size_list doesn't match decoder_layers"
args.encoder_glu = getattr(args, "encoder_glu", True)
args.decoder_glu = getattr(args, "decoder_glu", True)
args.input_dropout = getattr(args, "input_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout)
def lightconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 7)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", 0.1)
args.encoder_glu = getattr(args, "encoder_glu", False)
args.decoder_glu = getattr(args, "decoder_glu", False)
args.input_dropout = getattr(args, "input_dropout", 0.0)
base_architecture(args) | null |
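A sketch of the kernel-size broadcast rule in base_architecture above: a single-element list is repeated once per layer, so one value can stand in for a whole per-layer schedule:

from argparse import Namespace

args = Namespace(encoder_kernel_size_list=[31], encoder_layers=7)
lightconv_iwslt_de_en(args)
print(args.encoder_kernel_size_list)  # [31, 31, 31, 31, 31, 31, 31]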
183,953 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 7)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_conv_dim = getattr(args, "encoder_conv_dim", args.encoder_embed_dim)
args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim)
args.encoder_kernel_size_list = getattr(
args, "encoder_kernel_size_list", [3, 7, 15, 31, 31, 31, 31]
)
args.decoder_kernel_size_list = getattr(
args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31]
)
if len(args.encoder_kernel_size_list) == 1:
args.encoder_kernel_size_list = (
args.encoder_kernel_size_list * args.encoder_layers
)
if len(args.decoder_kernel_size_list) == 1:
args.decoder_kernel_size_list = (
args.decoder_kernel_size_list * args.decoder_layers
)
assert (
len(args.encoder_kernel_size_list) == args.encoder_layers
), "encoder_kernel_size_list doesn't match encoder_layers"
assert (
len(args.decoder_kernel_size_list) == args.decoder_layers
), "decoder_kernel_size_list doesn't match decoder_layers"
args.encoder_glu = getattr(args, "encoder_glu", True)
args.decoder_glu = getattr(args, "decoder_glu", True)
args.input_dropout = getattr(args, "input_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout)
def lightconv_wmt_en_de(args):
base_architecture(args) | null |
183,954 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
def lightconv_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
def lightconv_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
lightconv_wmt_en_de_big(args) | null |
183,955 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
def lightconv_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
def lightconv_wmt_zh_en_big(args):
args.dropout = getattr(args, "dropout", 0.2)
args.attention_dropout = getattr(args, "attention_dropout", 0.2)
args.weight_dropout = getattr(args, "weight_dropout", 0.2)
lightconv_wmt_en_de_big(args) | null |
183,956 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import RobertaHubInterface
def base_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.max_source_positions = getattr(args, "max_positions", 512)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
# BERT has a few structural differences compared to the original Transformer
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.untie_weights_roberta = getattr(args, "untie_weights_roberta", False)
# Adaptive input config
args.adaptive_input = getattr(args, "adaptive_input", False)
# LayerDrop config
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.0)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
# Quantization noise config
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
# R4F config
args.spectral_norm_classification_head = getattr(
args, "spectral_norm_classification_head", False
)
def roberta_prenorm_architecture(args):
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
base_architecture(args) | null |
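A sketch of the two flags that distinguish the prenorm variant from the BERT-style default: LayerNorm moves from the embedding to before each encoder block:

from argparse import Namespace

args = Namespace()
roberta_prenorm_architecture(args)
print(args.layernorm_embedding)       # False
print(args.encoder_normalize_before)  # True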
183,957 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import RobertaHubInterface
def base_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.max_source_positions = getattr(args, "max_positions", 512)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
# BERT has a few structural differences compared to the original Transformer
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.untie_weights_roberta = getattr(args, "untie_weights_roberta", False)
# Adaptive input config
args.adaptive_input = getattr(args, "adaptive_input", False)
# LayerDrop config
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.0)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
# Quantization noise config
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
# R4F config
args.spectral_norm_classification_head = getattr(
args, "spectral_norm_classification_head", False
)
def roberta_base_architecture(args):
base_architecture(args) | null |
183,958 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import RobertaHubInterface
def base_architecture(args):
def roberta_large_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
base_architecture(args) | null |
183,959 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import RobertaHubInterface
def base_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.max_source_positions = getattr(args, "max_positions", 512)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
# BERT has a few structural differences compared to the original Transformer
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.untie_weights_roberta = getattr(args, "untie_weights_roberta", False)
# Adaptive input config
args.adaptive_input = getattr(args, "adaptive_input", False)
# LayerDrop config
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.0)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
# Quantization noise config
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
# R4F config
args.spectral_norm_classification_head = getattr(
args, "spectral_norm_classification_head", False
)
def xlm_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1280)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1280 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
base_architecture(args) | null |
183,963 | import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m | null |
183,964 | import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m | null |
183,965 | import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
def transformer_wmt_en_de(args):
base_architecture(args) | null |
183,966 | import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args) | null |
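A sketch of how these presets chain (assuming the module's base_architecture is in scope): the en-fr variant only lowers dropout before delegating to the en-de "big" preset:

from argparse import Namespace

args = Namespace()
transformer_vaswani_wmt_en_fr_big(args)
print(args.dropout)            # 0.1 (set here, so the 0.3 default is skipped)
print(args.encoder_embed_dim)  # 1024 (from the en-de big preset)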
183,967 | import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args) | null |
183,970 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def transformer_lm_big(args):
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_lm_architecture(args)
def transformer_lm_baevski_wiki103(args):
args.decoder_layers = getattr(args, "decoder_layers", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.3)
args.adaptive_input = getattr(args, "adaptive_input", True)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", True)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", "20000,60000")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "20000,60000"
)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0.2)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", True)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", True)
transformer_lm_big(args) | null |
183,971 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def transformer_lm_big(args):
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_lm_architecture(args)
def transformer_lm_baevski_gbw(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", True)
transformer_lm_big(args) | null |
183,972 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
def transformer_lm_gpt(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 3072)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args) | null |
183,973 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
def transformer_lm_gpt2_small(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_layers = getattr(args, "decoder_layers", 24)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args) | null |
183,974 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
def transformer_lm_gpt2_tiny(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 64)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 64)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 1)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args) | null |
183,975 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def base_lm_architecture(args):
def transformer_lm_gpt2_medium(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1280)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 5120)
args.decoder_layers = getattr(args, "decoder_layers", 36)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 20)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args) | null |
183,976 | from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from omegaconf import II
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
def transformer_lm_gpt2_big(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1600)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 6400)
args.decoder_layers = getattr(args, "decoder_layers", 48)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 25)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args) | null |
183,977 | from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerModel,
base_architecture,
transformer_wmt_en_de_big,
)
def base_architecture(args):
def transformer_align(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", 4)
args.full_context_alignment = getattr(args, "full_context_alignment", False)
base_architecture(args) | null |
183,985 | import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATEncoder, FairseqNATModel, ensemble_decoder, ensemble_encoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
def _mean_pooling(enc_feats, src_masks):
# enc_feats: T x B x C
# src_masks: B x T or None
if src_masks is None:
enc_feats = enc_feats.mean(0)
else:
src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats)
enc_feats = (
(enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None]
).sum(0)
return enc_feats | null |
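A short usage sketch for _mean_pooling above, assuming the function is in scope; shapes follow its comments (enc_feats is T x B x C, src_masks is B x T with True marking padding):

import torch

T, B, C = 4, 2, 3
enc_feats = torch.arange(T * B * C, dtype=torch.float).reshape(T, B, C)
src_masks = torch.tensor([[False, False, True, True],      # sentence 0: 2 real steps
                          [False, False, False, False]])   # sentence 1: 4 real steps
pooled = _mean_pooling(enc_feats, src_masks)
assert pooled.shape == (B, C)
# sentence 0 equals the plain mean over its two unpadded timesteps
assert torch.allclose(pooled[0], enc_feats[:2, 0].mean(0))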
183,986 | import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATEncoder, FairseqNATModel, ensemble_decoder, ensemble_encoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
def _argmax(x, dim):
    # one-hot of the max along `dim` (ties yield multiple ones), cast to x's dtype
    return (x == x.max(dim, keepdim=True)[0]).type_as(x)
183,987 | import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATEncoder, FairseqNATModel, ensemble_decoder, ensemble_encoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
def _uniform_assignment(src_lens, trg_lens):
max_trg_len = trg_lens.max()
steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size
# max_trg_len
index_t = utils.new_arange(trg_lens, max_trg_len).float()
index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len
index_t = torch.round(index_t).long().detach()
return index_t | null |
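What _uniform_assignment computes, sketched with torch.arange standing in for utils.new_arange (a hedged illustration, not the library call): target position j is mapped to round(j * (src_len - 1) / (trg_len - 1)), stretching the source uniformly over the target length.

import torch

src_lens, trg_lens = torch.tensor([4]), torch.tensor([7])
steps = (src_lens.float() - 1) / (trg_lens.float() - 1)               # 0.5
index_t = torch.round(steps[:, None] * torch.arange(7).float()[None, :]).long()
assert index_t.tolist() == [[0, 0, 1, 2, 2, 2, 3]]   # torch.round is half-to-even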
183,988 | import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATEncoder, FairseqNATModel, ensemble_decoder, ensemble_encoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
def base_architecture(args):
def nonautoregressive_transformer_wmt_en_de(args):
base_architecture(args) | null |
183,989 | import logging
import os
import signal
import threading
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from fairseq.distributed import (
DistributedTimeoutWrapper,
LegacyDistributedDataParallel,
ModuleProxyWrapper,
TPUDistributedDataParallel,
)
_GOSSIP_DISABLED = False
try:
import gossip
except ImportError:
_GOSSIP_DISABLED = True
The provided code snippet includes necessary dependencies for implementing the `DistributedFairseqModel` function. Write a Python function `def DistributedFairseqModel(args, model, process_group, device)` to solve the following problem:
Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap process_group: the c10d process group to be used for distributed data parallel all-reduction. device: device to move model to
Here is the function:
def DistributedFairseqModel(args, model, process_group, device):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
device: device to move model to
"""
assert isinstance(model, nn.Module)
if args.tpu:
wrapped_model = TPUDistributedDataParallel(
module=model.to(device),
process_group=process_group,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend in {"c10d", "pytorch_ddp"}:
wrapped_model = DistributedDataParallel(
module=model.to(device),
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
find_unused_parameters=args.find_unused_parameters,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend in {"no_c10d", "legacy_ddp"}:
wrapped_model = LegacyDistributedDataParallel(
module=model.to(device),
buffer_size=2 ** 28,
process_group=process_group,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend == "slow_mo":
if _GOSSIP_DISABLED:
raise ImportError(
"Cannot find gossip library. Please install from: "
"github.com/facebookresearch/stochastic_gradient_push"
)
# The values of slowmo_momentum below were obtained by tuning on the
# En-De 16 dataset by training the transformer_wmt_en_de_large model
if args.slowmo_momentum is None:
if args.distributed_world_size <= 16:
args.slowmo_momentum = 0.0
elif args.distributed_world_size <= 32:
args.slowmo_momentum = 0.2
elif args.distributed_world_size <= 64:
args.slowmo_momentum = 0.5
else:
args.slowmo_momentum = 0.6
wrapped_model = gossip.GossipDataParallel(
module=model.to(device),
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
nprocs_per_node=args.nprocs_per_node,
slowmo_momentum=args.slowmo_momentum,
localsgd=(args.slowmo_algorithm == "LocalSGD"),
localsgd_frequency=args.localsgd_frequency,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
else:
raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)
# kill hung distributed jobs after a timeout
wrapped_model = DistributedTimeoutWrapper(
wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1)
)
return wrapped_model | Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap process_group: the c10d process group to be used for distributed data parallel all-reduction. device: device to move model to |
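The "forward missing attributes" behavior the docstring describes can be sketched in a few lines; this is a simplified illustration of the idea behind ModuleProxyWrapper, not fairseq's actual implementation:

import torch.nn as nn

class AttrForwardingWrapper(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def __getattr__(self, name):
        try:
            return super().__getattr__(name)    # wrapper's own params/buffers/submodules
        except AttributeError:
            return getattr(self.module, name)   # fall through to the wrapped model

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

class ToyModel(nn.Module):
    def custom_method(self):
        return "hello"

wrapped = AttrForwardingWrapper(ToyModel())
assert wrapped.custom_method() == "hello"       # resolved on the inner model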
184,001 | from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
def fconv_lm_dauphin_wikitext103(args):
layers = "[(850, 6)] * 3"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 5)] * 4"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 4)] * 3"
layers += " + [(1024, 4)] * 1"
layers += " + [(2048, 4)] * 1"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,20000,200000"
)
base_lm_architecture(args) | null |
184,002 | from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
def fconv_lm_dauphin_gbw(args):
layers = "[(512, 5)]"
layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3"
layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3"
layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6"
layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
base_lm_architecture(args) | null |
184,003 | import logging
import math
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from torch import Tensor
def s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args)
def s2t_transformer_xs(args):
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 3)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.dropout = getattr(args, "dropout", 0.3)
s2t_transformer_s(args) | null |
184,004 | import logging
import math
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from torch import Tensor
def s2t_transformer_s(args):
def s2t_transformer_sp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_s(args) | null |
184,005 | import logging
import math
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from torch import Tensor
def s2t_transformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
base_architecture(args)
def s2t_transformer_mp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_m(args) | null |
184,006 | import logging
import math
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from torch import Tensor
def s2t_transformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
base_architecture(args)
def s2t_transformer_lp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_l(args) | null |
184,007 | import logging
import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer
from torch import Tensor
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.max_source_positions = getattr(args, "max_source_positions", 3000)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim) | null |
184,008 | import logging
import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer
from torch import Tensor
def convtransformer_espnet(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) | null |
184,018 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BeamableMM,
FairseqDropout,
GradMultiply,
LearnedPositionalEmbedding,
LinearizedConvolution,
)
def base_architecture(args):
def fconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", "[(256, 3)] * 4")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
base_architecture(args) | null |
184,029 | from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m | null |
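A quick sanity check of the initialization above, assuming the Embedding helper from this row is in scope: the padding row is zeroed and the weight std is close to embedding_dim ** -0.5, which keeps magnitudes stable when embeddings are later scaled by sqrt(embedding_dim):

emb = Embedding(num_embeddings=1000, embedding_dim=64, padding_idx=1)
assert emb.weight[1].abs().sum() == 0    # the padding vector is all zeros
print(float(emb.weight.std()))           # roughly 64 ** -0.5 == 0.125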
184,030 | from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m | null |
184,031 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
SinusoidalPositionalEmbedding,
TransformerSentenceEncoder,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
def bert_base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.num_segment = getattr(args, "num_segment", 2)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.sentence_class_num = getattr(args, "sentence_class_num", 2)
args.sent_loss = getattr(args, "sent_loss", True)
args.apply_bert_init = getattr(args, "apply_bert_init", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
base_architecture(args)
def bert_large_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
bert_base_architecture(args) | null |
184,032 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
SinusoidalPositionalEmbedding,
TransformerSentenceEncoder,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.act_dropout = getattr(args, "act_dropout", 0.0)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", False
)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.num_segment = getattr(args, "num_segment", 2)
args.sentence_class_num = getattr(args, "sentence_class_num", 2)
args.sent_loss = getattr(args, "sent_loss", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
def xlm_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.num_segment = getattr(args, "num_segment", 1)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.sent_loss = getattr(args, "sent_loss", False)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.apply_bert_init = getattr(args, "apply_bert_init", True)
base_architecture(args) | null |
184,035 | import os
import sys
import time
import logging
from tqdm import tqdm
import torch
from fairseq import utils, tasks, options
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from torch import Tensor
from typing import Dict, List, Optional
logger = logging.getLogger("inference")
def baseline_forward_decoder(
    model,
    input_tokens,
    encoder_out: Dict[str, List[Tensor]],
    incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
    parallel_forward_start_pos=None,
    temperature: float = 1.0,
):
decoder_out = model.decoder.forward(input_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
parallel_forward_start_pos=parallel_forward_start_pos)
decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1])
pred_tokens = torch.argmax(decoder_out_tuple[0], dim=-1).squeeze(0)
return pred_tokens
def baseline_generate(data_lines, model, task, device, max_len=200):
# simplified AR greedy decoding
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
data_size = len(data_lines)
all_results = []
logger.info(f'Baseline generate')
start = time.perf_counter()
for start_idx in tqdm(range(0, data_size)):
bpe_line = data_lines[start_idx]
src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long()
net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device),
'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)}
encoder_out = model.encoder.forward_torchscript(net_input)
        incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
tokens = [tgt_dict.eos()]
for step in range(0, max_len):
cur_input_tokens = torch.tensor([tokens]).to(device).long()
pred_token = baseline_forward_decoder(model,
cur_input_tokens,
encoder_out,
incremental_state).item()
if pred_token == tgt_dict.eos():
break
else:
tokens.append(pred_token)
all_results.append(tgt_dict.string(tokens[1:]))
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | null |
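A hedged usage sketch for baseline_generate; CKPT and DATA are hypothetical paths, and load_model_ensemble_and_task is the helper already imported in this row:

CKPT, DATA = "checkpoints/ar_model.pt", "test.bpe.src"    # placeholder paths
models_list, _, task = load_model_ensemble_and_task([CKPT])
model = models_list[0].to("cuda").eval()
with open(DATA) as f:
    data_lines = [line.strip() for line in f]
hypos, seconds = baseline_generate(data_lines, model, task, device="cuda")
print(f"decoded {len(hypos)} sentences in {seconds:.1f}s")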
184,036 | import os
import sys
import time
import logging
from tqdm import tqdm
import torch
from fairseq import utils, tasks, options
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from torch import Tensor
from typing import Dict, List, Optional
logger = logging.getLogger("inference")
def gad_forward(incremental_state, encoder_state_ids, start_pos, block_size, tgt_dict, prev_output_tokens,
                encoder_out, AR_encoder_out, model, AR_model, beta, tau, max_len=200):
    # NOTE: `device`, `forward_decoder` and `cut_incremental_state` are assumed to be
    # defined at module scope; this snippet's dependency list omits them.
    output_tokens = torch.tensor([prev_output_tokens]).to(device)
_scores, _tokens = model.decoder(
normalize=False,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
).max(-1)
prev_output_tokens[start_pos:start_pos + block_size] = _tokens[0].tolist()[start_pos:start_pos + block_size]
cut_incremental_state(incremental_state, keep_len=start_pos, encoder_state_ids=encoder_state_ids)
cur_span_input_tokens = torch.tensor([[tgt_dict.eos()] + prev_output_tokens]).to(device)
AR_topk_tokens = forward_decoder(AR_model,
cur_span_input_tokens,
AR_encoder_out,
incremental_state,
parallel_forward_start_pos=start_pos,
beta=beta,
tau=tau)
bifurcation = block_size
for i, (token, AR_topk_token) in enumerate(zip(prev_output_tokens[start_pos:], AR_topk_tokens[:-1][:])):
if token not in AR_topk_token:
bifurcation = i
break
next_output_tokens = prev_output_tokens[:start_pos + bifurcation] + [AR_topk_tokens[bifurcation][0]] + [
tgt_dict.unk()] * block_size
pass_token = 0
find_eos = False
for i, o in enumerate(next_output_tokens[start_pos:start_pos + bifurcation + 1]):
if o == tgt_dict.eos() or i + start_pos == max_len:
next_output_tokens = next_output_tokens[0:start_pos + i]
start_pos = -1
pass_token = i
find_eos = True
break
if not find_eos:
start_pos = start_pos + bifurcation + 1
pass_token = bifurcation + 1
return start_pos, next_output_tokens, pass_token
def gad_generate(data_lines, model, AR_model, task, block_size, device, beta=1, tau=0, max_len=200):
# Generalized Aggressive Decoding
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
encoder_state_ids = []
for i in range(len(AR_model.decoder.layers)):
encoder_state_ids.append(AR_model.decoder.layers[i].encoder_attn._incremental_state_id)
data_size = len(data_lines)
all_results = []
logger.info(f'GAD generate')
pass_tokens = [0] * max_len
sent_nums = [0] * max_len
start = time.perf_counter()
for start_idx in tqdm(range(0, data_size)):
bpe_line = data_lines[start_idx]
src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long()
net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device),
'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)}
AR_encoder_out = AR_model.encoder.forward_torchscript(net_input)
encoder_out = model.encoder.forward_torchscript(net_input)
        incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
prev_output_tokens = [tgt_dict.unk()] * block_size
start_pos = 0
for step in range(0, max_len):
start_pos, prev_output_tokens, pass_token = gad_forward(incremental_state, encoder_state_ids,
start_pos, block_size, tgt_dict,
prev_output_tokens,
encoder_out, AR_encoder_out, model,
AR_model, beta, tau)
pass_tokens[step] += pass_token
sent_nums[step] += 1
if start_pos == -1:
break
all_results.append(tgt_dict.string(prev_output_tokens))
total_pass_tokens = 0
total_sent_nums = 0
for step in range(max_len):
if sent_nums[step] > 0:
total_pass_tokens += pass_tokens[step]
total_sent_nums += sent_nums[step]
print("Avg accepted tokens:", total_pass_tokens / total_sent_nums)
total_iter = 0
    for step in range(max_len):
        # at step 0 this reads sent_nums[-1]; harmless here, since the step-0
        # contribution below is multiplied by step (== 0) anyway
        if sent_nums[step - 1] > 0:
if step == 0:
last_num = data_size
else:
last_num = sent_nums[step - 1]
if (last_num - sent_nums[step]) > 0:
total_iter += (last_num - sent_nums[step]) * (step)
print("Avg decoding iteration:", total_iter / data_size)
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | null |
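The accept/verify step inside gad_forward, isolated with made-up token ids: draft tokens are accepted left to right until the first position whose token is missing from the AR verifier's top-k candidates.

draft = [17, 23, 42, 8]                                # NAT block proposal
ar_topk = [[17, 5], [23, 9], [7, 11], [8, 2], [3, 1]]  # verifier top-k per position
bifurcation = len(draft)
for i, (token, topk) in enumerate(zip(draft, ar_topk)):
    if token not in topk:
        bifurcation = i                                # first rejected position
        break
assert bifurcation == 2    # 42 is not in [7, 11], so two draft tokens are accepted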
184,042 | import argparse
import collections
import os
import re
import torch
from fairseq.file_io import PathManager
class PathManager:
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
def get_local_path(path: str, **kwargs) -> str:
def exists(path: str) -> bool:
def isfile(path: str) -> bool:
def ls(path: str) -> List[str]:
def mkdirs(path: str) -> None:
def rm(path: str) -> None:
def chmod(path: str, mode: int) -> None:
def register_handler(handler) -> None:
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
def path_requires_pathmanager(path: str) -> bool:
def supports_rename(path: str) -> bool:
def rename(src: str, dst: str):
def opena(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
def async_close() -> bool:
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt")
else:
pt_regexp = re.compile(r"checkpoint(\d+)\.pt")
files = PathManager.ls(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception(
            "Found {} checkpoint files but need at least {}".format(len(entries), n)
        )
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] | null |
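The filename matching in last_n_checkpoints, illustrated with hypothetical checkpoint names; update-based naming keys on the update count captured by the regex, and results are taken newest-first:

import re

pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt")    # update_based=True
files = ["checkpoint_3_15000.pt", "checkpoint_2_10000.pt", "checkpoint_best.pt"]
entries = []
for f in files:
    m = pt_regexp.fullmatch(f)
    if m is not None:
        entries.append((int(m.group(1)), f))
assert sorted(entries, reverse=True)[:1] == [(15000, "checkpoint_3_15000.pt")]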
184,043 | import argparse
import logging
import math
import os
import sys
import time
from argparse import Namespace
from itertools import chain
from typing import Dict, Optional, Any, List, Tuple, Callable
import numpy as np
import torch
from fairseq import (
    checkpoint_utils,
    distributed_utils,
    models,
    optim,
    options,
    quantization_utils,
    tasks,
    utils,
)
from fairseq.data import iterators
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed_utils import is_master
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.models.ema import build_ema
from fairseq.trainer import Trainer
from omegaconf import DictConfig, OmegaConf
logger = logging.getLogger("fairseq_cli.train")
def _flatten_config(cfg: DictConfig):
config = OmegaConf.to_container(cfg)
# remove any legacy Namespaces and replace with a single "args"
namespace = None
for k, v in list(config.items()):
if isinstance(v, argparse.Namespace):
namespace = v
del config[k]
if namespace is not None:
config["args"] = vars(namespace)
return config
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
# Stopping conditions (and an additional one based on validation loss later
# on)
should_stop = False
if num_updates >= max_update:
should_stop = True
logger.info(
f"Stopping training due to "
f"num_updates: {num_updates} >= max_update: {max_update}"
)
training_time_hours = trainer.cumulative_training_time() / (60 * 60)
if (
cfg.optimization.stop_time_hours > 0
and training_time_hours > cfg.optimization.stop_time_hours
):
should_stop = True
logger.info(
f"Stopping training due to "
f"cumulative_training_time: {training_time_hours} > "
f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
)
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or should_stop
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or should_stop
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
) and not cfg.dataset.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
should_stop |= should_stop_early(cfg, valid_losses[0])
# Save checkpoint
if do_save or should_stop:
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
)
return valid_losses, should_stop
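# Worked example of the save/validate cadence above, with made-up config values
# (save_interval_updates=1000, validate_interval_updates=500, mid-epoch):
#   num_updates=2000 -> do_save is True (2000 % 1000 == 0), which also makes
#                       do_validate True via the mid-epoch-save branch
#   num_updates=1500 -> do_save is False, but do_validate is still True
#                       via the interval branch (1500 % 500 == 0)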
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
log_file: Optional[str] = None,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = "tqdm",
wandb_project: Optional[str] = None,
wandb_run_name: Optional[str] = None,
azureml_logging: Optional[bool] = False,
):
if log_format is None:
log_format = default_log_format
if log_file is not None:
handler = logging.FileHandler(filename=log_file)
logger.addHandler(handler)
if log_format == "tqdm" and not sys.stderr.isatty():
log_format = "simple"
if log_format == "json":
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "none":
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == "simple":
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "tqdm":
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError("Unknown log format: {}".format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
if wandb_project:
bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
if azureml_logging:
bar = AzureMLProgressBarWrapper(bar)
return bar
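# Hedged usage sketch for progress_bar above; epoch_itr is assumed to be a
# fairseq EpochBatchIterator. With log_format="json", one JSON record is
# emitted every log_interval steps:
#   itr = epoch_itr.next_epoch_itr(shuffle=True)
#   progress = progress_bar(itr, log_format="json", log_interval=100,
#                           epoch=epoch_itr.epoch)
#   for samples in progress:
#       ...   # train step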
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):
if isinstance(cfg, Namespace):
logger.warning(
"argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf"
)
cfg = convert_namespace_to_omegaconf(cfg)
self.cfg = cfg
self.task = task
        # catalog shared parameters (_catalog_shared_params, _get_module_by_path and
        # _set_module_by_path below are module-level helpers of trainer.py that this
        # snippet's dependency list omits)
        shared_params = _catalog_shared_params(model)
self.tpu = cfg.common.tpu
self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu
if self.cuda:
self.device = torch.device("cuda")
elif self.tpu:
self.device = utils.get_tpu_device()
else:
self.device = torch.device("cpu")
if self.is_fsdp:
import fairscale
if self.cfg.common.bf16:
raise ValueError(
"FullyShardedDataParallel is not compatible with --bf16 or "
"--memory-efficient-bf16"
)
if self.cfg.distributed_training.zero_sharding != "none":
raise ValueError(
"FullyShardedDataParallel is not compatible with --zero-sharding "
"option (it's already built in)"
)
if max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0":
raise RuntimeError(
"Please update to fairscale 0.4.0 or newer when combining "
"--update-freq with FullyShardedDataParallel"
)
else:
if (
hasattr(self.cfg.distributed_training, "cpu_offload")
and self.cfg.distributed_training.cpu_offload
):
raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded")
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if not self.is_fsdp:
if cfg.common.fp16:
assert not cfg.common.amp, "Cannot use fp16 and AMP together"
self._criterion = self._criterion.half()
self._model = self._model.half()
elif cfg.common.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
elif cfg.common.amp:
self._amp_retries = 0
if (
not cfg.distributed_training.pipeline_model_parallel
# the DistributedFairseqModel wrapper will handle moving to device,
# so only handle cases which don't use the wrapper
and not self.use_distributed_wrapper
):
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel
self.last_device = None
if self.cuda and self.pipeline_model_parallel:
self.last_device = torch.device(
cfg.distributed_training.pipeline_devices[-1]
)
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
"detected shared parameter: {} <- {}".format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = None # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
self._ema = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(
self.cuda_env, group=distributed_utils.get_global_group()
)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
def data_parallel_process_group(self):
return distributed_utils.get_data_parallel_group()
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def is_data_parallel_master(self):
# NOTE: this returns true for all model parallel replicas with data
# parallel rank 0
return self.data_parallel_rank == 0
def use_distributed_wrapper(self) -> bool:
return (
self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf
) or (
self.is_fsdp and self.cfg.distributed_training.cpu_offload
)
def should_save_checkpoint_on_current_rank(self) -> bool:
"""Indicates whether to save checkpoints on the current DDP rank."""
if (
self.is_fsdp and self.cfg.distributed_training.use_sharded_state
) or getattr(self.cfg.model, "base_layers", 0) > 0:
return True
else:
return self.is_data_parallel_master
def always_call_state_dict_during_save_checkpoint(self) -> bool:
if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state:
# FSDP calls communication collective when consolidating checkpoints
return True
else:
return False
def checkpoint_suffix(self) -> str:
"""Suffix to add to the checkpoint file name."""
if self.is_fsdp and self.cfg.distributed_training.use_sharded_state:
return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format(
self.data_parallel_rank
)
else:
return self.cfg.checkpoint.checkpoint_suffix or ""
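        # Illustration (assumed naming; the suffix is spliced into the file
        # name by checkpoint utilities): under FSDP sharded state on rank 3,
        # a checkpoint otherwise named checkpoint_last.pt becomes
        # checkpoint_last-shard3.pt on that rank.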
    @property
    def criterion(self):
if self._wrapped_criterion is None:
if utils.has_parameters(self._criterion) and self.use_distributed_wrapper:
self._wrapped_criterion = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._criterion,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
    @property
    def model(self):
if self._wrapped_model is None:
if self.use_distributed_wrapper:
self._wrapped_model = models.DistributedFairseqModel(
self.cfg.distributed_training,
self._model,
process_group=self.data_parallel_process_group,
device=self.device,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
    @property
    def ema(self):
if self._ema is None:
self._build_ema()
return self._ema
def _build_ema(self):
if self.cfg.ema.store_ema:
self._ema = build_ema(self._model, self.cfg.ema, self.device)
logger.info(
"Exponential Moving Average Shadow Model is initialized."
)
    @property
    def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
    @property
    def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.is_fsdp and self.cfg.common.fp16:
            # FullyShardedDataParallel always uses the MemoryEfficientFP16
            # wrapper, mostly for the grad scaling. But if we don't have the
# --memory-efficient-fp16 flag set, then we're effectively doing
# regular --fp16 and can allow the use of optimizers that would
# otherwise be unsupported by MemoryEfficientFP16Optimizer.
allow_unsupported = not self.cfg.common.memory_efficient_fp16
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params, allow_unsupported=allow_unsupported
)
elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16 or --amp, "
"please switch to FP32 which is likely to be faster"
)
if (
self.cfg.common.memory_efficient_fp16
or self.cfg.common.memory_efficient_bf16
):
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.cfg, params
)
elif self.cfg.common.amp:
self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info("NOTE: your device may support faster training with --fp16 or --amp")
self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
if self.is_fsdp:
assert (
not self.cfg.optimization.use_bmuf
), "--ddp-backend=fully_sharded is not compatible with BMUF"
assert self._optimizer.supports_flat_params, (
"--ddp-backend=fully_sharded is only compatible with pointwise "
"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). "
"However, the sharding will result in slightly different results when "
"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)"
)
if self.cfg.optimization.use_bmuf:
self._optimizer = optim.FairseqBMUF(
self.cfg.bmuf,
self._optimizer,
)
if self.cfg.distributed_training.zero_sharding == "os":
if (
self.cfg.common.fp16
and not self.cfg.common.memory_efficient_fp16
and not self.cfg.common.memory_efficient_bf16
) and not self.cfg.common.fp16_no_flatten_grads:
raise ValueError(
"ZeRO is incomptabile with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(self._optimizer, self.data_parallel_process_group)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(
self.cfg.lr_scheduler,
self.optimizer,
)
self._lr_scheduler.step_update(0)
    @property
    def is_fsdp(self):
return self.cfg.distributed_training.ddp_backend == "fully_sharded"
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
if self.cfg.checkpoint.no_save_optimizer_state:
return
self._gathered_optim_state = None
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
elif self.is_fsdp and not self.model.use_sharded_state:
st = self.model.gather_full_optim_state_dict(
self.optimizer
) # only returns on rank 0
self._gathered_optim_state = st
def state_dict(self):
state_dict = {
"args": None, # legacy
"cfg": (
OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True)
if OmegaConf.is_config(self.cfg)
else self.cfg
),
"model": self.model.state_dict(),
"criterion": (
self.criterion.state_dict()
if utils.has_parameters(self.criterion)
else None
),
"optimizer_history": (self._optim_history or [])
+ [
{
"criterion_name": self.get_criterion().__class__.__name__,
"optimizer_name": self.optimizer.__class__.__name__,
"lr_scheduler_state": self.lr_scheduler.state_dict(),
"num_updates": self.get_num_updates(),
}
],
"task_state": self.task.state_dict() if self.task is not None else {},
"extra_state": {
"metrics": metrics.state_dict(),
"previous_training_time": self.cumulative_training_time(),
},
}
if self.cfg.ema.store_ema:
# Save EMA model state as extra state
state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict()
if self.cfg.ema.ema_fp32:
# Save EMA params in fp32
state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params
if not self.cfg.checkpoint.no_save_optimizer_state:
if self._gathered_optim_state is not None:
state_dict["last_optimizer_state"] = self._gathered_optim_state
self._gathered_optim_state = None
else:
state_dict["last_optimizer_state"] = self.optimizer.state_dict()
if self.is_fsdp:
# save meta data for recombining checkpoint upon loading
state_dict["fsdp_metadata"] = self.model.local_metadata_dict()
return state_dict
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
logger.info(f"Saving checkpoint to {filename}")
# call state_dict on all ranks in case it needs internal communication
state_dict = utils.move_to_cpu(self.state_dict())
state_dict["extra_state"].update(extra_state)
if self.should_save_checkpoint_on_current_rank:
checkpoint_utils.torch_persistent_save(
state_dict,
filename,
async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,
)
logger.info(f"Finished saving checkpoint to {filename}")
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""
Load all training state from a checkpoint file.
rank = 0 will load the checkpoint, and then broadcast it to all
other ranks.
"""
extra_state, self._optim_history, last_optim_state = None, [], None
logger.info(f"Preparing to load checkpoint {filename}")
is_distributed = self.data_parallel_world_size > 1
bexists = PathManager.isfile(filename)
if bexists:
load_on_all_ranks = (
self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks
# TPUs don't support broadcast yet, so load checkpoints
# on every worker for now
or self.tpu
# FSDP requires loading checkpoint shards on all ranks
or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state)
or getattr(self.cfg.model, "base_layers", 0) > 0
)
if load_on_all_ranks or self.data_parallel_rank == 0:
state = checkpoint_utils.load_checkpoint_to_cpu(
filename, load_on_all_ranks=load_on_all_ranks
)
last_optim_state = state.get("last_optimizer_state", None)
# If doing zero_sharding, do not broadcast global optimizer
# state. Later we will broadcast sharded states to each rank
                # to keep memory from exploding.
if (
not load_on_all_ranks
and self.cfg.distributed_training.zero_sharding == "os"
and "last_optimizer_state" in state
and is_distributed
):
state["last_optimizer_state"] = "SHARDED"
else:
last_optim_state = None
state = None
if is_distributed and not load_on_all_ranks:
state = distributed_utils.broadcast_object(
state,
src_rank=0,
group=self.data_parallel_process_group,
dist_device=self.device,
)
if self.data_parallel_rank > 0:
last_optim_state = state.get("last_optimizer_state", None)
# load model parameters
try:
self.model.load_state_dict(
state["model"], strict=True, model_cfg=self.cfg.model
)
# save memory for later steps
del state["model"]
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
del state["criterion"]
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}"
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}"
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
if self.is_fsdp and not self.model.use_sharded_state:
# if use_sharded_state, the last_optim_state is already sharded, skip this
last_optim_state = self.model.get_shard_from_optim_state_dict(
last_optim_state
)
elif not load_on_all_ranks and is_distributed:
last_optim_state = self.optimizer.broadcast_global_state_dict(
last_optim_state
)
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
itr_state = extra_state["train_iterator"]
            if isinstance(itr_state, list):
# assert len(itr_state) == self.data_parallel_world_size
itr_state = itr_state[self.data_parallel_rank]
extra_state["train_iterator"] = itr_state
epoch = itr_state.get("epoch", 1)
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if (
itr_state.get("version", 1) >= 2
and itr_state.get("iterations_in_epoch", 0) == 0
):
# reset meters at start of epoch
reset_meters = True
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
if self.cfg.ema.store_ema:
if "ema" not in extra_state:
                logger.warning(
                    "EMA not found in checkpoint, but store_ema is True; "
                    "re-initializing EMA from the checkpoint model."
)
self.ema.restore(state["model"], build_fp32_params=self.cfg.ema.ema_fp32)
else:
logger.info(
"Loading EMA from checkpoint"
)
self.ema.restore(extra_state["ema"], build_fp32_params=False)
if self.cfg.ema.ema_fp32:
if "ema_fp32_params" in extra_state:
logger.info(
"Loading EMA fp32 params from checkpoint"
)
self.ema.build_fp32_params(extra_state["ema_fp32_params"])
else:
logger.info(
"Building EMA fp32 params from EMA model in checkpoint"
)
self.ema.build_fp32_params()
logger.info(
"Loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
else:
logger.info("No existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.cfg.dataset.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
tpu=self.tpu,
)
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.train_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.cfg.dataset.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.cfg.dataset.num_workers,
epoch=epoch,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.cfg.dataset.max_tokens_valid,
max_sentences=self.cfg.dataset.batch_size_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
# always pass a fixed "epoch" to keep validation data consistent
# across training epochs
epoch=1,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.reset_dummy_batch(batch_iterator.first_batch)
return batch_iterator
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
def begin_valid_epoch(self, epoch):
"""Called at the beginning of each validation epoch."""
# task specific setup per validation epoch
self.task.begin_valid_epoch(epoch, self.get_model())
def reset_dummy_batch(self, batch):
self._dummy_batch = batch
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# If EMA is enabled through store_ema=True
# and task.uses_ema is True, pass the EMA model as a keyword
# argument to the task.
extra_kwargs = {}
if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False):
extra_kwargs["ema_model"] = self.ema.get_model()
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
for i, sample in enumerate(samples): # delayed update loop
sample, is_dummy_batch = self._prepare_sample(sample)
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
# The no_sync context manager results in increased memory
# usage with FSDP, since full-size gradients will be
# accumulated on each GPU. It's typically a better tradeoff
# to do the extra communication with FSDP.
and not self.is_fsdp
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
**extra_kwargs,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.cfg.distributed_training.distributed_world_size == 1:
return None
else:
raise e
if self.tpu and i < len(samples) - 1:
                # tpu-comment: every XLA operation before marking a step is
                # appended to the IR graph, and processing too many batches
                # before marking a step can lead to OOM errors.
                # To handle the gradient accumulation use case, we explicitly
                # mark a step after each intermediate forward/backward pass,
                # leaving only the final pass to run through the optimizer update.
self._xla_markstep_and_send_to_cpu()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
logging_outputs, (
sample_size,
ooms,
total_train_time,
) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch
)
self._cumulative_training_time = (
total_train_time / self.data_parallel_world_size
)
overflow = False
try:
with torch.autograd.profiler.record_function("reduce-grads"):
# reduce gradients across workers
self.optimizer.all_reduce_grads(self.model)
if utils.has_parameters(self.criterion):
self.optimizer.all_reduce_grads(self.criterion)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (data_parallel_size / sample_size) since
# DDP normalizes by the number of data parallel workers for
# improved fp16 precision.
# Thus we get (sum_of_gradients / sample_size) at the end.
# In case of fp16, this step also undoes loss scaling.
# (Debugging note: Some optimizers perform this scaling on the
# fly, so inspecting model.parameters() or optimizer.params may
# still show the original, unscaled gradients.)
numer = (
self.data_parallel_world_size
if not self.cfg.optimization.use_bmuf or self._sync_stats()
else 1
)
self.optimizer.multiply_grads(numer / (sample_size or 1.0))
# Note: (sample_size or 1.0) handles the case of a zero gradient, in a
# way that avoids CPU/device transfers in case sample_size is a GPU or
# TPU object. The assumption is that the gradient itself is also 0.
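                # Worked example (illustrative numbers only): with 8 workers
                # and sample_size=4096 tokens, the DDP all-reduce mean leaves
                # sum_of_gradients / 8; multiplying by 8 / 4096 yields
                # sum_of_gradients / 4096, i.e. a per-token average gradient.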
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)
# check that grad norms are consistent across workers
# on tpu check tensor is slow
if not self.tpu:
if (
not self.cfg.optimization.use_bmuf
and self.cfg.distributed_training.ddp_backend != "slow_mo"
):
self._check_grad_norms(grad_norm)
if not torch.isfinite(grad_norm).all():
# in case of AMP, if gradients are Nan/Inf then
# optimizer step is still required
if self.cfg.common.amp:
overflow = True
else:
# check local gradnorm single GPU case, trigger NanDetector
raise FloatingPointError("gradients are Nan/Inf")
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.task.optimizer_step(
self.optimizer, model=self.model, update_num=self.get_num_updates()
)
if self.cfg.common.amp and overflow:
if self._amp_retries == self.cfg.common.amp_batch_retries:
logger.info("AMP: skipping this batch.")
self._amp_retries = 0
else:
self._amp_retries += 1
return self.train_step(samples, raise_oom) # recursion to feed in same batch
except FloatingPointError:
# re-run the forward and backward pass with hooks attached to print
# out where it fails
self.zero_grad()
with NanDetector(self.get_model()):
for _, sample in enumerate(samples):
sample, _ = self._prepare_sample(sample)
self.task.train_step(
sample,
self.model,
self.criterion,
self.optimizer,
self.get_num_updates(),
ignore_grad=False,
**extra_kwargs,
)
raise
except OverflowError as e:
overflow = True
logger.info(
f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}"
)
grad_norm = torch.tensor(0.0).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer
# after the step
if hasattr(self.model, "perform_additional_optimizer_actions"):
if hasattr(self.optimizer, "fp32_params"):
self.model.perform_additional_optimizer_actions(
self.optimizer.optimizer, self.optimizer.fp32_params
)
else:
self.model.perform_additional_optimizer_actions(
self.optimizer.optimizer
)
logging_output = None
if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo":
self.set_num_updates(self.get_num_updates() + 1)
if self.cfg.ema.store_ema:
# Step EMA forward with new model.
self.ema.step(
self.get_model(),
self.get_num_updates(),
)
metrics.log_scalar(
"ema_decay",
self.ema.get_decay(),
priority=10000,
round=5,
weight=0,
)
if self.tpu:
import torch_xla.core.xla_model as xm
# mark step on TPUs
self._xla_markstep_and_send_to_cpu()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.cfg.common.log_interval == 0:
# log memory usage
mem_info = xm.get_memory_info(self.device)
gb_free = mem_info["kb_free"] / 1024 / 1024
gb_total = mem_info["kb_total"] / 1024 / 1024
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
metrics.log_scalar(
"gb_total", gb_total, priority=1600, round=1, weight=0
)
logging_outputs = self._xla_markstep_and_send_to_cpu(
logging_outputs
)
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else:
if self.cuda and self.cuda_env is not None:
# log minimum free memory over the iteration
gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
torch.cuda.reset_peak_memory_stats()
gb_free = self.cuda_env.total_memory_in_GB - gb_used
metrics.log_scalar(
"gb_free", gb_free, priority=1500, round=1, weight=0
)
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.cfg.common.empty_cache_freq > 0
and (
(self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)
% self.cfg.common.empty_cache_freq
)
== 0
):
torch.cuda.empty_cache()
if self.cfg.common.fp16 or self.cfg.common.amp:
metrics.log_scalar(
"loss_scale",
(
self.optimizer.scaler.loss_scale
if self.cfg.common.fp16
else self.optimizer.scaler.get_scale()
),
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("valid_step") # wait for all workers
# If EMA is enabled through store_ema=True
# and task.uses_ema is True, pass the EMA model as a keyword
# argument to the task.
extra_kwargs = {}
if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False):
extra_kwargs["ema_model"] = self.ema.get_model()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample, is_dummy_batch = self._prepare_sample(sample)
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion, **extra_kwargs
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.0
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size,) = self._aggregate_logging_outputs(
logging_outputs,
sample_size,
ignore=is_dummy_batch,
)
# log validation stats
if self.tpu:
logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)
# logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_outputs
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step_begin_epoch(self, epoch):
"""Adjust the learning rate at the beginning of the epoch."""
self.lr_scheduler.step_begin_epoch(epoch)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
if isinstance(new_lr, dict):
for k, v in new_lr.items():
metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300)
new_lr = new_lr.get("default", next(iter(new_lr.values())))
else:
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if "get_meter" not in self._warn_once:
self._warn_once.add("get_meter")
utils.deprecation_warning(
"Trainer.get_meter is deprecated. Please use fairseq.metrics instead."
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_") :]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
def agg_norm_fn(total_norm):
total_norm = total_norm.cuda().float() ** 2
total_norm = distributed_utils.all_reduce(
total_norm, group=self.data_parallel_process_group
)
return total_norm ** 0.5
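        # i.e. with per-shard norms n_r under FSDP, the global gradient norm
        # is recovered as sqrt(sum_r n_r^2) via an all-reduce of squared norms.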
should_agg_norm = (
self.is_fsdp
and (
self.data_parallel_process_group is not None
or torch.distributed.is_initialized()
)
)
return self.optimizer.clip_grad_norm(
clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None
)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _fp_convert_sample(self, sample):
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _prepare_sample(self, sample, is_dummy=False):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
assert (
self._dummy_batch is not None and len(self._dummy_batch) > 0
), "Invalid dummy batch: {}".format(self._dummy_batch)
sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)
return sample, True
        # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth,
# it makes sense to do the format conversion on the CPU and then transfer
# a smaller buffer to the device. This also saves GPU memory capacity.
if self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
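        # e.g. an fp32 -> fp16 conversion done here on the CPU halves the
        # bytes (4 -> 2 per element) that move_to_cuda transfers below.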
if self.cuda:
if self.pipeline_model_parallel:
if 'target' in sample:
sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device)
else:
sample = utils.move_to_cuda(sample)
elif self.tpu and is_dummy:
# the dummy batch may not be on the appropriate device
sample = utils.move_to_cuda(sample, device=self.device)
if not self.cfg.common.on_cpu_convert_precision:
sample = self._fp_convert_sample(sample)
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
return sample, False
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.cfg.common.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
        # Return True when using multiple GPUs with DDP, or when using
        # multiple GPUs with BMUF and this update is a BMUF sync step that
        # comes after the warmup iterations have completed.
if self.data_parallel_world_size == 1:
return False
elif self.cfg.optimization.use_bmuf:
return (
self.get_num_updates() + 1
) % self.cfg.bmuf.global_sync_iter == 0 and (
self.get_num_updates() + 1
) > self.cfg.bmuf.warmup_iterations
else:
return True
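        # Example: with global_sync_iter=50 and warmup_iterations=500, stats
        # sync when (num_updates + 1) is a multiple of 50 and exceeds 500,
        # i.e. at updates 549, 599, 649, ...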
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(
zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.cfg.common, "all_gather_list_size", 16384),
group=self.data_parallel_process_group,
)
)
)
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data["extra_stats_" + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data["logging_outputs_" + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data, device=self.device, group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf, group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
(torch.isfinite(tensor).all()
and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all())
or
(self.cfg.common.amp and not torch.isfinite(tensor).all())
# in case of amp non-finite grads are fine
)
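        # i.e. ranks must agree on the grad norm to ~1e-6 relative tolerance;
        # non-finite values are tolerated only under AMP, where overflow is
        # handled by retrying the batch with a reduced loss scale.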
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(
pretty_detail
)
# use FloatingPointError to trigger NanDetector
raise FloatingPointError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=legacy_ddp. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if grad_norm is not None and (
not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)
):
metrics.log_speed("ups", 1.0, priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.cfg.optimization.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.cfg.optimization.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data("CompileTime")
if compile_stats is None:
return
num_xla_compiles = compile_stats[0]
if num_xla_compiles > self._num_xla_compiles:
logger.warning(
"XLA compilation detected on device #{}; too many of these can lead "
"to slow training, but we expect a few in the beginning".format(
self.cfg.distributed_training.distributed_rank
)
)
self._num_xla_compiles = num_xla_compiles
def _xla_markstep_and_send_to_cpu(self, data=None):
import torch_xla.core.xla_model as xm
xm.mark_step()
if data is not None:
from fairseq.utils import xla_device_to_cpu
return xla_device_to_cpu(data)
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]` to solve the following problem:
Train the model for one epoch and return validation losses.
Here is the function:
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
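    # Example: with update_freq=4, GroupedIterator yields lists of 4 batches;
    # trainer.train_step accumulates gradients over each list before taking a
    # single optimizer step (4x the effective batch size).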
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
wandb_run_name=os.environ.get(
"WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
),
azureml_logging=(
cfg.common.azureml_logging
if distributed_utils.is_master(cfg.distributed_training)
else False
),
)
progress.update_config(_flatten_config(cfg))
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
logger.info("Start iterating over samples")
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop | Train the model for one epoch and return validation losses. |
184,044 | import argparse
import logging
import math
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed_utils import is_master
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
from omegaconf import DictConfig, OmegaConf
def main(cfg: FairseqConfig) -> None:
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
def cli_main(
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
cfg = convert_namespace_to_omegaconf(args)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main) | null |
184,048 | import logging
import os
import sys
from argparse import Namespace
from itertools import chain
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from omegaconf import DictConfig
def main(cfg: DictConfig, override_args=None):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
assert (
cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
torch.cuda.set_device(cfg.distributed_training.device_id)
if cfg.distributed_training.distributed_world_size > 1:
data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
data_parallel_rank = distributed_utils.get_data_parallel_rank()
else:
data_parallel_world_size = 1
data_parallel_rank = 0
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, "model_overrides", "{}")))
else:
overrides = None
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[cfg.common_eval.path],
arg_overrides=overrides,
suffix=cfg.checkpoint.checkpoint_suffix,
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(saved_cfg)
# Build criterion
criterion = task.build_criterion(saved_cfg.criterion)
criterion.eval()
for subset in cfg.dataset.valid_subset.split(","):
try:
task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task)
dataset = task.dataset(subset)
except KeyError:
raise Exception("Cannot find dataset: " + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=data_parallel_world_size,
shard_id=data_parallel_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
if data_parallel_world_size > 1:
log_outputs = distributed_utils.all_gather_list(
log_outputs,
max_size=cfg.common.all_gather_list_size,
group=distributed_utils.get_data_parallel_group(),
)
log_outputs = list(chain.from_iterable(log_outputs))
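            # chain.from_iterable flattens the gathered per-worker lists back
            # into a single list of per-batch logging outputs for reduction.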
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
progress.print(log_output, tag=subset, step=i)
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
"""Convert a flat argparse.Namespace to a structured DictConfig."""
# Here we are using field values provided in args to override counterparts inside config object
overrides, deletes = override_module_args(args)
# configs will be in fairseq/config after installation
config_path = os.path.join("..", "config")
GlobalHydra.instance().clear()
with initialize(config_path=config_path):
try:
composed_cfg = compose("config", overrides=overrides, strict=False)
        except Exception:
logger.error("Error when composing. Overrides: " + str(overrides))
raise
for k in deletes:
composed_cfg[k] = None
cfg = OmegaConf.create(
OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)
)
# hack to be able to set Namespace in dict config. this should be removed when we update to newer
# omegaconf version that supports object flags, or when we migrate all existing models
from omegaconf import _utils
with omegaconf_no_object_check():
if cfg.task is None and getattr(args, "task", None):
cfg.task = Namespace(**vars(args))
from fairseq.tasks import TASK_REGISTRY
_set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
cfg.task._name = args.task
if cfg.model is None and getattr(args, "arch", None):
cfg.model = Namespace(**vars(args))
from fairseq.models import ARCH_MODEL_REGISTRY
_set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
cfg.model._name = args.arch
if cfg.optimizer is None and getattr(args, "optimizer", None):
cfg.optimizer = Namespace(**vars(args))
from fairseq.optim import OPTIMIZER_REGISTRY
_set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
cfg.optimizer._name = args.optimizer
if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
cfg.lr_scheduler = Namespace(**vars(args))
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
_set_legacy_defaults(
cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler]
)
cfg.lr_scheduler._name = args.lr_scheduler
if cfg.criterion is None and getattr(args, "criterion", None):
cfg.criterion = Namespace(**vars(args))
from fairseq.criterions import CRITERION_REGISTRY
_set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
cfg.criterion._name = args.criterion
OmegaConf.set_struct(cfg, True)
return cfg
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(
override_parser, suppress_defaults=True
)
distributed_utils.call_main(
convert_namespace_to_omegaconf(args), main, override_args=override_args
) | null |
184,049 | import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
def dataset_dest_file(args, output_prefix, lang, extension):
class Binarizer:
def binarize(
filename,
dict,
consumer,
tokenize=tokenize_line,
append_eos=True,
reverse_order=False,
offset=0,
end=-1,
already_numberized=False,
) -> Dict[str, int]:
def replaced_consumer(word, idx):
def binarize_alignments(
filename, alignment_parser, consumer, offset=0, end=-1
) -> Dict[str, int]:
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(
filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
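    # res is a per-worker stats dict (e.g. {"nseq": ..., "ntok": ...,
    # "replaced": ...}) that the parent process merges across workers.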
return res | null |
184,050 | import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
def dataset_dest_file(args, output_prefix, lang, extension):
class Binarizer:
def binarize(
filename,
dict,
consumer,
tokenize=tokenize_line,
append_eos=True,
reverse_order=False,
offset=0,
end=-1,
already_numberized=False,
) -> Dict[str, int]:
def replaced_consumer(word, idx):
def binarize_alignments(
filename, alignment_parser, consumer, offset=0, end=-1
) -> Dict[str, int]:
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl,
vocab_size=None,
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize_alignments(
filename, parse_alignment, consumer, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
return res | null |
184,051 | import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
class Binarizer:
def binarize(
filename,
dict,
consumer,
tokenize=tokenize_line,
append_eos=True,
reverse_order=False,
offset=0,
end=-1,
already_numberized=False,
) -> Dict[str, int]:
def replaced_consumer(word, idx):
def binarize_alignments(
filename, alignment_parser, consumer, offset=0, end=-1
) -> Dict[str, int]:
def get_offsets(input_file, num_workers):
return Binarizer.find_offsets(input_file, num_workers) | null |
184,052 | import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
target = not args.only_source
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]},
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
offsets[worker_id],
offsets[worker_id + 1],
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
merge_result(
Binarizer.binarize(
input_file, vocab, lambda t: ds.add_item(t), offset=0, end=offsets[1]
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
logger.info(
"[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result["nseq"]
input_file = input_prefix
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
offsets[worker_id],
offsets[worker_id + 1],
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl
)
merge_result(
Binarizer.binarize_alignments(
input_file,
utils.parse_alignment,
lambda t: ds.add_item(t),
offset=0,
end=offsets[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(
vocab, validpref, outprefix, lang, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args) | null |
184,054 | import logging
import os
import sys
from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq_cli.train import main as pre_main
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
import hydra
from hydra.core.hydra_config import HydraConfig
import torch
from omegaconf import OmegaConf, open_dict
logger = logging.getLogger("fairseq_cli.hydra_train")
def hydra_main(cfg: FairseqConfig) -> float:
def hydra_init(cfg_name="config") -> None:
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
    except Exception:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
hydra_init(cfg_name)
hydra_main() | null |
184,055 | import logging
import math
import os
import sys
from argparse import Namespace
from typing import Iterable, List, Optional
import torch
import fairseq
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter
from fairseq.sequence_scorer import SequenceScorer
from omegaconf import DictConfig
logger = logging.getLogger("fairseq_cli.eval_lm")
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
"""increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen"""
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return "{}\t{}\t{}\t{}\t{}\t{}".format(
self.word,
self.count,
self.log_prob,
self.is_bpe,
self.next_word_prob,
self.count - self.missing_next_words,
)
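# A minimal sketch (hypothetical values) of how WordStat accumulates statistics:
# add() always sums log_prob, sums next_word_prob only when it is available, and
# otherwise counts the occurrence as a missing next word.
ws = WordStat("hello", is_bpe=False)
ws.add(log_prob=-1.5, next_word_prob=-0.7)
ws.add(log_prob=-2.0, next_word_prob=None)  # e.g. the word ended the example
assert ws.count == 2 and ws.missing_next_words == 1
assert abs(ws.log_prob - (-3.5)) < 1e-9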
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.sum = 0
self.n = 0
self.start_time = None
def start(self):
self.start_time = time.perf_counter()
def stop(self, n=1, prehook=None):
if self.start_time is not None:
if prehook is not None:
prehook()
delta = time.perf_counter() - self.start_time
self.sum = self.sum + delta
self.n = type_as(self.n, n) + n
def reset(self):
self.sum = 0 # cumulative time during which stopwatch was active
self.n = 0 # total n across all start/stop
self.start()
def state_dict(self):
return {
"sum": self.sum,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict["sum"]
self.n = state_dict["n"]
self.start_time = None
self.round = state_dict.get("round", None)
    @property
    def avg(self):
        return self.sum / self.n if self.n > 0 else self.sum
    @property
    def elapsed_time(self):
        if self.start_time is None:
            return 0.0
        return time.perf_counter() - self.start_time
    @property
    def smoothed_value(self) -> float:
        val = self.avg if self.sum > 0 else self.elapsed_time
        if self.round is not None and val is not None:
            val = safe_round(val, self.round)
        return val
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(
self,
tgt_dict,
softmax_batch=None,
compute_alignment=False,
eos=None,
symbols_to_strip_from_output=None,
):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos() if eos is None else eos
self.softmax_batch = softmax_batch or sys.maxsize
assert self.softmax_batch > 0
self.compute_alignment = compute_alignment
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample["target"]
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model(**net_input)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get("attn", None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample["target"] = tgt
curr_prob = model.get_normalized_probs(
bd, log_probs=len(models) == 1, sample=sample
).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(
curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt
)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample["target"] = orig_target
probs = probs.view(sample["target"].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
if torch.is_tensor(attn):
attn = attn.data
else:
attn = attn[0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = (
utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad)
if sample["target"] is not None
else None
)
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
if self.compute_alignment:
alignment = utils.extract_hard_alignment(
avg_attn_i,
sample["net_input"]["src_tokens"][i],
sample["target"][i],
self.pad,
self.eos,
)
else:
alignment = None
else:
avg_attn_i = alignment = None
hypos.append(
[
{
"tokens": ref,
"score": score_i,
"attention": avg_attn_i,
"alignment": alignment,
"positional_scores": avg_probs_i,
}
]
)
return hypos
The provided code snippet includes necessary dependencies for implementing the `eval_lm` function. Write a Python function `def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = False, remove_bos_token: bool = False, device: Optional[torch.device] = None, )` to solve the following problem:
Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter)
Here is the function:
def eval_lm(
models: List[fairseq.models.FairseqModel],
source_dictionary: fairseq.data.Dictionary,
batch_iterator: Iterable,
post_process: Optional[str] = None,
output_word_probs: bool = False,
output_word_stats: bool = False,
target_dictionary: Optional[fairseq.data.Dictionary] = None,
softmax_batch: int = False,
remove_bos_token: bool = False,
device: Optional[torch.device] = None,
):
"""
Args:
models (List[~fairseq.models.FairseqModel]): list of models to
evaluate. Models are essentially `nn.Module` instances, but
must be compatible with fairseq's `SequenceScorer`.
source_dictionary (~fairseq.data.Dictionary): dictionary for
            applying any relevant post processing or outputting word
probs/stats.
batch_iterator (Iterable): yield batches of data
post_process (Optional[str]): post-process text by removing BPE,
letter segmentation, etc. Valid options can be found in
fairseq.data.utils.post_process, although not all options
are implemented here.
output_word_probs (Optional[bool]): output words and their
predicted log probabilities
output_word_stats (Optional[bool]): output word statistics such
as word count and average probability
target_dictionary (Optional[~fairseq.data.Dictionary]): output
dictionary (defaults to *source_dictionary*)
softmax_batch (Optional[bool]): if BxT is more than this, will
batch the softmax over vocab to this amount of tokens, in
order to fit into GPU memory
remove_bos_token (Optional[bool]): if True, confirm that the
first token is the beginning-of-sentence symbol (according
to the relevant dictionary) and remove it from the output
device (Optional[torch.device]): device to use for evaluation
(defaults to device of first model parameter)
"""
if target_dictionary is None:
target_dictionary = source_dictionary
if device is None:
device = next(models[0].parameters()).device
gen_timer = StopwatchMeter()
scorer = SequenceScorer(target_dictionary, softmax_batch)
score_sum = 0.0
count = 0
if post_process is not None:
if post_process in {"subword_nmt", "@@ "}:
bpe_cont = post_process.rstrip()
bpe_toks = {
i
for i in range(len(source_dictionary))
if source_dictionary[i].endswith(bpe_cont)
}
else:
raise NotImplementedError(
"--post-process={post_process} is not implemented"
)
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
for sample in batch_iterator:
if "net_input" not in sample:
continue
sample = utils.move_to_cuda(sample, device=device)
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample["ntokens"])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample["id"][i]
tokens = hypo["tokens"]
tgt_len = tokens.numel()
pos_scores = hypo["positional_scores"].float()
if remove_bos_token:
assert hypo["tokens"][0].item() == target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf"))
if inf_scores.any():
logger.info(
"skipping tokens with inf scores:",
target_dictionary.string(tokens[inf_scores.nonzero()]),
)
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if output_word_probs or output_word_stats:
w = ""
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(
pos_scores[i].item(), next_prob
)
is_bpe = False
w = ""
if output_word_probs:
logger.info(
str(int(sample_id))
+ " "
+ (
"\t".join(
"{} [{:2f}]".format(x[0], x[1]) for x in word_prob
)
)
)
avg_nll_loss = (
-score_sum / count / math.log(2) if count > 0 else 0
) # convert to base 2
logger.info(
"Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format(
gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0
)
)
if output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
logger.info(ws)
return {
"loss": avg_nll_loss,
"perplexity": 2 ** avg_nll_loss,
} | Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter) |
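A hedged usage sketch for `eval_lm`: the checkpoint path and split name below are hypothetical, and the loading/iterator calls can differ across fairseq versions.
from fairseq import checkpoint_utils

models, _saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    ["/path/to/lm_checkpoint.pt"]  # hypothetical checkpoint
)
task.load_dataset("test")
itr = task.get_batch_iterator(
    dataset=task.dataset("test"), max_tokens=4096
).next_epoch_itr(shuffle=False)
results = eval_lm(
    models=models,
    source_dictionary=task.source_dictionary,
    batch_iterator=itr,
    softmax_batch=1024,
)
print(results["loss"], results["perplexity"])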
184,058 | import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from omegaconf import DictConfig
def main(cfg: DictConfig):
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args) | null |
184,059 | import logging, os, sys
import time
import torch
from torch import Tensor
from typing import Dict, List, Optional
import copy
from tqdm import tqdm
from omegaconf import open_dict
import fairseq
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq import utils
from fairseq.data import data_utils
import argparse
def write_result(results, output_file):
with open(output_file, 'w') as f:
for line in results:
f.write(line + '\n') | null |
184,060 | import logging, os, sys
import time
import torch
from torch import Tensor
from typing import Dict, List, Optional
import copy
from tqdm import tqdm
from omegaconf import open_dict
import fairseq
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq import utils
from fairseq.data import data_utils
import argparse
logger = logging.getLogger("inference")
def fairseq_generate(data_lines, args, models, task, batch_size, beam_size, device):
# beam search | greedy decoding implemented by fairseq
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
gen_args = copy.copy(args)
with open_dict(gen_args):
gen_args.beam = beam_size
generator = task.build_generator(models, gen_args)
data_size = len(data_lines)
all_results = []
logger.info(f'Fairseq generate batch {batch_size}, beam {beam_size}')
start = time.perf_counter()
for start_idx in tqdm(range(0, data_size, batch_size)):
batch_lines = [line for line in data_lines[start_idx: min(start_idx + batch_size, data_size)]]
batch_ids = [src_dict.encode_line(sentence, add_if_not_exist=False).long() for sentence in batch_lines]
lengths = torch.LongTensor([t.numel() for t in batch_ids])
batch_dataset = task.build_dataset_for_inference(batch_ids, lengths)
batch_dataset.left_pad_source = True
batch = batch_dataset.collater(batch_dataset)
batch = utils.apply_to_sample(lambda t: t.to(device), batch)
translations = generator.generate(models, batch, prefix_tokens=None)
results = []
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
batched_hypos = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
all_results.extend([tgt_dict.string(hypos[0]['tokens']) for hypos in batched_hypos])
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | null |
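A hedged driver sketch for `fairseq_generate`: the paths are placeholders, and it assumes a recent translation checkpoint whose saved config exposes a `generation` section compatible with the `open_dict` call above.
models, saved_cfg, task = load_model_ensemble_and_task(["/path/to/checkpoint.pt"])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models[0].to(device).eval()
with open("/path/to/test.bpe.de", encoding="utf-8") as f:  # BPE-tokenized input
    data_lines = [line.strip() for line in f]
results, elapsed = fairseq_generate(
    data_lines, saved_cfg.generation, [model], task,
    batch_size=32, beam_size=5, device=device,
)
write_result(results, "/path/to/output.txt")  # write_result as defined earlier
print(f"translated {len(results)} sentences in {elapsed:.1f}s")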
184,061 | import logging, os, sys
import time
import torch
from torch import Tensor
from typing import Dict, List, Optional
import copy
from tqdm import tqdm
from omegaconf import open_dict
import fairseq
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq import utils
from fairseq.data import data_utils
import argparse
logger = logging.getLogger("inference")
def forward_decoder(model,
input_tokens,
encoder_out,
incremental_state,
parallel_forward_start_pos=None,
temperature=1.0,
use_log_softmax=True):
def baseline_generate(data_lines, model, task, device, no_use_logsoft=False, max_len=200):
# simplified greedy decoding
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
data_size = len(data_lines)
all_results = []
start = time.perf_counter()
logger.info(f'Baseline generate')
for start_idx in tqdm(range(0, data_size)):
bpe_line = data_lines[start_idx]
src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long()
net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device),
'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)}
encoder_out = model.encoder.forward_torchscript(net_input)
incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]],
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}))
tokens = [tgt_dict.eos()]
for step in range(0, max_len):
cur_input_tokens = torch.tensor([tokens]).to(device).long()
# scalar
pred_token = forward_decoder(model,
cur_input_tokens,
encoder_out,
incremental_state,
use_log_softmax=not no_use_logsoft).item()
if pred_token == tgt_dict.eos():
break
else:
tokens.append(pred_token)
all_results.append(tgt_dict.string(tokens[1:]))
delta = time.perf_counter() - start
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | null |
184,062 | import logging, os, sys
import time
import torch
from torch import Tensor
from typing import Dict, List, Optional
import copy
from tqdm import tqdm
from omegaconf import open_dict
import fairseq
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq import utils
from fairseq.data import data_utils
import argparse
def forward_decoder(model,
input_tokens,
encoder_out,
incremental_state,
parallel_forward_start_pos=None,
temperature=1.0,
use_log_softmax=True):
decoder_out = model.decoder.forward(input_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
parallel_forward_start_pos=parallel_forward_start_pos)
decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1])
if use_log_softmax:
# 1, len, vocab
probs = model.get_normalized_probs(decoder_out_tuple, log_probs=True, sample=None)
else:
probs = decoder_out_tuple[0]
# len
pred_tokens = torch.argmax(probs, dim=-1).squeeze(0)
return pred_tokens
def construct_hash_sets(sent, min_gram=1, max_gram=3):
hash_dict = {}
for i in range(0, len(sent) - min_gram + 1):
for j in range(min_gram, max_gram+1):
if i + j <= len(sent):
ngram = tuple(sent[i: i+j])
if ngram not in hash_dict:
hash_dict[ngram] = []
hash_dict[ngram].append(i+j)
return hash_dict
def find_hash_sets(hash_set, tokens, min_gram=1, max_gram=3):
for i in range(min_gram, max_gram+1):
if len(tokens) < i:
return -1
ngram = tuple(tokens[-i:])
if ngram not in hash_set:
return -1
if len(hash_set[ngram]) == 1:
return hash_set[ngram][0]
return -1
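# Toy check of the two helpers above: construct_hash_sets indexes every 1- to
# 3-gram of the source by its end position, and find_hash_sets returns the
# source position right after a uniquely matching suffix of the generated
# tokens (or -1 when the suffix is absent or ambiguous).
toy_src = [5, 6, 7, 6, 8]
toy_hash = construct_hash_sets(toy_src)
assert toy_hash[(6,)] == [2, 4]                  # "6" ends at two positions -> ambiguous
assert find_hash_sets(toy_hash, [9, 6]) == -1    # unigram ambiguous, bigram absent
assert find_hash_sets(toy_hash, [9, 7, 6]) == 4  # "7 6" is unique and ends at 4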
def cut_incremental_state(incremental_state, keep_len, encoder_state_ids):
for n in incremental_state:
if n[: n.index('.')] in encoder_state_ids:
continue
for k in incremental_state[n]:
if incremental_state[n][k] is not None:
if incremental_state[n][k].dim() == 4:
incremental_state[n][k] = incremental_state[n][k][:, :, :keep_len]
elif incremental_state[n][k].dim() == 2:
incremental_state[n][k] = incremental_state[n][k][:, :keep_len]
def aggressive_generate(data_lines, model, task, device, no_use_logsoft=False, max_len=200):
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
encoder_state_ids = []
for i in range(len(model.decoder.layers)):
encoder_state_ids.append(model.decoder.layers[i].encoder_attn._incremental_state_id)
data_size = len(data_lines)
all_results = []
start_time = time.perf_counter()
for start_idx in tqdm(range(0, data_size)):
bpe_line = data_lines[start_idx]
src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long()
net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device),
'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)}
encoder_out = model.encoder.forward_torchscript(net_input)
incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]],
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}))
src_tokens_remove_eos_list = src_tokens[:-1].tolist()
src_hash = construct_hash_sets(src_tokens_remove_eos_list)
start = 0
tokens = [tgt_dict.eos()]
while start < len(src_tokens_remove_eos_list) and len(tokens) < max_len + 1:
cur_span_input_tokens = torch.tensor([tokens + src_tokens_remove_eos_list[start:]]).to(device).long()
pred_tokens = forward_decoder(model,
cur_span_input_tokens,
encoder_out,
incremental_state,
parallel_forward_start_pos=len(tokens) - 1,
use_log_softmax=not no_use_logsoft)
pred_judge = pred_tokens.cpu() == src_tokens[start:]
if all(pred_judge):
tokens += src_tokens[start:].tolist()
break
else:
wrong_pos = pred_judge.tolist().index(False)
start += wrong_pos
tokens.extend(pred_tokens.cpu().tolist()[: wrong_pos + 1])
cut_incremental_state(incremental_state, keep_len=len(tokens) - 1, encoder_state_ids=encoder_state_ids)
cur_len = len(tokens)
for step in range(cur_len, max_len + 1):
cur_input_tokens = torch.tensor([tokens]).to(device).long()
pred_token = forward_decoder(model,
cur_input_tokens,
encoder_out,
incremental_state,
use_log_softmax=not no_use_logsoft).item()
if pred_token == tgt_dict.eos():
start = len(src_tokens_remove_eos_list)
break
else:
tokens.append(pred_token)
find_end_idx = find_hash_sets(src_hash, tokens)
if find_end_idx != -1:
start = find_end_idx
if start < len(src_tokens_remove_eos_list):
break
if len(tokens) > max_len + 1:
tokens = tokens[:max_len + 1]
all_results.append(tgt_dict.string(tokens[1:]))
delta = time.perf_counter() - start_time
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | null |
184,063 | import logging, os, sys
import time
import torch
from torch import Tensor
from typing import Dict, List, Optional
import copy
from tqdm import tqdm
from omegaconf import open_dict
import fairseq
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq import utils
from fairseq.data import data_utils
import argparse
def forward_decoder(model,
input_tokens,
encoder_out,
incremental_state,
parallel_forward_start_pos=None,
temperature=1.0,
use_log_softmax=True):
decoder_out = model.decoder.forward(input_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
parallel_forward_start_pos=parallel_forward_start_pos)
decoder_out_tuple = (decoder_out[0].div_(temperature), decoder_out[1])
if use_log_softmax:
# 1, len, vocab
probs = model.get_normalized_probs(decoder_out_tuple, log_probs=True, sample=None)
else:
probs = decoder_out_tuple[0]
# len
pred_tokens = torch.argmax(probs, dim=-1).squeeze(0)
return pred_tokens
def construct_hash_sets(sent, min_gram=1, max_gram=3):
hash_dict = {}
for i in range(0, len(sent) - min_gram + 1):
for j in range(min_gram, max_gram+1):
if i + j <= len(sent):
ngram = tuple(sent[i: i+j])
if ngram not in hash_dict:
hash_dict[ngram] = []
hash_dict[ngram].append(i+j)
return hash_dict
def find_hash_sets(hash_set, tokens, min_gram=1, max_gram=3):
for i in range(min_gram, max_gram+1):
if len(tokens) < i:
return -1
ngram = tuple(tokens[-i:])
if ngram not in hash_set:
return -1
if len(hash_set[ngram]) == 1:
return hash_set[ngram][0]
return -1
def cut_incremental_state(incremental_state, keep_len, encoder_state_ids):
for n in incremental_state:
if n[: n.index('.')] in encoder_state_ids:
continue
for k in incremental_state[n]:
if incremental_state[n][k] is not None:
if incremental_state[n][k].dim() == 4:
incremental_state[n][k] = incremental_state[n][k][:, :, :keep_len]
elif incremental_state[n][k].dim() == 2:
incremental_state[n][k] = incremental_state[n][k][:, :keep_len]
def paper_aggressive_generate(data_lines, model, task, device, no_use_logsoft=False, max_len=200):
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
encoder_state_ids = []
for i in range(len(model.decoder.layers)):
encoder_state_ids.append(model.decoder.layers[i].encoder_attn._incremental_state_id)
data_size = len(data_lines)
all_results = []
start_time = time.perf_counter()
for start_idx in tqdm(range(0, data_size)):
bpe_line = data_lines[start_idx]
src_tokens = src_dict.encode_line(bpe_line, add_if_not_exist=False).long()
net_input = {'src_tokens': src_tokens.unsqueeze(0).to(device),
'src_lengths': torch.LongTensor([src_tokens.numel()]).to(device)}
encoder_out = model.encoder.forward_torchscript(net_input)
incremental_state = torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]],
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}))
src_tokens_remove_eos_list = src_tokens[:-1].tolist()
src_hash = construct_hash_sets(src_tokens_remove_eos_list)
src_tokens_add_pad_list = torch.tensor(src_tokens_remove_eos_list + [-1]) # [..., -1]
tokens = [tgt_dict.eos()]
while (len(tokens) == 1 or tokens[-1] != tgt_dict.eos()) and len(tokens) < max_len + 1:
if len(tokens) == 1:
find_end_idx = 0
else:
find_end_idx = find_hash_sets(src_hash, tokens)
if find_end_idx != -1 and find_end_idx < len(src_tokens_remove_eos_list):
cur_span_input_tokens = torch.tensor([tokens + src_tokens_remove_eos_list[find_end_idx:]]).to(device).long()
pred_tokens = forward_decoder(model,
cur_span_input_tokens,
encoder_out,
incremental_state,
parallel_forward_start_pos=len(tokens) - 1,
use_log_softmax=not no_use_logsoft)
pred_judge = pred_tokens.cpu() == src_tokens_add_pad_list[find_end_idx:]
wrong_pos = pred_judge.tolist().index(False)
tokens.extend(pred_tokens.cpu().tolist()[: wrong_pos + 1])
cut_incremental_state(incremental_state, keep_len=len(tokens) - 1, encoder_state_ids=encoder_state_ids)
else:
cur_input_tokens = torch.tensor([tokens]).to(device).long()
pred_token = forward_decoder(model,
cur_input_tokens,
encoder_out,
incremental_state,
use_log_softmax=not no_use_logsoft).item()
tokens.append(pred_token)
if len(tokens) > max_len + 1:
tokens = tokens[:max_len + 1]
if tokens[-1] == tgt_dict.eos():
tokens = tokens[:-1]
all_results.append(tgt_dict.string(tokens[1:]))
delta = time.perf_counter() - start_time
remove_bpe_results = [line.replace('@@ ', '') for line in all_results]
return remove_bpe_results, delta | null |
184,075 | import ast
import logging
import math
import os
import sys
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def post_process(sentence: str, symbol: str):
if symbol == "sentencepiece":
sentence = sentence.replace(" ", "").replace("\u2581", " ").strip()
elif symbol == "wordpiece":
sentence = sentence.replace(" ", "").replace("_", " ").strip()
elif symbol == "letter":
sentence = sentence.replace(" ", "").replace("|", " ").strip()
elif symbol == "silence":
import re
sentence = sentence.replace("<SIL>", "")
sentence = re.sub(' +', ' ', sentence).strip()
elif symbol == "_EOW":
sentence = sentence.replace(" ", "").replace("_EOW", " ").strip()
elif symbol in {"subword_nmt", "@@ ", "@@"}:
if symbol == "subword_nmt":
symbol = "@@ "
sentence = (sentence + " ").replace(symbol, "").rstrip()
elif symbol == "none":
pass
elif symbol is not None:
raise NotImplementedError(f"Unknown post_process option: {symbol}")
return sentence
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id),
file=res_files["hypo.units"],
)
print(
"{} ({}-{})".format(hyp_words, speaker, id),
file=res_files["hypo.words"],
)
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(tgt_pieces, speaker, id),
file=res_files["ref.units"],
)
print(
"{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
)
# only score top hypothesis
if not args.quiet:
logger.debug("HYPO:" + hyp_words)
logger.debug("TARGET:" + tgt_words)
logger.debug("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words) | null |
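A few concrete input/output pairs for the `post_process` branches above, worked out by hand:
assert post_process("\u2581he llo \u2581wo rld", "sentencepiece") == "hello world"
assert post_process("he@@ llo wo@@ rld", "subword_nmt") == "hello world"
assert post_process("h e l l o | w o r l d |", "letter") == "hello world"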
184,078 | import ast
import logging
import math
import os
import sys
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def main(args, task=None, model_state=None):
def make_parser():
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args) | null |
184,081 | import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LinearizedConvolution,
TransformerDecoderLayer,
TransformerEncoderLayer,
VGGBlock,
)
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args | null |
184,096 | from __future__ import absolute_import, division, print_function, unicode_literals
import re
from collections import deque
from enum import Enum
import numpy as np
class WERTransformer(object):
def __init__(self, hyp_str, ref_str, verbose=True):
def process(self, input):
def report_result(self):
def wer(self):
def stats(self):
def calc_wer(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.wer() | null |
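`WERTransformer` is only stubbed above; for reference, word error rate is edit distance over reference length. A minimal independent sketch (not the WERTransformer implementation) using the editdistance package:
import editdistance

def simple_wer(hyp_str: str, ref_str: str) -> float:
    # WER = (substitutions + insertions + deletions) / reference length
    hyp, ref = hyp_str.split(), ref_str.split()
    return editdistance.eval(hyp, ref) / max(len(ref), 1)

assert abs(simple_wer("a b c", "a x c") - 1 / 3) < 1e-9  # one substitution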
184,129 | import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
)
def base_monotonic_architecture(args):
def transformer_iwslt_de_en(args):
def transformer_monotonic_iwslt_de_en(args):
transformer_iwslt_de_en(args)
    base_monotonic_architecture(args)
184,130 | import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
)
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
def transformer_monotonic_vaswani_wmt_en_de_big(args):
transformer_vaswani_wmt_en_de_big(args) | null |
184,131 | import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
)
def transformer_monotonic_vaswani_wmt_en_fr_big(args):
    # delegate to the imported big en-de base configuration
    transformer_vaswani_wmt_en_de_big(args)
184,132 | import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
)
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
def transformer_unidirectional_iwslt_de_en(args):
transformer_iwslt_de_en(args) | null |
184,133 | import torch
def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
An implementation of cumprod to prevent precision issue.
cumprod(x)
= [x1, x1x2, x1x2x3, ....]
= [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
= exp(cumsum(log(x)))
"""
if (tensor + eps < 0).any().item():
raise RuntimeError(
"Safe cumprod can only take non-negative tensors as input."
"Consider use torch.cumprod if you want to calculate negative values."
)
log_tensor = torch.log(tensor + eps)
cumsum_log_tensor = torch.cumsum(log_tensor, dim)
exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
return exp_cumsum_log_tensor
The provided code snippet includes necessary dependencies for implementing the `exclusive_cumprod` function. Write a Python function `def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10)` to solve the following problem:
Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
Here is the function:
def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
Implementing exclusive cumprod.
There is cumprod in pytorch, however there is no exclusive mode.
cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i]
exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
"""
tensor_size = list(tensor.size())
tensor_size[dim] = 1
return_tensor = safe_cumprod(
torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),
dim=dim,
eps=eps,
)
if dim == 0:
return return_tensor[:-1]
elif dim == 1:
return return_tensor[:, :-1]
elif dim == 2:
return return_tensor[:, :, :-1]
else:
raise RuntimeError("Cumprod on dimension 3 and more is not implemented") | Implementing exclusive cumprod. There is cumprod in pytorch, however there is no exclusive mode. cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i] exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i] |
184,135 | import torch
The provided code snippet includes necessary dependencies for implementing the `moving_sum` function. Write a Python function `def moving_sum(x, start_idx: int, end_idx: int)` to solve the following problem:
From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]]
Here is the function:
def moving_sum(x, start_idx: int, end_idx: int):
"""
From MONOTONIC CHUNKWISE ATTENTION
https://arxiv.org/pdf/1712.05382.pdf
Equation (18)
x = [x_1, x_2, ..., x_N]
MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m
for n in {1, 2, 3, ..., N}
x : src_len, batch_size
start_idx : start idx
end_idx : end idx
Example
src_len = 5
batch_size = 3
x =
[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]]
MovingSum(x, 3, 1) =
[[ 0, 5, 10],
[ 1, 11, 21],
[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39]]
MovingSum(x, 1, 3) =
[[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39],
[ 7, 17, 27],
[ 4, 9, 14]]
"""
assert start_idx > 0 and end_idx > 0
assert len(x.size()) == 2
src_len, batch_size = x.size()
# batch_size, 1, src_len
x = x.t().unsqueeze(1)
    # 1, 1, start_idx + end_idx - 1
moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])
moving_sum = (
torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
)
.squeeze(1)
.t()
)
moving_sum = moving_sum[end_idx:-start_idx]
assert src_len == moving_sum.size(0)
assert batch_size == moving_sum.size(1)
return moving_sum | From MONOTONIC CHUNKWISE ATTENTION https://arxiv.org/pdf/1712.05382.pdf Equation (18) x = [x_1, x_2, ..., x_N] MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m for n in {1, 2, 3, ..., N} x : src_len, batch_size start_idx : start idx end_idx : end idx Example src_len = 5 batch_size = 3 x = [[ 0, 5, 10], [ 1, 6, 11], [ 2, 7, 12], [ 3, 8, 13], [ 4, 9, 14]] MovingSum(x, 3, 1) = [[ 0, 5, 10], [ 1, 11, 21], [ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39]] MovingSum(x, 1, 3) = [[ 3, 18, 33], [ 6, 21, 36], [ 9, 24, 39], [ 7, 17, 27], [ 4, 9, 14]] |
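A runnable check that reproduces the MovingSum(x, 3, 1) example from the docstring:
import torch

x = torch.arange(15.0).view(3, 5).t()  # the 5 x 3 example matrix from the docstring
out = moving_sum(x, start_idx=3, end_idx=1)
expected = torch.tensor(
    [[0.0, 5.0, 10.0],
     [1.0, 11.0, 21.0],
     [3.0, 18.0, 33.0],
     [6.0, 21.0, 36.0],
     [9.0, 24.0, 39.0]]
)
assert torch.allclose(out, expected)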
184,136 | import argparse
import json
import sys
from scorers import build_scorer
from tornado import ioloop, web
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 12321
def add_args():
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME,
help='Server hostname')
parser.add_argument('--port', type=int, default=DEFAULT_PORT,
help='Server port number')
args, _ = parser.parse_known_args()
# fmt: on
return args | null |
184,137 | import argparse
import json
import sys
from scorers import build_scorer
from tornado import ioloop, web
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 12321
class EvalSessionHandler(ScorerHandler):
def post(self):
self.scorer.reset()
def get(self):
r = json.dumps(self.scorer.get_info())
self.write(r)
class ResultHandler(ScorerHandler):
def get(self):
r = json.dumps(self.scorer.score())
self.write(r)
class SourceHandler(ScorerHandler):
def get(self):
sent_id = int(self.get_argument("sent_id"))
segment_size = None
if "segment_size" in self.request.arguments:
string = self.get_argument("segment_size")
if len(string) > 0:
segment_size = int(string)
r = json.dumps(self.scorer.send_src(int(sent_id), segment_size))
self.write(r)
class HypothesisHandler(ScorerHandler):
def put(self):
sent_id = int(self.get_argument("sent_id"))
list_of_tokens = self.request.body.decode("utf-8").strip().split()
self.scorer.recv_hyp(sent_id, list_of_tokens)
def start_server(scorer, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, debug=False):
app = web.Application(
[
(r"/result", ResultHandler, dict(scorer=scorer)),
(r"/src", SourceHandler, dict(scorer=scorer)),
(r"/hypo", HypothesisHandler, dict(scorer=scorer)),
(r"/", EvalSessionHandler, dict(scorer=scorer)),
],
debug=debug,
)
app.listen(port, max_buffer_size=1024 ** 3)
sys.stdout.write(f"Evaluation Server Started. Listening to port {port}\n")
ioloop.IOLoop.current().start() | null |
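A hedged client-side sketch against the endpoints registered above, assuming the server is reachable on the default host and port (the requests package is an extra dependency here):
import requests

base = f"http://{DEFAULT_HOSTNAME}:{DEFAULT_PORT}"
requests.post(f"{base}/")                                        # reset the eval session
info = requests.get(f"{base}/").json()                           # scorer info
src = requests.get(f"{base}/src", params={"sent_id": 0}).json()  # next source segment
requests.put(f"{base}/hypo", params={"sent_id": 0}, data="hello world")
print(requests.get(f"{base}/result").json())                     # corpus-level scores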
184,138 | import argparse
from agents import build_agent
from client import SimulSTEvaluationService, SimulSTLocalEvaluationService
from fairseq.registry import REGISTRIES
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 12321
REGISTRIES = {}
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--hostname", type=str, default=DEFAULT_HOSTNAME, help="server hostname"
)
parser.add_argument(
"--port", type=int, default=DEFAULT_PORT, help="server port number"
)
parser.add_argument("--agent-type", default="simul_trans_text", help="Agent type")
parser.add_argument("--scorer-type", default="text", help="Scorer type")
parser.add_argument(
"--start-idx",
type=int,
default=0,
help="Start index of the sentence to evaluate",
)
parser.add_argument(
"--end-idx",
type=int,
default=float("inf"),
help="End index of the sentence to evaluate",
)
parser.add_argument(
"--scores", action="store_true", help="Request scores from server"
)
parser.add_argument("--reset-server", action="store_true", help="Reset the server")
parser.add_argument(
"--num-threads", type=int, default=10, help="Number of threads used by agent"
)
parser.add_argument(
"--local", action="store_true", default=False, help="Local evaluation"
)
args, _ = parser.parse_known_args()
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
args = parser.parse_args()
return args | null |
184,164 | from typing import NamedTuple, List
from urllib.parse import urlparse
import os, sys
import subprocess
from subprocess import check_call, check_output
import glob
import wget
import re
import multiprocessing as mp
from functools import partial
import pathlib
from collections import OrderedDict
to_data_path = f'{WORKDIR_ROOT}/wmt'
def download_and_extract(
to_folder, lang_pairs, dl_dataset,
to_manually_download_urls,
completed_urls={}, completed_extraction={},
debug=False):
def work_on_wmt(directions, wmt_data):
download_and_extract(
to_data_path,
directions,
wmt_data,
to_manually_download_urls=to_manually_download_urls,
completed_urls=completed_urls, completed_extraction=completed_extraction, debug=True) | null |
184,168 | from fairseq.models import register_model, register_model_architecture
from fairseq.models.multilingual_transformer import MultilingualTransformerModel
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
base_architecture,
)
from .latent_transformer import LatentTransformerDecoder, LatentTransformerEncoder
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
def latent_multilingual_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 24)
args.share_encoders = getattr(args, "share_encoders", True)
args.share_decoders = getattr(args, "share_decoders", True)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", True)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", True)
base_architecture(args) | null |
184,182 | import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pandas as pd
import sentencepiece as sp
from fairseq.data.audio.audio_utils import _get_kaldi_fbank, _get_torchaudio_fbank
from tqdm import tqdm
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n") | null |