Dataset columns:
  id         int64    (values range 0 - 190k)
  prompt     string   (lengths 21 - 13.4M characters)
  docstring  string   (lengths 1 - 12k characters)

Rows (id, prompt, docstring):
181,599
import argparse from pathlib import Path from typing import Callable, List, Optional, Union import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, EMAConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list def get_parser(desc, default_task="translation"): # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) gen_parser_from_dataclass(parser, CommonConfig()) from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): parser.add_argument( "--" + registry_name.replace("_", "-"), default=REGISTRY["default"], choices=REGISTRY["registry"].keys(), ) # Task definitions can be found under fairseq/tasks/ from fairseq.tasks import TASK_REGISTRY parser.add_argument( "--task", metavar="TASK", default=default_task, choices=TASK_REGISTRY.keys(), help="task", ) # fmt: on return parser def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group("dataset_data_loading") gen_parser_from_dataclass(group, DatasetConfig()) # fmt: on return group def add_distributed_training_args(parser, default_world_size=None): group = parser.add_argument_group("distributed_training") if default_world_size is None: default_world_size = max(1, torch.cuda.device_count()) gen_parser_from_dataclass( group, DistributedTrainingConfig(distributed_world_size=default_world_size) ) return group class CommonEvalConfig(FairseqDataclass): path: Optional[str] = field( default=None, metadata={"help": "path(s) to model file(s), colon separated"}, ) post_process: Optional[str] = field( default=None, metadata={ "help": ( "post-process text by removing BPE, letter segmentation, etc. " "Valid options can be found in fairseq.data.utils.post_process." ), "argparse_const": "subword_nmt", "argparse_alias": "--remove-bpe", }, ) quiet: bool = field(default=False, metadata={"help": "only print final scores"}) model_overrides: str = field( default="{}", metadata={ "help": "a dictionary used to override model args at generation that were used during model training" }, ) results_path: Optional[str] = field( default=None, metadata={"help": "path to save eval results (optional)"} ) def gen_parser_from_dataclass( parser: ArgumentParser, dataclass_instance: FairseqDataclass, delete_default: bool = False, with_prefix: Optional[str] = None, ) -> None: """ convert a dataclass instance to tailing parser arguments. If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are building a flat namespace from a structured dataclass (see transformer_config.py for example). 
""" def argparse_name(name: str): if name == "data" and (with_prefix is None or with_prefix == ''): # normally data is positional args, so we don't add the -- nor the prefix return name if name == "_name": # private member, skip return None full_name = "--" + name.replace("_", "-") if with_prefix is not None and with_prefix != '': # if a prefix is specified, construct the prefixed arg name full_name = with_prefix + "-" + full_name[2:] # strip -- when composing return full_name def get_kwargs_from_dc( dataclass_instance: FairseqDataclass, k: str ) -> Dict[str, Any]: """k: dataclass attributes""" kwargs = {} field_type = dataclass_instance._get_type(k) inter_type = interpret_dc_type(field_type) field_default = dataclass_instance._get_default(k) if isinstance(inter_type, type) and issubclass(inter_type, Enum): field_choices = [t.value for t in list(inter_type)] else: field_choices = None field_help = dataclass_instance._get_help(k) field_const = dataclass_instance._get_argparse_const(k) if isinstance(field_default, str) and field_default.startswith("${"): kwargs["default"] = field_default else: if field_default is MISSING: kwargs["required"] = True if field_choices is not None: kwargs["choices"] = field_choices if ( isinstance(inter_type, type) and (issubclass(inter_type, List) or issubclass(inter_type, Tuple)) ) or ("List" in str(inter_type) or "Tuple" in str(inter_type)): if "int" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, int) elif "float" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, float) elif "str" in str(inter_type): kwargs["type"] = lambda x: eval_str_list(x, str) else: raise NotImplementedError( "parsing of type " + str(inter_type) + " is not implemented" ) if field_default is not MISSING: kwargs["default"] = ( ",".join(map(str, field_default)) if field_default is not None else None ) elif ( isinstance(inter_type, type) and issubclass(inter_type, Enum) ) or "Enum" in str(inter_type): kwargs["type"] = str if field_default is not MISSING: if isinstance(field_default, Enum): kwargs["default"] = field_default.value else: kwargs["default"] = field_default elif inter_type is bool: kwargs["action"] = ( "store_false" if field_default is True else "store_true" ) kwargs["default"] = field_default else: kwargs["type"] = inter_type if field_default is not MISSING: kwargs["default"] = field_default # build the help with the hierarchical prefix if with_prefix is not None and with_prefix != '' and field_help is not None: field_help = with_prefix[2:] + ': ' + field_help kwargs["help"] = field_help if field_const is not None: kwargs["const"] = field_const kwargs["nargs"] = "?" return kwargs for k in dataclass_instance._get_all_attributes(): field_name = argparse_name(dataclass_instance._get_name(k)) field_type = dataclass_instance._get_type(k) if field_name is None: continue elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass): # for fields that are of type FairseqDataclass, we can recursively # add their fields to the namespace (so we add the args from model, task, etc. to the root namespace) prefix = None if with_prefix is not None: # if a prefix is specified, then we don't want to copy the subfields directly to the root namespace # but we prefix them with the name of the current field. 
prefix = field_name gen_parser_from_dataclass(parser, field_type(), delete_default, prefix) continue kwargs = get_kwargs_from_dc(dataclass_instance, k) field_args = [field_name] alias = dataclass_instance._get_argparse_alias(k) if alias is not None: field_args.append(alias) if "default" in kwargs: if isinstance(kwargs["default"], str) and kwargs["default"].startswith( "${" ): if kwargs["help"] is None: # this is a field with a name that will be added elsewhere continue else: del kwargs["default"] if delete_default and "default" in kwargs: del kwargs["default"] try: parser.add_argument(*field_args, **kwargs) except ArgumentError: pass def get_validation_parser(default_task=None): parser = get_parser("Validation", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser, default_world_size=1) group = parser.add_argument_group("Evaluation") gen_parser_from_dataclass(group, CommonEvalConfig()) return parser
null
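The row above centers on gen_parser_from_dataclass, which turns dataclass fields into argparse options. Below is a minimal standalone sketch of that dataclass-to-argparse pattern, using only the standard library; the EvalConfig class and add_dataclass_args helper are illustrative, and fairseq-specific details (Enum choices, list parsing, prefixes, aliases) are omitted.

import argparse
from dataclasses import MISSING, dataclass, field, fields

@dataclass
class EvalConfig:
    path: str = field(default=None, metadata={"help": "path(s) to model file(s)"})
    quiet: bool = field(default=False, metadata={"help": "only print final scores"})
    beam: int = field(default=5, metadata={"help": "beam size"})

def add_dataclass_args(parser, dc_cls):
    # turn each dataclass field into a --flag, mirroring the argparse_name()/
    # get_kwargs_from_dc() split in the snippet above
    for f in fields(dc_cls):
        name = "--" + f.name.replace("_", "-")
        if f.type is bool:
            parser.add_argument(name, action="store_true", default=f.default,
                                help=f.metadata.get("help"))
        else:
            parser.add_argument(name, type=f.type,
                                default=None if f.default is MISSING else f.default,
                                help=f.metadata.get("help"))

parser = argparse.ArgumentParser()
add_dataclass_args(parser, EvalConfig)
args = parser.parse_args(["--beam", "10", "--quiet"])
print(args.beam, args.quiet, args.path)  # 10 True None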
181,600
import argparse from pathlib import Path from typing import Callable, List, Optional, Union import torch from fairseq import utils from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, EvalLMConfig, GenerationConfig, InteractiveConfig, OptimizationConfig, EMAConfig, ) from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list def get_training_parser(default_task="translation"): parser = get_parser("Trainer", default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) add_ema_args(parser) add_deepspeed_args(parser) return parser def parse_args_and_arch( parser: argparse.ArgumentParser, input_args: List[str] = None, parse_known: bool = False, suppress_defaults: bool = False, modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None, ): """ Args: parser (ArgumentParser): the parser input_args (List[str]): strings to parse, defaults to sys.argv parse_known (bool): only parse known arguments, similar to `ArgumentParser.parse_known_args` suppress_defaults (bool): parse while ignoring all default values modify_parser (Optional[Callable[[ArgumentParser], None]]): function to modify the parser, e.g., to set default values """ if suppress_defaults: # Parse args without any default values. This requires us to parse # twice, once to identify all the necessary task/model args, and a second # time with all defaults set to None. args = parse_args_and_arch( parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False, ) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace( **{k: v for k, v in vars(args).items() if v is not None} ) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY # Before creating the true parser, we need to import optional user module # in order to eagerly import custom tasks, optimizers, architectures, etc. usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument("--user-dir", default=None) usr_args, _ = usr_parser.parse_known_args(input_args) utils.import_user_module(usr_args) if modify_parser is not None: modify_parser(parser) # The parser doesn't know about model/criterion/optimizer-specific args, so # we parse twice. First we parse the model/criterion/optimizer, then we # parse a second time after adding the *-specific arguments. # If input_args is given, we will parse those args instead of sys.argv. args, _ = parser.parse_known_args(input_args) # Add model-specific args to parser. if hasattr(args, "arch"): model_specific_group = parser.add_argument_group( "Model-specific configuration", # Only include attributes which are explicitly given as command-line # arguments or which have default values. 
argument_default=argparse.SUPPRESS, ) if args.arch in ARCH_MODEL_REGISTRY: ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) elif args.arch in MODEL_REGISTRY: MODEL_REGISTRY[args.arch].add_args(model_specific_group) else: raise RuntimeError() if hasattr(args, "task"): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, "use_bmuf", False): # hack to support extra args for block distributed data parallelism from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) # Add *-specific args to parser. from fairseq.registry import REGISTRIES for registry_name, REGISTRY in REGISTRIES.items(): choice = getattr(args, registry_name, None) if choice is not None: cls = REGISTRY["registry"][choice] if hasattr(cls, "add_args"): cls.add_args(parser) elif hasattr(cls, "__dataclass"): gen_parser_from_dataclass(parser, cls.__dataclass()) # Modify the parser a second time, since defaults may have been reset if modify_parser is not None: modify_parser(parser) # Parse a second time. if parse_known: args, extra = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None # Post-process args. if ( hasattr(args, "batch_size_valid") and args.batch_size_valid is None ) or not hasattr(args, "batch_size_valid"): args.batch_size_valid = args.batch_size if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None: args.max_tokens_valid = args.max_tokens if getattr(args, "memory_efficient_fp16", False): args.fp16 = True if getattr(args, "memory_efficient_bf16", False): args.bf16 = True args.tpu = getattr(args, "tpu", False) args.bf16 = getattr(args, "bf16", False) if args.bf16: args.tpu = True if args.tpu and args.fp16: raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs") if getattr(args, "seed", None) is None: args.seed = 1 # default seed for training args.no_seed_provided = True else: args.no_seed_provided = False if getattr(args, "update_epoch_batch_itr", None) is None: if hasattr(args, "grouped_shuffling"): args.update_epoch_batch_itr = args.grouped_shuffling else: args.grouped_shuffling = False args.update_epoch_batch_itr = False # Apply architecture configuration. if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY: ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return args, extra else: return args def get_args( data: Union[str, Path], task: str = "translation", arch: str = "transformer", **overrides ): parser = get_training_parser(task) args = parse_args_and_arch(parser, [str(data), "--task", task, "--arch", arch]) for k, v in overrides.items(): setattr(args, k, v) return args
null
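parse_args_and_arch in the row above parses twice: first to discover which model/criterion/optimizer was chosen, then again after that component has added its own flags. A self-contained sketch of the same two-pass parse_known_args pattern, with made-up optimizer names for illustration:

import argparse

def add_adam_args(parser):
    parser.add_argument("--adam-betas", default="(0.9, 0.999)")

def add_sgd_args(parser):
    parser.add_argument("--momentum", type=float, default=0.0)

OPTIMIZER_ARGS = {"adam": add_adam_args, "sgd": add_sgd_args}

parser = argparse.ArgumentParser()
parser.add_argument("--optimizer", choices=OPTIMIZER_ARGS, default="adam")

# first pass: only find out which optimizer was requested
known, _ = parser.parse_known_args(["--optimizer", "sgd", "--momentum", "0.9"])

# second pass: add optimizer-specific flags, then parse everything
OPTIMIZER_ARGS[known.optimizer](parser)
args = parser.parse_args(["--optimizer", "sgd", "--momentum", "0.9"])
print(args.momentum)  # 0.9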
181,605
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import utils
from fairseq.model_parallel.models.transformer import ModelParallelTransformerEncoder
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
    roberta_base_architecture,
    roberta_prenorm_architecture,
    RobertaEncoder,
    RobertaModel,
)
from fairseq.modules import LayerNorm


def base_architecture(args):
    ...  # body elided in this snippet


def model_parallel_roberta_base_architecture(args):
    base_architecture(args)
null
181,610
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding import math import numpy as np def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.decoder_layers = getattr(args, "decoder_layers", 6) base_architecture(args) def transformer_iwslt_de_en_dist(args): transformer_iwslt_de_en(args)
null
181,611
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( Embedding, TransformerDecoderEmbedding, TransformerDecoderLayer, TransformerDecoderOutputLayer, TransformerEncoderEmbedding, TransformerEncoderLayer, TransformerEncoderLayerNorm, ) from fairseq.models import ( BaseFairseqModel, FairseqDecoder, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.models.transformer import ( base_architecture, transformer_iwslt_de_en, transformer_wmt_en_de_big, ) from fairseq.modules import SinusoidalPositionalEmbedding import math import numpy as np def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) def transformer_wmt_en_de_big_dist(args): transformer_wmt_en_de_big(args)
null
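The architecture functions in the two rows above normally carry @register_model_architecture decorators, which were stripped in these snippets. A hedged sketch of that registration pattern (the architecture name used here is an illustrative example, not one from the snippets): the first argument names an already-registered model, the second is the new name exposed via --arch.

from fairseq.models import register_model_architecture
from fairseq.models.transformer import transformer_iwslt_de_en

@register_model_architecture("transformer", "transformer_iwslt_de_en_example")
def transformer_iwslt_de_en_example(args):
    # fill in defaults exactly like the functions above, then delegate
    transformer_iwslt_de_en(args)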
181,615
import logging from hydra.core.config_store import ConfigStore from fairseq.dataclass.configs import FairseqConfig from omegaconf import DictConfig, OmegaConf class FairseqConfig(FairseqDataclass): common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() optimization: OptimizationConfig = OptimizationConfig() checkpoint: CheckpointConfig = CheckpointConfig() bmuf: FairseqBMUFConfig = FairseqBMUFConfig() generation: GenerationConfig = GenerationConfig() eval_lm: EvalLMConfig = EvalLMConfig() interactive: InteractiveConfig = InteractiveConfig() model: Any = MISSING task: Any = None criterion: Any = None optimizer: Any = None lr_scheduler: Any = None scoring: Any = None bpe: Any = None tokenizer: Any = None ema: EMAConfig = EMAConfig() REGISTRIES = {} TASK_DATACLASS_REGISTRY = {} MODEL_DATACLASS_REGISTRY = {} ARCH_MODEL_NAME_REGISTRY = {} def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True): if remove_missing: if is_dataclass(dc): target_keys = set(dc.__dataclass_fields__.keys()) else: target_keys = set(dc.keys()) with open_dict(cfg): for k in list(cfg.keys()): if k not in target_keys: del cfg[k] merged_cfg = OmegaConf.merge(dc, cfg) merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"] OmegaConf.set_struct(merged_cfg, True) return merged_cfg The provided code snippet includes necessary dependencies for implementing the `add_defaults` function. Write a Python function `def add_defaults(cfg: DictConfig) -> None` to solve the following problem: This function adds default values that are stored in dataclasses that hydra doesn't know about Here is the function: def add_defaults(cfg: DictConfig) -> None: """This function adds default values that are stored in dataclasses that hydra doesn't know about""" from fairseq.registry import REGISTRIES from fairseq.tasks import TASK_DATACLASS_REGISTRY from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY from fairseq.dataclass.utils import merge_with_parent from typing import Any OmegaConf.set_struct(cfg, False) for k, v in FairseqConfig.__dataclass_fields__.items(): field_cfg = cfg.get(k) if field_cfg is not None and v.type == Any: dc = None if isinstance(field_cfg, str): field_cfg = DictConfig({"_name": field_cfg}) field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"] name = getattr(field_cfg, "_name", None) if k == "task": dc = TASK_DATACLASS_REGISTRY.get(name) elif k == "model": name = ARCH_MODEL_NAME_REGISTRY.get(name, name) dc = MODEL_DATACLASS_REGISTRY.get(name) elif k in REGISTRIES: dc = REGISTRIES[k]["dataclass_registry"].get(name) if dc is not None: cfg[k] = merge_with_parent(dc, field_cfg)
This function adds default values that are stored in dataclasses that hydra doesn't know about
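add_defaults above merges dataclass-defined defaults into a partially-populated hydra config. The core merge step can be illustrated with plain OmegaConf, without fairseq's registries or merge_with_parent; the AdamConfig dataclass below is a made-up stand-in for a registered component dataclass.

from dataclasses import dataclass
from omegaconf import OmegaConf

@dataclass
class AdamConfig:
    _name: str = "adam"
    lr: float = 0.0005
    adam_betas: str = "(0.9, 0.999)"
    weight_decay: float = 0.0

# partial config, e.g. what hydra knows about before defaults are added
user_cfg = OmegaConf.create({"lr": 0.001})

# dataclass defaults win only where the user config is silent
merged = OmegaConf.merge(OmegaConf.structured(AdamConfig), user_cfg)
print(merged.lr, merged.weight_decay)  # 0.001 0.0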
181,617
import ast import inspect import logging import os import re from argparse import ArgumentError, ArgumentParser, Namespace from dataclasses import _MISSING_TYPE, MISSING, is_dataclass from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Type from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import FairseqConfig from hydra.core.global_hydra import GlobalHydra from hydra.experimental import compose, initialize from omegaconf import DictConfig, OmegaConf, open_dict, _utils logger = logging.getLogger(__name__) def _set_legacy_defaults(args, cls): """Helper to set default arguments based on *add_args*.""" if not hasattr(cls, "add_args"): return import argparse parser = argparse.ArgumentParser( argument_default=argparse.SUPPRESS, allow_abbrev=False ) cls.add_args(parser) # copied from argparse.py: defaults = argparse.Namespace() for action in parser._actions: if action.dest is not argparse.SUPPRESS: if not hasattr(defaults, action.dest): if action.default is not argparse.SUPPRESS: setattr(defaults, action.dest, action.default) for key, default_value in vars(defaults).items(): if not hasattr(args, key): setattr(args, key, default_value) def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]: """use the field in args to overrides those in cfg""" overrides = [] deletes = [] for k in FairseqConfig.__dataclass_fields__.keys(): overrides.extend( _override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args) ) if args is not None: if hasattr(args, "task"): from fairseq.tasks import TASK_DATACLASS_REGISTRY migrate_registry( "task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes ) else: deletes.append("task") # these options will be set to "None" if they have not yet been migrated # so we can populate them with the entire flat args CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"} from fairseq.registry import REGISTRIES for k, v in REGISTRIES.items(): if hasattr(args, k): migrate_registry( k, getattr(args, k), v["dataclass_registry"], args, overrides, deletes, use_name_as_val=k not in CORE_REGISTRIES, ) else: deletes.append(k) no_dc = True if hasattr(args, "arch"): from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY if args.arch in ARCH_MODEL_REGISTRY: m_cls = ARCH_MODEL_REGISTRY[args.arch] dc = getattr(m_cls, "__dataclass", None) if dc is not None: m_name = ARCH_MODEL_NAME_REGISTRY[args.arch] overrides.append("model={}".format(m_name)) overrides.append("model._name={}".format(args.arch)) # override model params with those exist in args overrides.extend(_override_attr("model", dc, args)) no_dc = False if no_dc: deletes.append("model") return overrides, deletes class omegaconf_no_object_check: def __init__(self): self.old_is_primitive = _utils.is_primitive_type def __enter__(self): _utils.is_primitive_type = lambda _: True def __exit__(self, type, value, traceback): _utils.is_primitive_type = self.old_is_primitive ARCH_MODEL_REGISTRY = {} TASK_REGISTRY = {} The provided code snippet includes necessary dependencies for implementing the `convert_namespace_to_omegaconf` function. Write a Python function `def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig` to solve the following problem: Convert a flat argparse.Namespace to a structured DictConfig. 
Here is the function: def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg
Convert a flat argparse.Namespace to a structured DictConfig.
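As a rough illustration of what "flat Namespace to structured DictConfig" means, the naive conversion is just a dict copy; the real convert_namespace_to_omegaconf above additionally composes the hydra config tree, routes each key into its config group, and back-fills legacy defaults. The Namespace values below are illustrative.

from argparse import Namespace
from omegaconf import OmegaConf

args = Namespace(task="translation", arch="transformer", lr=[0.0005], max_epoch=10)

# flat conversion only; no grouping, validation, or legacy-default handling
cfg = OmegaConf.create(vars(args))
print(cfg.task, cfg.max_epoch)  # translation 10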
181,618
import ast import inspect import logging import os import re from argparse import ArgumentError, ArgumentParser, Namespace from dataclasses import _MISSING_TYPE, MISSING, is_dataclass from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Type from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import FairseqConfig from hydra.core.global_hydra import GlobalHydra from hydra.experimental import compose, initialize from omegaconf import DictConfig, OmegaConf, open_dict, _utils REGISTRIES = {} def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]): # this will be deprecated when we get rid of argparse and model_overrides logic from fairseq.registry import REGISTRIES with open_dict(cfg): for k in cfg.keys(): # "k in cfg" will return false if its a "mandatory value (e.g. ???)" if k in cfg and isinstance(cfg[k], DictConfig): if k in overrides and isinstance(overrides[k], dict): for ok, ov in overrides[k].items(): if isinstance(ov, dict) and cfg[k][ok] is not None: overwrite_args_by_name(cfg[k][ok], ov) else: cfg[k][ok] = ov else: overwrite_args_by_name(cfg[k], overrides) elif k in cfg and isinstance(cfg[k], Namespace): for override_key, val in overrides.items(): setattr(cfg[k], override_key, val) elif k in overrides: if ( k in REGISTRIES and overrides[k] in REGISTRIES[k]["dataclass_registry"] ): cfg[k] = DictConfig( REGISTRIES[k]["dataclass_registry"][overrides[k]] ) overwrite_args_by_name(cfg[k], overrides) cfg[k]._name = overrides[k] else: cfg[k] = overrides[k]
null
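overwrite_args_by_name above walks a DictConfig and applies nested overrides, relying on open_dict to relax struct mode. A tiny self-contained demo of that open_dict override step (the config keys are illustrative):

from omegaconf import OmegaConf, open_dict

cfg = OmegaConf.create({"model": {"_name": "transformer", "dropout": 0.1}})
OmegaConf.set_struct(cfg, True)

overrides = {"dropout": 0.3, "encoder_layers": 12}

# open_dict temporarily relaxes struct mode so new keys may be added
with open_dict(cfg.model):
    for k, v in overrides.items():
        cfg.model[k] = v

print(cfg.model.dropout, cfg.model.encoder_layers)  # 0.3 12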
181,621
import multiprocessing import os import pdb import sys class MultiprocessingPdb(pdb.Pdb): """A Pdb wrapper that works in a multiprocessing environment. Usage: `from fairseq import pdb; pdb.set_trace()` """ def __init__(self): pdb.Pdb.__init__(self, nosigint=True) def _cmdloop(self): stdin_bak = sys.stdin with _stdin_lock: try: if _stdin_fd is not None: if not _stdin[0]: _stdin[0] = os.fdopen(_stdin_fd) sys.stdin = _stdin[0] self.cmdloop() finally: sys.stdin = stdin_bak import pdb import sys sys.modules["fairseq.distributed_utils"] = distributed_utils sys.modules["fairseq.meters"] = meters sys.modules["fairseq.metrics"] = metrics sys.modules["fairseq.progress_bar"] = progress_bar def set_trace(): pdb = MultiprocessingPdb() pdb.set_trace(sys._getframe().f_back)
null
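The MultiprocessingPdb docstring above gives the entry point (`from fairseq import pdb; pdb.set_trace()`). A short hedged usage sketch, assuming fairseq is installed; the worker function and spawn call are illustrative, and breaking into the debugger is of course interactive.

import torch.multiprocessing as mp
from fairseq import pdb  # fairseq's multiprocessing-safe pdb wrapper

def worker(rank):
    x = rank * 2
    pdb.set_trace()  # workers share the parent's stdin via the lock shown above
    return x

if __name__ == "__main__":
    mp.spawn(worker, nprocs=2)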
181,626
import io import logging import os import pickle import random import socket import struct import subprocess import warnings from argparse import Namespace from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import torch import torch.distributed as dist from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig from omegaconf import open_dict class DistributedTrainingConfig(FairseqDataclass): distributed_world_size: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of GPUs across all nodes (default: all visible GPUs)" }, ) distributed_num_procs: Optional[int] = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of processes to fork (default: all visible GPUs)" }, ) distributed_rank: Optional[int] = field( default=0, metadata={"help": "rank of the current worker"} ) distributed_backend: str = field( default="nccl", metadata={"help": "distributed backend"} ) distributed_init_method: Optional[str] = field( default=None, metadata={ "help": "typically tcp://hostname:port that will be used to " "establish initial connetion" }, ) distributed_port: int = field( default=-1, metadata={ "help": "port number (not required if using --distributed-init-method)" }, ) device_id: int = field( default=0, metadata={ "help": "which GPU to use (usually configured automatically)", "argparse_alias": "--local_rank", }, ) distributed_no_spawn: bool = field( default=False, metadata={ "help": "do not spawn multiple processes even if multiple GPUs are visible" }, ) ddp_backend: DDP_BACKEND_CHOICES = field( default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"} ) ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field( default="none", metadata={"help": "communication hook"} ) bucket_cap_mb: int = field( default=25, metadata={"help": "bucket size for reduction"} ) fix_batches_to_gpus: bool = field( default=False, metadata={ "help": "don't shuffle batches between GPUs; this reduces overall " "randomness and may affect precision but avoids the cost of re-reading the data" }, ) find_unused_parameters: bool = field( default=False, metadata={ "help": "disable unused parameter detection (not applicable to " "--ddp-backend=legacy_ddp)" }, ) gradient_as_bucket_view: bool = field( default=False, metadata={ "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. 
" "--gradient-as-bucket-view=gradient_as_bucket_view)" }, ) fast_stat_sync: bool = field( default=False, metadata={"help": "[deprecated] this is now defined per Criterion"}, ) heartbeat_timeout: int = field( default=-1, metadata={ "help": "kill the job if no progress is made in N seconds; " "set to -1 to disable" }, ) broadcast_buffers: bool = field( default=False, metadata={ "help": "Copy non-trainable parameters between GPUs, such as " "batchnorm population statistics" }, ) slowmo_momentum: Optional[float] = field( default=None, metadata={ "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, " "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs" }, ) slowmo_algorithm: str = field( default="LocalSGD", metadata={"help": "whether to use LocalSGD or SGP"} ) localsgd_frequency: int = field( default=3, metadata={"help": "Local SGD allreduce frequency"} ) nprocs_per_node: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "number of GPUs in each node. An allreduce operation across GPUs in " "a node is very fast. Hence, we do allreduce across GPUs in a node, " "and gossip across different nodes" }, ) pipeline_model_parallel: bool = field( default=False, metadata={"help": "if set, use pipeline model parallelism across GPUs"}, ) pipeline_balance: Optional[str] = field( default=None, metadata={ "help": "partition the model into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_balance) " "should equal the total number of layers in the model" }, ) pipeline_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-balance argument" }, ) pipeline_chunks: Optional[int] = field( default=0, metadata={"help": "microbatch count for pipeline model parallelism"} ) pipeline_encoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel encoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_encoder_balance) " "should equal the total number of encoder layers in the model" }, ) pipeline_encoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-encoder-balance argument" }, ) pipeline_decoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel decoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_decoder_balance) " "should equal the total number of decoder layers in the model" }, ) pipeline_decoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. 
The length of this list should " "equal the length of the --pipeline-decoder-balance argument" }, ) pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field( default="never", metadata={"help": "checkpointing mode for pipeline model parallelism"}, ) zero_sharding: ZERO_SHARDING_CHOICES = field( default="none", metadata={"help": "ZeRO sharding"} ) fp16: bool = II("common.fp16") memory_efficient_fp16: bool = II("common.memory_efficient_fp16") tpu: bool = II("common.tpu") # configuration for --ddp-backend=fully_sharded no_reshard_after_forward: bool = field( default=False, metadata={"help": "don't reshard parameters after forward pass"}, ) fp32_reduce_scatter: bool = field( default=False, metadata={"help": "reduce-scatter grads in FP32"}, ) cpu_offload: bool = field( default=False, metadata={"help": "offload FP32 params to CPU"} ) use_sharded_state: bool = field( default=False, metadata={"help": "use sharded checkpoint files"}, ) def is_local_master(cfg: DistributedTrainingConfig): # local master rank return cfg.distributed_rank % cfg.nprocs_per_node == 0
null
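The row above ends with is_local_master, which only reads two fields of DistributedTrainingConfig. A small self-contained check of that logic, using a plain stand-in object so the full dataclass does not have to be constructed:

from types import SimpleNamespace

def is_local_master(cfg):
    # as defined above: rank 0 within each node is the local master
    return cfg.distributed_rank % cfg.nprocs_per_node == 0

cfg = SimpleNamespace(distributed_rank=8, nprocs_per_node=8)
print(is_local_master(cfg))  # True: global rank 8 is rank 0 of the second 8-GPU node

cfg.distributed_rank = 3
print(is_local_master(cfg))  # False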
181,627
import io import logging import os import pickle import random import socket import struct import subprocess import warnings from argparse import Namespace from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import torch import torch.distributed as dist from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig from omegaconf import open_dict try: import torch_xla.core.xla_model as xm except ImportError: xm = None def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False): if cfg.distributed_init_method is not None or cfg.tpu: return num_pipelines_per_node = None if cfg.pipeline_model_parallel: num_pipeline_devices, num_pipelines_per_node = _pipeline_parallel_pre_init(cfg) if all( key in os.environ for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"] ): # support torch.distributed.launch _infer_torch_distributed_launch_init(cfg) elif cfg.distributed_port > 0: # we can determine the init method automatically for Slurm _infer_slurm_init(cfg, num_pipelines_per_node) elif cfg.distributed_world_size > 1 or force_distributed: # fallback for single node with multiple GPUs _infer_single_node_init(cfg) if cfg.pipeline_model_parallel: _pipeline_parallel_post_init(cfg, num_pipeline_devices, num_pipelines_per_node) elif not cfg.distributed_no_spawn: with open_dict(cfg): cfg.distributed_num_procs = min( torch.cuda.device_count(), cfg.distributed_world_size ) def distributed_main(i, main, cfg: FairseqConfig, kwargs): cfg.distributed_training.device_id = i if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i cfg.distributed_training.distributed_rank = distributed_init(cfg) after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None) if after_distributed_init_fn: cfg = after_distributed_init_fn(cfg) main(cfg, **kwargs) if torch.distributed.is_initialized(): torch.distributed.barrier(get_global_group()) class FairseqConfig(FairseqDataclass): common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() optimization: OptimizationConfig = OptimizationConfig() checkpoint: CheckpointConfig = CheckpointConfig() bmuf: FairseqBMUFConfig = FairseqBMUFConfig() generation: GenerationConfig = GenerationConfig() eval_lm: EvalLMConfig = EvalLMConfig() interactive: InteractiveConfig = InteractiveConfig() model: Any = MISSING task: Any = None criterion: Any = None optimizer: Any = None lr_scheduler: Any = None scoring: Any = None bpe: Any = None tokenizer: Any = None ema: EMAConfig = EMAConfig() def call_main(cfg: FairseqConfig, main, **kwargs): if cfg.distributed_training.distributed_init_method is None: infer_init_method(cfg.distributed_training) if cfg.distributed_training.distributed_init_method is not None: # distributed training if not cfg.distributed_training.distributed_no_spawn: start_rank = cfg.distributed_training.distributed_rank cfg.distributed_training.distributed_rank = None # assign automatically kwargs["start_rank"] = start_rank torch.multiprocessing.spawn( fn=distributed_main, args=(main, cfg, kwargs), nprocs=min( torch.cuda.device_count(), cfg.distributed_training.distributed_world_size, 
), join=True, ) else: distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs) elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1: import torch_xla.distributed.xla_multiprocessing as xmp torch.multiprocessing.set_sharing_strategy("file_system") xmp.spawn( fn=distributed_main, args=(main, cfg, kwargs), # tpu-comment: # 8 devices in one TPU VM, is the max processes to be spawned. # The rest is driven by xm.distributed.xla_dist nprocs=min(cfg.distributed_training.distributed_world_size, 8), ) else: # single GPU main main(cfg, **kwargs)
null
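call_main above forks one process per GPU with torch.multiprocessing.spawn and falls back to a single process otherwise. A stripped-down sketch of that spawn pattern with a dummy worker; the real code also assigns ranks, runs distributed_init, and handles TPU spawning.

import torch
import torch.multiprocessing as mp

def distributed_main(device_id, world_size):
    # in fairseq this would set the CUDA device, call distributed_init(), then run main(cfg)
    print(f"worker {device_id}/{world_size} starting")

def call_main(world_size):
    if world_size > 1:
        mp.spawn(fn=distributed_main, args=(world_size,),
                 nprocs=min(torch.cuda.device_count() or 1, world_size), join=True)
    else:
        distributed_main(0, world_size)  # single-process fallback

if __name__ == "__main__":
    call_main(world_size=1)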
181,636
import contextlib from typing import Optional import torch from fairseq.dataclass.configs import DistributedTrainingConfig from fairseq.distributed import utils as dist_utils try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP has_FSDP = True except ImportError: FSDP = torch.nn.Module has_FSDP = False class FullyShardedDataParallel(FSDP): """ A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some fairseq-specific checkpoint saving/loading logic. Args: use_sharded_state (bool): if True, then ``state_dict`` will return ``FSDP.local_state_dict`` and ``load_state_dict`` will call ``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will return the full model weights on data parallel rank 0 (empty on other ranks) and ``load_state_dict`` will broadcast model weights from rank 0 to other ranks. """ def __init__(self, *args, use_sharded_state: bool = False, **kwargs): if not has_FSDP: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) super().__init__(*args, **kwargs) self.use_sharded_state = use_sharded_state def unwrapped_module(self) -> torch.nn.Module: if self.flatten_parameters: return self.module.module else: return self.module def state_dict(self, destination=None, prefix="", keep_vars=False): if self.use_sharded_state: return super().local_state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) else: if self.rank == 0: return super().state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) else: # We must call state_dict() due to use of communication # primitives. But we don't use the result. super().state_dict() return destination or {} def load_state_dict(self, state_dict, strict=True, model_cfg=None): if self.use_sharded_state: return super().load_local_state_dict(state_dict, strict=strict) else: state_dict = dist_utils.broadcast_object( state_dict, src_rank=0, group=self.process_group ) return super().load_state_dict(state_dict, strict=strict) class DistributedTrainingConfig(FairseqDataclass): distributed_world_size: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of GPUs across all nodes (default: all visible GPUs)" }, ) distributed_num_procs: Optional[int] = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "total number of processes to fork (default: all visible GPUs)" }, ) distributed_rank: Optional[int] = field( default=0, metadata={"help": "rank of the current worker"} ) distributed_backend: str = field( default="nccl", metadata={"help": "distributed backend"} ) distributed_init_method: Optional[str] = field( default=None, metadata={ "help": "typically tcp://hostname:port that will be used to " "establish initial connetion" }, ) distributed_port: int = field( default=-1, metadata={ "help": "port number (not required if using --distributed-init-method)" }, ) device_id: int = field( default=0, metadata={ "help": "which GPU to use (usually configured automatically)", "argparse_alias": "--local_rank", }, ) distributed_no_spawn: bool = field( default=False, metadata={ "help": "do not spawn multiple processes even if multiple GPUs are visible" }, ) ddp_backend: DDP_BACKEND_CHOICES = field( default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"} ) ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field( default="none", metadata={"help": "communication hook"} ) bucket_cap_mb: int = field( default=25, metadata={"help": "bucket size for reduction"} ) fix_batches_to_gpus: 
bool = field( default=False, metadata={ "help": "don't shuffle batches between GPUs; this reduces overall " "randomness and may affect precision but avoids the cost of re-reading the data" }, ) find_unused_parameters: bool = field( default=False, metadata={ "help": "disable unused parameter detection (not applicable to " "--ddp-backend=legacy_ddp)" }, ) gradient_as_bucket_view: bool = field( default=False, metadata={ "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. " "--gradient-as-bucket-view=gradient_as_bucket_view)" }, ) fast_stat_sync: bool = field( default=False, metadata={"help": "[deprecated] this is now defined per Criterion"}, ) heartbeat_timeout: int = field( default=-1, metadata={ "help": "kill the job if no progress is made in N seconds; " "set to -1 to disable" }, ) broadcast_buffers: bool = field( default=False, metadata={ "help": "Copy non-trainable parameters between GPUs, such as " "batchnorm population statistics" }, ) slowmo_momentum: Optional[float] = field( default=None, metadata={ "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, " "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs" }, ) slowmo_algorithm: str = field( default="LocalSGD", metadata={"help": "whether to use LocalSGD or SGP"} ) localsgd_frequency: int = field( default=3, metadata={"help": "Local SGD allreduce frequency"} ) nprocs_per_node: int = field( default=max(1, torch.cuda.device_count()), metadata={ "help": "number of GPUs in each node. An allreduce operation across GPUs in " "a node is very fast. Hence, we do allreduce across GPUs in a node, " "and gossip across different nodes" }, ) pipeline_model_parallel: bool = field( default=False, metadata={"help": "if set, use pipeline model parallelism across GPUs"}, ) pipeline_balance: Optional[str] = field( default=None, metadata={ "help": "partition the model into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_balance) " "should equal the total number of layers in the model" }, ) pipeline_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-balance argument" }, ) pipeline_chunks: Optional[int] = field( default=0, metadata={"help": "microbatch count for pipeline model parallelism"} ) pipeline_encoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel encoder into N_K pieces, where each piece " "contains N_i layers. The sum(args.pipeline_encoder_balance) " "should equal the total number of encoder layers in the model" }, ) pipeline_encoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-encoder-balance argument" }, ) pipeline_decoder_balance: Optional[str] = field( default=None, metadata={ "help": "partition the pipeline parallel decoder into N_K pieces, where each piece " "contains N_i layers. 
The sum(args.pipeline_decoder_balance) " "should equal the total number of decoder layers in the model" }, ) pipeline_decoder_devices: Optional[str] = field( default=None, metadata={ "help": "a list of device indices indicating which device to place " "each of the N_K partitions. The length of this list should " "equal the length of the --pipeline-decoder-balance argument" }, ) pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field( default="never", metadata={"help": "checkpointing mode for pipeline model parallelism"}, ) zero_sharding: ZERO_SHARDING_CHOICES = field( default="none", metadata={"help": "ZeRO sharding"} ) fp16: bool = II("common.fp16") memory_efficient_fp16: bool = II("common.memory_efficient_fp16") tpu: bool = II("common.tpu") # configuration for --ddp-backend=fully_sharded no_reshard_after_forward: bool = field( default=False, metadata={"help": "don't reshard parameters after forward pass"}, ) fp32_reduce_scatter: bool = field( default=False, metadata={"help": "reduce-scatter grads in FP32"}, ) cpu_offload: bool = field( default=False, metadata={"help": "offload FP32 params to CPU"} ) use_sharded_state: bool = field( default=False, metadata={"help": "use sharded checkpoint files"}, ) def fsdp_enable_wrap(cfg: DistributedTrainingConfig): try: from fairscale.nn import enable_wrap except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) if cfg.memory_efficient_fp16: assert cfg.fp16 # memory_efficient_fp16 should imply fp16 group = dist_utils.get_data_parallel_group() if group is None and cfg.distributed_world_size == 1: from fairscale.utils.testing import DummyProcessGroup group = DummyProcessGroup(rank=0, size=1) fsdp_config = { "process_group": group, "reshard_after_forward": not cfg.no_reshard_after_forward, "mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16, "fp32_reduce_scatter": cfg.fp32_reduce_scatter, "flatten_parameters": not cfg.not_fsdp_flatten_parameters, "cpu_offload": cfg.cpu_offload, "compute_dtype": torch.float16 if cfg.fp16 else torch.float32, "bucket_cap_mb": cfg.bucket_cap_mb, "state_dict_device": torch.device("cpu"), # reduce GPU mem usage } with enable_wrap( wrapper_cls=FullyShardedDataParallel, use_sharded_state=cfg.use_sharded_state, **fsdp_config, ): yield
null
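fsdp_enable_wrap above ends in a bare yield, and the module imports contextlib, so it is presumably meant to be used as a context manager; the @contextlib.contextmanager decorator did not survive this extraction. A self-contained sketch of that generator-as-context-manager shape (the function and config dict below are illustrative, not fairseq's API):

import contextlib

@contextlib.contextmanager
def enable_feature(cfg):
    # mirrors the shape of fsdp_enable_wrap: validate cfg, build kwargs, then yield
    print("entering wrap context for", cfg["name"])
    try:
        yield
    finally:
        print("leaving wrap context")

with enable_feature({"name": "fully_sharded"}):
    pass  # modules constructed here would be wrapped by the enclosing context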
181,651
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import utils
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import safe_getattr, safe_hasattr

from .hub_interface import RobertaHubInterface


def base_architecture(args):
    ...  # body elided in this snippet


def safe_getattr(obj, k, default=None):
    """Returns obj[k] if it exists and is not None, otherwise returns default."""
    # body as given in the transformer_lm snippets later in this dump
    from omegaconf import OmegaConf

    if OmegaConf.is_config(obj):
        return obj[k] if k in obj and obj[k] is not None else default

    return getattr(obj, k, default)


def roberta_prenorm_architecture(args):
    args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False)
    args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True)
    base_architecture(args)
null
181,652
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import utils
from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import safe_getattr, safe_hasattr

from .hub_interface import RobertaHubInterface


def base_architecture(args):
    ...  # body elided in this snippet


def roberta_base_architecture(args):
    base_architecture(args)
null
181,660
import logging
from typing import Optional

import torch
import torch.nn as nn

from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params

from .hub_interface import BARTHubInterface


def mbart_base_architecture(args):
    ...  # body elided in this snippet


def mbart_base_wmt20_architecture(args):
    args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
    mbart_base_architecture(args)
null
181,661
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_baevski_wiki103(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 16) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.dropout = safe_getattr(args, "dropout", 0.3) args.adaptive_input = safe_getattr(args, "adaptive_input", True) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) args.adaptive_input_cutoff = safe_getattr( args, "adaptive_input_cutoff", "20000,60000" ) args.adaptive_softmax_cutoff = safe_getattr( args, "adaptive_softmax_cutoff", "20000,60000" ) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) transformer_lm_big(args)
null
181,662
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def transformer_lm_big(args): args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_baevski_gbw(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) transformer_lm_big(args)
null
181,663
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr( args, "decoder_input_dim", args.decoder_embed_dim ) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, "scale_fc", False) args.scale_attn = safe_getattr(args, "scale_attn", False) args.scale_heads = safe_getattr(args, "scale_heads", False) args.scale_resids = safe_getattr(args, "scale_resids", False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
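The architecture functions in the record above only fill in values the user has not already set; anything already present on the parsed args wins. A minimal, self-contained sketch of that pattern (standard library only, not fairseq itself), assuming the same safe_getattr-style defaulting on an argparse Namespace:

# Minimal sketch of the "fill defaults only if absent" pattern used by
# base_lm_architecture / transformer_lm_gpt above.
from argparse import Namespace

def safe_getattr(obj, k, default=None):
    # Same contract as the helper above: keep an existing non-None value,
    # otherwise fall back to the default.
    value = getattr(obj, k, None)
    return value if value is not None else default

def tiny_lm_architecture(args):
    # Mirror how transformer_lm_gpt layers its defaults onto args.
    args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768)
    args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
    args.activation_fn = safe_getattr(args, "activation_fn", "gelu")

args = Namespace(decoder_layers=6)   # pretend this came from the command line
tiny_lm_architecture(args)
print(args.decoder_embed_dim, args.decoder_layers, args.activation_fn)
# 768 6 gelu  (the explicit override survives, everything else gets defaults)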
181,664
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr( args, "decoder_input_dim", args.decoder_embed_dim ) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, "scale_fc", False) args.scale_attn = safe_getattr(args, "scale_attn", False) args.scale_heads = safe_getattr(args, "scale_heads", False) args.scale_resids = safe_getattr(args, "scale_resids", False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_small(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
181,665
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr( args, "decoder_input_dim", args.decoder_embed_dim ) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, "scale_fc", False) args.scale_attn = safe_getattr(args, "scale_attn", False) args.scale_heads = safe_getattr(args, "scale_heads", False) args.scale_resids = safe_getattr(args, "scale_resids", False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_tiny(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) args.decoder_layers = safe_getattr(args, "decoder_layers", 2) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
181,666
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr( args, "decoder_input_dim", args.decoder_embed_dim ) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, "scale_fc", False) args.scale_attn = safe_getattr(args, "scale_attn", False) args.scale_heads = safe_getattr(args, "scale_heads", False) args.scale_resids = safe_getattr(args, "scale_resids", False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_medium(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) args.decoder_layers = safe_getattr(args, "decoder_layers", 36) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
181,667
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_lm_architecture(args): # backward compatibility for older model checkpoints if safe_hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if safe_hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = safe_getattr(args, "decoder_layers", 6) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.activation_fn = safe_getattr(args, "activation_fn", "relu") args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) args.base_layers = safe_getattr(args, "base_layers", 0) args.base_sublayers = safe_getattr(args, "base_sublayers", 1) args.base_shuffle = safe_getattr(args, "base_shuffle", False) args.add_bos_token = safe_getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = safe_getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = safe_getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = safe_getattr(args, "character_embeddings", False) args.decoder_output_dim = safe_getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = safe_getattr( args, "decoder_input_dim", args.decoder_embed_dim ) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) args.adaptive_input = safe_getattr(args, "adaptive_input", False) args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) args.layernorm_embedding = safe_getattr(args, 
"layernorm_embedding", False) args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) args.offload_activations = safe_getattr(args, "offload_activations", False) args.scale_fc = safe_getattr(args, "scale_fc", False) args.scale_attn = safe_getattr(args, "scale_attn", False) args.scale_heads = safe_getattr(args, "scale_heads", False) args.scale_resids = safe_getattr(args, "scale_resids", False) if args.offload_activations: args.checkpoint_activations = True def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt2_big(args): args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) args.decoder_layers = safe_getattr(args, "decoder_layers", 48) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") base_lm_architecture(args)
null
181,668
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_small(args): # 125M params args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) base_gpt3_architecture(args)
null
181,669
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_medium(args): # 350M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args)
null
181,670
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_large(args): # 760M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) base_gpt3_architecture(args)
null
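The GPT-3 presets in the records above differ only in layer count, embedding width, and head count; base_gpt3_architecture then derives the FFN width as 4x the embedding dimension. A small sketch that just prints those derived numbers for the presets shown above (the mapping of names to numbers is taken from the snippets; the arithmetic assumes the defaults in base_gpt3_architecture):

# Derived dimensions for the GPT-3 presets above, assuming
# decoder_ffn_embed_dim = 4 * decoder_embed_dim as in base_gpt3_architecture.
presets = {
    "transformer_lm_gpt3_small":  dict(layers=12, embed=768,  heads=12),
    "transformer_lm_gpt3_medium": dict(layers=24, embed=1024, heads=16),
    "transformer_lm_gpt3_large":  dict(layers=24, embed=1536, heads=16),
}
for name, p in presets.items():
    ffn_dim = 4 * p["embed"]              # default FFN width
    head_dim = p["embed"] // p["heads"]   # per-head dimension
    print(f"{name}: ffn={ffn_dim}, head_dim={head_dim}")
# small: ffn=3072, head_dim=64; medium: ffn=4096, head_dim=64; large: ffn=6144, head_dim=96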
181,671
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): def safe_getattr(obj, k, default=None): def transformer_lm_gpt3_xl(args): # 1.3B params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args)
null
181,672
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_2_7(args): # 2.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args)
null
181,673
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): def safe_getattr(obj, k, default=None): def transformer_lm_gpt3_6_7(args): # 6.7B params args.decoder_layers = safe_getattr(args, "decoder_layers", 32) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) base_gpt3_architecture(args)
null
181,674
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_13(args): # 13B params args.decoder_layers = safe_getattr(args, "decoder_layers", 40) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) base_gpt3_architecture(args)
null
181,675
from dataclasses import dataclass, field from typing import Optional from fairseq import options, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder, ) from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder from fairseq.utils import safe_getattr, safe_hasattr from omegaconf import II def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr( args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4 ) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def transformer_lm_gpt3_175(args): # 175B params args.decoder_layers = safe_getattr(args, "decoder_layers", 96) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) base_gpt3_architecture(args)
null
181,678
from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerModel, base_architecture, transformer_wmt_en_de_big, ) def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0) def transformer_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) args.full_context_alignment = getattr(args, "full_context_alignment", False) base_architecture(args)
null
181,679
from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( TransformerModel, base_architecture, transformer_wmt_en_de_big, ) def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, "attention_dropout", 0.1) transformer_vaswani_wmt_en_de_big(args) def transformer_wmt_en_de_big_align(args): args.alignment_heads = getattr(args, "alignment_heads", 1) args.alignment_layer = getattr(args, "alignment_layer", 4) transformer_wmt_en_de_big(args)
null
181,689
import torch from fairseq.utils import new_arange def load_libnat(): def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat, use_cuda = load_libnat() def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx): in_masks = in_tokens.ne(padding_idx) out_masks = out_tokens.ne(padding_idx) mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels( out_tokens.int(), libnat.levenshtein_distance( in_tokens.int(), out_tokens.int(), in_masks.sum(1).int(), out_masks.sum(1).int(), ), ) masked_tgt_masks = masked_tgt_masks.bool() & out_masks mask_ins_targets = mask_ins_targets.type_as(in_tokens)[ :, 1 : in_masks.size(1) ].masked_fill_(~in_masks[:, 1:], 0) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx): in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets if use_cuda: return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx) return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx)
null
181,693
import torch from fairseq.utils import new_arange def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. """ if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1] out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering) return out_tokens, out_scores, out_attn
null
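The new_arange helper in the record above is easiest to understand from a tiny run. The sketch below copies the helper verbatim so it is self-contained (requires torch):

import torch

def new_arange(x, *size):
    # Return a tensor of `size` filled with a range, on x's device;
    # with no size given, use x's own shape.
    if len(size) == 0:
        size = x.size()
    return torch.arange(size[-1], device=x.device).expand(*size).contiguous()

tokens = torch.zeros(2, 5, dtype=torch.long)
print(new_arange(tokens))
# tensor([[0, 1, 2, 3, 4],
#         [0, 1, 2, 3, 4]])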
181,703
import torch import torch.nn.functional as F from fairseq import utils from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules.transformer_sentence_encoder import init_bert_params def base_architecture(args): def nonautoregressive_transformer_wmt_en_de(args): base_architecture(args)
null
181,707
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder from fairseq.models.transformer import Embedding from fairseq.modules import TransformerDecoderLayer from fairseq.modules.transformer_sentence_encoder import init_bert_params from .levenshtein_utils import ( _apply_del_words, _apply_ins_masks, _apply_ins_words, _fill, _get_del_targets, _get_ins_targets, _skip, _skip_encoder_out, ) def levenshtein_base_architecture(args): def levenshtein_transformer_wmt_en_de(args): levenshtein_base_architecture(args)
null
181,732
from fairseq import utils from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.fconv import FConvDecoder from fairseq.utils import safe_hasattr def base_lm_architecture(args): def fconv_lm_dauphin_gbw(args): layers = "[(512, 5)]" layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3" layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3" layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6" layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]" args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) args.decoder_layers = getattr(args, "decoder_layers", layers) args.decoder_attention = getattr(args, "decoder_attention", "False") args.adaptive_softmax_cutoff = getattr( args, "adaptive_softmax_cutoff", "10000,50000,200000" ) base_lm_architecture(args)
null
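fconv_lm_dauphin_gbw above encodes the convolutional stack as a string expression, which the fconv model code later turns into a list of (out_channels, kernel_width[, residual]) tuples. A short sketch of that expansion, using plain eval only to inspect the result (illustrative; assumes the string really is evaluated as a Python expression, and eval should never be used on untrusted input):

layers = "[(512, 5)]"
layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3"
layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3"
layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6"
layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"

specs = eval(layers)  # list of layer specs for the decoder
print(len(specs), specs[0], specs[-1])
# 40 (512, 5) (4096, 1, 3)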
181,736
import logging import math from pathlib import Path from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from torch import Tensor from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.speech_to_text.hub_interface import S2THubInterface from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) def s2t_transformer_s(args): def s2t_transformer_xs(args): args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 3) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) args.dropout = getattr(args, "dropout", 0.3) s2t_transformer_s(args)
null
181,739
import logging import math from pathlib import Path from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from torch import Tensor from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.speech_to_text.hub_interface import S2THubInterface from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) def s2t_transformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) base_architecture(args) def s2t_transformer_lp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_l(args)
null
181,774
import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, FairseqDropout, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) def base_architecture(args): def fconv_wmt_en_fr(args): convs = "[(512, 3)] * 6" # first 6 layers have 512 units convs += " + [(768, 3)] * 4" # next 4 layers have 768 units convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_layers = getattr(args, "encoder_layers", convs) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) args.decoder_layers = getattr(args, "decoder_layers", convs) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) base_architecture(args)
null
181,784
import logging import json from typing import Dict import numpy as np import torch from torch import nn import torch.nn.functional as F from fairseq.data.audio.audio_utils import ( get_window, get_fourier_basis, get_mel_filters, TTSSpectrogram, ) from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig from fairseq.models.text_to_speech.codehifigan import CodeGenerator as CodeHiFiGANModel from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel class GriffinLimVocoder(nn.Module): def __init__( self, sample_rate, win_size, hop_size, n_fft, n_mels, f_min, f_max, window_fn, spec_bwd_max_iter=32, fp16=False, ): def forward(self, x): def from_data_cfg(cls, args, data_cfg: S2TDataConfig): class HiFiGANVocoder(nn.Module): def __init__( self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False ) -> None: def forward(self, x: torch.Tensor) -> torch.Tensor: def from_data_cfg(cls, args, data_cfg: S2TDataConfig): class CodeHiFiGANVocoder(nn.Module): def __init__( self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False ) -> None: def forward(self, x: Dict[str, torch.Tensor], dur_prediction=False) -> torch.Tensor: def from_data_cfg(cls, args, data_cfg): class S2TDataConfig(object): def __init__(self, yaml_path): def vocab_filename(self): def shuffle(self) -> bool: def pre_tokenizer(self) -> Dict: def bpe_tokenizer(self) -> Dict: def prepend_tgt_lang_tag(self) -> bool: def input_feat_per_channel(self): def input_channels(self): def sampling_alpha(self): def use_audio_input(self): def audio_root(self): def get_feature_transforms(self, split, is_train): def get_vocoder(args, data_cfg: S2TDataConfig): if args.vocoder == "griffin_lim": return GriffinLimVocoder.from_data_cfg(args, data_cfg) elif args.vocoder == "hifigan": return HiFiGANVocoder.from_data_cfg(args, data_cfg) elif args.vocoder == "code_hifigan": return CodeHiFiGANVocoder.from_data_cfg(args, data_cfg) else: raise ValueError("Unknown vocoder")
null
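get_vocoder above is a plain dispatch on args.vocoder. A hypothetical usage sketch, assuming a fairseq installation, a speech-to-text data config YAML, and a vocoder checkpoint referenced by that config; the config file name and the import location of get_vocoder are assumptions, not taken from the snippet:

from argparse import Namespace
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
from fairseq.models.text_to_speech.vocoder import get_vocoder  # assumed module path

data_cfg = S2TDataConfig("config.yaml")          # hypothetical config path
args = Namespace(vocoder="hifigan", fp16=False)  # selects the HiFiGANVocoder branch
vocoder = get_vocoder(args, data_cfg)
# vocoder(features) would then map acoustic features to a waveform tensor.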
181,809
import argparse import collections import os import re import torch from fairseq.file_io import PathManager class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). """ def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. """ global IOPathManager if IOPathManager: return IOPathManager.async_close() return False The provided code snippet includes necessary dependencies for implementing the `average_checkpoints` function. 
Write a Python function `def average_checkpoints(inputs)` to solve the following problem: Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. Here is the function: def average_checkpoints(inputs): """Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for fpath in inputs: with PathManager.open(fpath, "rb") as f: state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, "cpu") ), ) # Copies over the settings from the first checkpoint if new_state is None: new_state = state model_params = state["model"] model_params_keys = list(model_params.keys()) if params_keys is None: params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( "For checkpoint {}, expected list of params: {}, " "but found: {}".format(f, params_keys, model_params_keys) ) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if k not in params_dict: params_dict[k] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: params_dict[k] += p averaged_params = collections.OrderedDict() for k, v in params_dict.items(): averaged_params[k] = v if averaged_params[k].is_floating_point(): averaged_params[k].div_(num_models) else: averaged_params[k] //= num_models new_state["model"] = averaged_params return new_state
Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
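A hedged usage sketch for average_checkpoints: build two toy checkpoints whose only parameter is a single tensor, save them, and average. It assumes the function above is available in the current module (in fairseq it lives in the checkpoint-averaging script); the file names here are made up:

import collections
import torch

ckpt_a = {"model": collections.OrderedDict(w=torch.tensor([1.0, 3.0]))}
ckpt_b = {"model": collections.OrderedDict(w=torch.tensor([3.0, 5.0]))}
torch.save(ckpt_a, "ckpt_a.pt")
torch.save(ckpt_b, "ckpt_b.pt")

averaged = average_checkpoints(["ckpt_a.pt", "ckpt_b.pt"])
print(averaged["model"]["w"])   # tensor([2., 4.]) -- elementwise mean of the two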
181,810
import argparse import collections import os import re import torch from fairseq.file_io import PathManager class PathManager: """ Wrapper for insulating OSS I/O (using Python builtin operations) from iopath's PathManager abstraction (for transparently handling various internal backends). """ def open( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): if IOPathManager: return IOPathManager.open( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) return open( path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool: if IOPathManager: return IOPathManager.copy( src_path=src_path, dst_path=dst_path, overwrite=overwrite ) return shutil.copyfile(src_path, dst_path) def get_local_path(path: str, **kwargs) -> str: if IOPathManager: return IOPathManager.get_local_path(path, **kwargs) return path def exists(path: str) -> bool: if IOPathManager: return IOPathManager.exists(path) return os.path.exists(path) def isfile(path: str) -> bool: if IOPathManager: return IOPathManager.isfile(path) return os.path.isfile(path) def ls(path: str) -> List[str]: if IOPathManager: return IOPathManager.ls(path) return os.listdir(path) def mkdirs(path: str) -> None: if IOPathManager: return IOPathManager.mkdirs(path) os.makedirs(path, exist_ok=True) def rm(path: str) -> None: if IOPathManager: return IOPathManager.rm(path) os.remove(path) def chmod(path: str, mode: int) -> None: if not PathManager.path_requires_pathmanager(path): os.chmod(path, mode) def register_handler(handler) -> None: if IOPathManager: return IOPathManager.register_handler(handler=handler) def copy_from_local( local_path: str, dst_path: str, overwrite: bool = False, **kwargs ) -> None: if IOPathManager: return IOPathManager.copy_from_local( local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs ) return shutil.copyfile(local_path, dst_path) def path_requires_pathmanager(path: str) -> bool: """Do we require PathManager to access given path?""" if IOPathManager: for p in IOPathManager._path_handlers.keys(): if path.startswith(p): return True return False def supports_rename(path: str) -> bool: # PathManager doesn't yet support renames return not PathManager.path_requires_pathmanager(path) def rename(src: str, dst: str): os.rename(src, dst) """ ioPath async PathManager methods: """ def opena( path: str, mode: str = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, ): """ Return file descriptor with asynchronous write operations. """ global IOPathManager if not IOPathManager: logging.info("ioPath is initializing PathManager.") try: from iopath.common.file_io import PathManager IOPathManager = PathManager() except Exception: logging.exception("Failed to initialize ioPath PathManager object.") return IOPathManager.opena( path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def async_close() -> bool: """ Wait for files to be written and clean up asynchronous PathManager. NOTE: `PathManager.async_close()` must be called at the end of any script that uses `PathManager.opena(...)`. 
""" global IOPathManager if IOPathManager: return IOPathManager.async_close() return False def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert len(paths) == 1 path = paths[0] if update_based: pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt") else: pt_regexp = re.compile(r"checkpoint(\d+)\.pt") files = PathManager.ls(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if m is not None: sort_key = int(m.group(1)) if upper_bound is None or sort_key <= upper_bound: entries.append((sort_key, m.group(0))) if len(entries) < n: raise Exception( "Found {} checkpoint files but need at least {}", len(entries), n ) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
null
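last_n_checkpoints above selects files by regex, either epoch-numbered (checkpointN.pt) or update-numbered (checkpoint_E_U.pt). An illustrative check of which filenames each pattern accepts, without touching the filesystem:

import re

epoch_re = re.compile(r"checkpoint(\d+)\.pt")
update_re = re.compile(r"checkpoint_\d+_(\d+)\.pt")

files = ["checkpoint3.pt", "checkpoint12.pt", "checkpoint_3_45000.pt", "checkpoint_best.pt"]
print([f for f in files if epoch_re.fullmatch(f)])    # ['checkpoint3.pt', 'checkpoint12.pt']
print([f for f in files if update_re.fullmatch(f)])   # ['checkpoint_3_45000.pt']
# 'checkpoint_best.pt' matches neither pattern, so it is never averaged.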
181,811
import argparse import logging import math import os import sys from typing import Any, Callable, Dict, List, Optional, Tuple logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from omegaconf import DictConfig, OmegaConf from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils from fairseq.data import data_utils, iterators from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from multiprocessing.pool import ThreadPool def _flatten_config(cfg: DictConfig): config = OmegaConf.to_container(cfg) # remove any legacy Namespaces and replace with a single "args" namespace = None for k, v in list(config.items()): if isinstance(v, argparse.Namespace): namespace = v del config[k] if namespace is not None: config["args"] = vars(namespace) return config def validate_and_save( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool, ) -> Tuple[List[Optional[float]], bool]: num_updates = trainer.get_num_updates() max_update = cfg.optimization.max_update or math.inf # Stopping conditions (and an additional one based on validation loss later # on) should_stop = False if num_updates >= max_update: should_stop = True logger.info( f"Stopping training due to " f"num_updates: {num_updates} >= max_update: {max_update}" ) training_time_hours = trainer.cumulative_training_time() / (60 * 60) if ( cfg.optimization.stop_time_hours > 0 and training_time_hours > cfg.optimization.stop_time_hours ): should_stop = True logger.info( f"Stopping training due to " f"cumulative_training_time: {training_time_hours} > " f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)" ) do_save = ( (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0) or should_stop or ( cfg.checkpoint.save_interval_updates > 0 and num_updates > 0 and num_updates % cfg.checkpoint.save_interval_updates == 0 and num_updates >= cfg.dataset.validate_after_updates ) ) do_validate = ( ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0) or should_stop or ( cfg.dataset.validate_interval_updates > 0 and num_updates > 0 and num_updates % cfg.dataset.validate_interval_updates == 0 ) ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates ) # Validate valid_losses = [None] if do_validate: valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets) should_stop |= should_stop_early(cfg, valid_losses[0]) # Save checkpoint if do_save or should_stop: checkpoint_utils.save_checkpoint( cfg.checkpoint, trainer, epoch_itr, valid_losses[0] ) return valid_losses, should_stop def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]: stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, log_file: Optional[str] = None, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", 
wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = default_log_format if log_file is not None: handler = logging.FileHandler(filename=log_file) logger.addHandler(handler) if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") if self.is_fsdp: import fairscale if self.cfg.common.bf16: raise ValueError( "FullyShardedDataParallel is not compatible with --bf16 or " "--memory-efficient-bf16" ) if self.cfg.distributed_training.zero_sharding != "none": raise ValueError( "FullyShardedDataParallel is not compatible with --zero-sharding " "option (it's already built in)" ) if max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0": raise RuntimeError( "Please update to fairscale 0.4.0 or newer when combining " "--update-freq with FullyShardedDataParallel" ) else: if ( hasattr(self.cfg.distributed_training, "cpu_offload") and self.cfg.distributed_training.cpu_offload ): raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if not self.is_fsdp: if cfg.common.fp16: assert not cfg.common.amp, "Cannot use fp16 and AMP together" self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) elif cfg.common.amp: self._amp_retries = 0 if ( not cfg.distributed_training.pipeline_model_parallel # the DistributedFairseqModel wrapper will handle moving to device, # so only handle cases which don't use the wrapper and not self.use_distributed_wrapper ): 
self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel self.last_device = None if self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None self._ema = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 def use_distributed_wrapper(self) -> bool: return ( self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ) or ( self.is_fsdp and self.cfg.distributed_training.cpu_offload ) def should_save_checkpoint_on_current_rank(self) -> bool: """Indicates whether to save checkpoints on the current DDP rank.""" if ( self.is_fsdp and self.cfg.distributed_training.use_sharded_state ) or getattr(self.cfg.model, "base_layers", 0) > 0: return True else: return self.is_data_parallel_master def always_call_state_dict_during_save_checkpoint(self) -> bool: if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: # FSDP calls communication collective when consolidating checkpoints return True else: return False def checkpoint_suffix(self) -> str: """Suffix to add to the checkpoint file name.""" if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( 
self.data_parallel_rank ) else: return self.cfg.checkpoint.checkpoint_suffix or "" def criterion(self): if self._wrapped_criterion is None: if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: self._wrapped_criterion = models.DistributedFairseqModel( self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion def model(self): if self._wrapped_model is None: if self.use_distributed_wrapper: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_model = self._model return self._wrapped_model def ema(self): if self._ema is None: self._build_ema() return self._ema def _build_ema(self): if self.cfg.ema.store_ema: self._ema = build_ema(self._model, self.cfg.ema, self.device) logger.info( "Exponential Moving Average Shadow Model is initialized." ) def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.is_fsdp and self.cfg.common.fp16: # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, # mostly for the grad scaling. But if we don't have the # --memory-efficient-fp16 flag set, then we're effectively doing # regular --fp16 and can allow the use of optimizers that would # otherwise be unsupported by MemoryEfficientFP16Optimizer. allow_unsupported = not self.cfg.common.memory_efficient_fp16 self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params, allow_unsupported=allow_unsupported ) elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16 or --amp, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) elif self.cfg.common.amp: self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info("NOTE: your device may support faster training with --fp16 or --amp") self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.is_fsdp: assert ( not self.cfg.optimization.use_bmuf ), "--ddp-backend=fully_sharded is not compatible with BMUF" assert self._optimizer.supports_flat_params, ( "--ddp-backend=fully_sharded is only compatible with pointwise " "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). 
" "However, the sharding will result in slightly different results when " "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" ) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. " "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) def is_fsdp(self): return self.cfg.distributed_training.ddp_backend == "fully_sharded" def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if self.cfg.checkpoint.no_save_optimizer_state: return self._gathered_optim_state = None if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() elif self.is_fsdp and not self.model.use_sharded_state: st = self.model.gather_full_optim_state_dict( self.optimizer ) # only returns on rank 0 self._gathered_optim_state = st def state_dict(self): state_dict = { "args": None, # legacy "cfg": ( OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg ), "model": self.model.state_dict(), "criterion": ( self.criterion.state_dict() if utils.has_parameters(self.criterion) else None ), "optimizer_history": (self._optim_history or []) + [ { "criterion_name": self.get_criterion().__class__.__name__, "optimizer_name": self.optimizer.__class__.__name__, "lr_scheduler_state": self.lr_scheduler.state_dict(), "num_updates": self.get_num_updates(), } ], "task_state": self.task.state_dict() if self.task is not None else {}, "extra_state": { "metrics": metrics.state_dict(), "previous_training_time": self.cumulative_training_time(), }, } if self.cfg.ema.store_ema: # Save EMA model state as extra state state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() if self.cfg.ema.ema_fp32: # Save EMA params in fp32 state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params if not self.cfg.checkpoint.no_save_optimizer_state: if self._gathered_optim_state is not None: state_dict["last_optimizer_state"] = self._gathered_optim_state self._gathered_optim_state = None else: state_dict["last_optimizer_state"] = self.optimizer.state_dict() if self.is_fsdp: # save meta data for recombining checkpoint upon loading state_dict["fsdp_metadata"] = self.model.local_metadata_dict() return state_dict def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" logger.info(f"Saving checkpoint to {filename}") # call state_dict on all ranks in case it needs internal communication state_dict = utils.move_to_cpu(self.state_dict()) state_dict["extra_state"].update(extra_state) if self.should_save_checkpoint_on_current_rank: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, ) logger.info(f"Finished saving checkpoint to {filename}") def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, 
optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. """ extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu # FSDP requires loading checkpoint shards on all ranks or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or getattr(self.cfg.model, "base_layers", 0) > 0 ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: self.model.load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) # save memory for later steps del state["model"] if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) del state["criterion"] except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if self.is_fsdp and not self.model.use_sharded_state: # if use_sharded_state, the last_optim_state is already sharded, skip this last_optim_state = self.model.get_shard_from_optim_state_dict( last_optim_state ) elif not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: itr_state = extra_state["train_iterator"] if type(itr_state) == list: # assert len(itr_state) == self.data_parallel_world_size itr_state = itr_state[self.data_parallel_rank] extra_state["train_iterator"] = itr_state epoch = itr_state.get("epoch", 1) if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if ( itr_state.get("version", 1) >= 2 and itr_state.get("iterations_in_epoch", 0) == 0 ): # reset meters at start of epoch reset_meters = True if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() if self.cfg.ema.store_ema: if "ema" not in extra_state: logger.warn( "EMA not found in checkpoint. But store_ema is True. " "EMA is re-initialized from checkpoint." ) self.ema.restore(state["model"], build_fp32_params=self.cfg.ema.ema_fp32) else: logger.info( "Loading EMA from checkpoint" ) self.ema.restore(extra_state["ema"], build_fp32_params=False) if self.cfg.ema.ema_fp32: if "ema_fp32_params" in extra_state: logger.info( "Loading EMA fp32 params from checkpoint" ) self.ema.build_fp32_params(extra_state["ema_fp32_params"]) else: logger.info( "Building EMA fp32 params from EMA model in checkpoint" ) self.ema.build_fp32_params() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def 
get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, # always pass a fixed "epoch" to keep validation data consistent # across training epochs epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 # The no_sync context manager results in increased memory # usage with FSDP, since full-size gradients will be # accumulated on each GPU. It's typically a better tradeoff # to do the extra communication with FSDP. 
and not self.is_fsdp ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass self._xla_markstep_and_send_to_cpu() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() logging_outputs, ( sample_size, ooms, total_train_time, ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. # In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. 
with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.ddp_backend != "slow_mo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # in case of AMP, if gradients are Nan/Inf then # optimizer step is still required if self.cfg.common.amp: overflow = True else: # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) if self.cfg.common.amp and overflow: if self._amp_retries == self.cfg.common.amp_batch_retries: logger.info("AMP: skipping this batch.") self._amp_retries = 0 else: self._amp_retries += 1 return self.train_step(samples, raise_oom) # recursion to feed in same batch except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs, ) raise except OverflowError as e: overflow = True logger.info( f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" ) grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer # after the step if hasattr(self.model, "perform_additional_optimizer_actions"): if hasattr(self.optimizer, "fp32_params"): self.model.perform_additional_optimizer_actions( self.optimizer.optimizer, self.optimizer.fp32_params ) else: self.model.perform_additional_optimizer_actions( self.optimizer.optimizer ) logging_output = None if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo": self.set_num_updates(self.get_num_updates() + 1) if self.cfg.ema.store_ema: # Step EMA forward with new model. 
self.ema.step( self.get_model(), self.get_num_updates(), ) metrics.log_scalar( "ema_decay", self.ema.get_decay(), priority=10000, round=5, weight=0, ) if self.tpu: import torch_xla.core.xla_model as xm # mark step on TPUs self._xla_markstep_and_send_to_cpu() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0 ) logging_outputs = self._xla_markstep_and_send_to_cpu( logging_outputs ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: if self.cuda and self.cuda_env is not None: # log minimum free memory over the iteration gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 torch.cuda.reset_peak_memory_stats() gb_free = self.cuda_env.total_memory_in_GB - gb_used metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16 or self.cfg.common.amp: metrics.log_scalar( "loss_scale", ( self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale() ), priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. 
extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion, **extra_kwargs ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats if self.tpu: logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) # logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_outputs def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): def agg_norm_fn(total_norm): total_norm = total_norm.cuda().float() ** 2 total_norm = distributed_utils.all_reduce( total_norm, group=self.data_parallel_process_group ) return total_norm ** 0.5 should_agg_norm = ( self.is_fsdp and ( self.data_parallel_process_group is not None or torch.distributed.is_initialized() ) ) return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None ) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _fp_convert_sample(self, sample): def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth # it makes sense to do the format conversion on the CPU and then transfer # a smaller buffer to the device. This also saves GPU memory capacity. 
if self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self.cuda: if self.pipeline_model_parallel: if 'target' in sample: sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) if not self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( (torch.isfinite(tensor).all() and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()) or (self.cfg.common.amp and not torch.isfinite(tensor).all()) # in case of amp non-finite grads are fine ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=legacy_ddp. " "Or are you mixing up different generation of GPUs in training?" 
+ "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _xla_markstep_and_send_to_cpu(self, data=None): import torch_xla.core.xla_model as xm xm.mark_step() if data is not None: from fairseq.utils import xla_device_to_cpu return xla_device_to_cpu(data) The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]` to solve the following problem: Train the model for one epoch and return validation losses. 
Here is the function: def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]: """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum), ) update_freq = ( cfg.optimization.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(cfg.optimization.update_freq) else cfg.optimization.update_freq[-1] ) itr = iterators.GroupedIterator( itr, update_freq, skip_remainder_batch=cfg.optimization.skip_remainder_batch, ) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_file=cfg.common.log_file, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), azureml_logging=( cfg.common.azureml_logging if distributed_utils.is_master(cfg.distributed_training) else False ), ) progress.update_config(_flatten_config(cfg)) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = cfg.dataset.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() logger.info("Start iterating over samples") for i, samples in enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... # log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % cfg.common.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop
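A quick illustration of the delayed-update grouping used in the function above: `train()` wraps the epoch iterator in `iterators.GroupedIterator` so that each `samples` list passed to `trainer.train_step()` contains `update_freq` mini-batches whose gradients are accumulated into a single parameter update. The sketch below reproduces just that grouping logic in plain Python; `pick_update_freq` and `grouped` are illustrative helpers, not fairseq APIs.

from itertools import islice
from typing import Iterable, Iterator, List

def pick_update_freq(update_freq: List[int], epoch: int) -> int:
    # Mirrors the selection in train(): use the per-epoch entry when it
    # exists, otherwise fall back to the last configured value.
    return update_freq[epoch - 1] if epoch <= len(update_freq) else update_freq[-1]

def grouped(batches: Iterable, group_size: int) -> Iterator[List]:
    # Yield lists of up to `group_size` consecutive batches, the same role
    # iterators.GroupedIterator plays for gradient accumulation.
    it = iter(batches)
    while True:
        chunk = list(islice(it, group_size))
        if not chunk:
            return
        yield chunk

if __name__ == "__main__":
    freq = pick_update_freq([2, 4], epoch=3)   # -> 4 (falls back to the last entry)
    for samples in grouped(range(7), freq):
        print(samples)                          # [0, 1, 2, 3] then [4, 5, 6]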
Train the model for one epoch and return validation losses.
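The checkpoint scheduling that `train()` delegates to `validate_and_save` can also be read in isolation. The helper below restates the `do_save` condition as a standalone function; its parameter names mirror the `cfg.checkpoint` and `cfg.dataset` fields being read, but the function itself is an illustrative distillation rather than a fairseq API.

def should_save_now(num_updates: int, end_of_epoch: bool, epoch: int,
                    save_interval: int, save_interval_updates: int,
                    validate_after_updates: int, should_stop: bool) -> bool:
    # Save at epoch boundaries on the configured interval, when training is
    # stopping, or every save_interval_updates once validate_after_updates
    # has been reached (matching the do_save expression in validate_and_save).
    return (
        (end_of_epoch and epoch % save_interval == 0)
        or should_stop
        or (
            save_interval_updates > 0
            and num_updates > 0
            and num_updates % save_interval_updates == 0
            and num_updates >= validate_after_updates
        )
    )

print(should_save_now(5000, False, 3, 1, 1000, 0, False))  # True: hits the update interval
print(should_save_now(5001, False, 3, 1, 1000, 0, False))  # False: between intervals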
181,812
import argparse import logging import math import os import sys from typing import Any, Callable, Dict, List, Optional, Tuple logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from omegaconf import DictConfig, OmegaConf from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils from fairseq.data import data_utils, iterators from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from multiprocessing.pool import ThreadPool def main(cfg: FairseqConfig) -> None: if isinstance(cfg, argparse.Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) if ( distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg ): # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() if cfg.common.log_file is not None: handler = logging.FileHandler(filename=cfg.common.log_file) logger.addHandler(handler) np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) ds_local_master = cfg.common.deepspeed and distributed_utils.is_local_master(cfg.distributed_training) if distributed_utils.is_master(cfg.distributed_training) or ds_local_master: checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir, rank=torch.distributed.get_rank()) ckp_copy_thread = ThreadPool(processes=1) else: ckp_copy_thread = None # if distributed_utils.is_master(cfg.distributed_training): # checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) # Print args logger.info(cfg) if cfg.checkpoint.write_checkpoints_asynchronously: try: import iopath # noqa: F401 except ImportError: logging.exception( "Asynchronous checkpoint writing is specified but iopath is " "not installed: `pip install iopath`" ) return # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(cfg.task) assert cfg.criterion, "Please specify criterion to train a model" # Build model and criterion if cfg.distributed_training.ddp_backend == "fully_sharded": with fsdp_enable_wrap(cfg.distributed_training): model = fsdp_wrap(task.build_model(cfg.model)) else: model = task.build_model(cfg.model) criterion = task.build_criterion(cfg.criterion) logger.info(model) logger.info("task: {}".format(task.__class__.__name__)) logger.info("model: {}".format(model.__class__.__name__)) logger.info("criterion: {}".format(criterion.__class__.__name__)) logger.info( "num. shared model params: {:,} (num. trained: {:,})".format( sum( p.numel() for p in model.parameters() if not getattr(p, "expert", False) ), sum( p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad ), ) ) logger.info( "num. expert model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)), sum( p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad ), ) ) # Load valid dataset (we load training data below, based on the latest checkpoint) # We load the valid dataset AFTER building the model data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg) if cfg.dataset.combine_valid_subsets: task.load_dataset("valid", combine=True, epoch=1) else: for valid_sub_split in cfg.dataset.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # (optionally) Configure quantization if cfg.common.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=cfg.common.quantization_config_path, max_epoch=cfg.optimization.max_epoch, max_update=cfg.optimization.max_update, ) else: quantizer = None # Build trainer if cfg.common.deepspeed: assert quantizer is None, "fairseq wuantizer is not currently supported on deepspeed" from fairseq.ds_trainer import DeepSpeedTrainer trainer = DeepSpeedTrainer(cfg, task, model, criterion) elif cfg.common.model_parallel_size == 1: trainer = Trainer(cfg, task, model, criterion, quantizer) else: trainer = MegatronTrainer(cfg, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format( cfg.distributed_training.distributed_world_size ) ) logger.info( "max tokens per device = {} and max sentences per device = {}".format( cfg.dataset.max_tokens, cfg.dataset.batch_size, ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( cfg.checkpoint, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) if cfg.common.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("load_checkpoint") # wait for all workers max_epoch = cfg.optimization.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while epoch_itr.next_epoch_idx <= max_epoch: if lr <= cfg.optimization.stop_min_lr: logger.info( f"stopping training because current learning rate ({lr}) is smaller " "than or equal to minimum learning rate " f"(--stop-min-lr={cfg.optimization.stop_min_lr})" ) break # train for one epoch valid_losses, should_stop = train(cfg, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) # ioPath implementation to wait for all asynchronous file writes to complete. if cfg.checkpoint.write_checkpoints_asynchronously: logger.info( "ioPath PathManager waiting for all asynchronous checkpoint " "writes to finish." 
) PathManager.async_close() logger.info("ioPath PathManager finished waiting.") class PlasmaStore: def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100): self.server = self.start(path, nbytes) def __del__(self): self.server.kill() def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen: if not PYARROW_AVAILABLE: raise ImportError("please run pip install pyarrow to use --use_plasma_view") # best practice is to allocate more space than we need. The limitation seems to be the size of /dev/shm _server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path]) plasma.connect(path, num_retries=200) # If we can't connect we fail immediately return _server def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main( modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None ) -> None: parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) cfg = convert_namespace_to_omegaconf(args) if cfg.common.use_plasma_view: server = PlasmaStore(path=cfg.common.plasma_path) logger.info( f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}" ) if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: 
distributed_utils.call_main(cfg, main) # if cfg.common.use_plasma_view: # server.server.kill()
null
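Stripped of logging, checkpoint setup, and TPU handling, the epoch-level control flow that `main()` builds around `train()` reduces to the loop below. This is a simplified restatement of the source, not a drop-in replacement: `cfg`, `trainer`, `task`, and `epoch_itr` are the fairseq objects constructed in `main()`, and `train_fn` stands in for the `train` function defined earlier.

import math

def epoch_loop(cfg, trainer, task, epoch_itr, train_fn):
    # Train epoch by epoch until max_epoch, until the LR decays to
    # stop_min_lr, or until train()/validation signals should_stop.
    max_epoch = cfg.optimization.max_epoch or math.inf
    lr = trainer.get_lr()
    while epoch_itr.next_epoch_idx <= max_epoch:
        if lr <= cfg.optimization.stop_min_lr:
            break  # learning rate has reached the configured floor
        valid_losses, should_stop = train_fn(cfg, trainer, task, epoch_itr)
        if should_stop:
            break  # max_update, stop_time_hours, or early stopping triggered
        # only the first validation loss drives the LR scheduler
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            load_dataset=task.has_sharded_data("train"),
            disable_iterator_cache=task.has_sharded_data("train"),
        )
    return lr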
181,814
import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints") def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor: """Takes a list of list of constraints in tensor form (a list of tensor constraints for each sentence) and transforms it into a packed Tensor. For example, here is a batch of size 3 with 3, 0, and 1 constraints: [ [ [3 1 2], [3], [4 5 6 7], ] [], [ [1 8 9 10 1 4 11 12], ] ] Its corresponding packed structure is: [ [ 3 3 1 2 0 3 0 4 5 6 7 0], [ 0 0 0 0 0 0 0 0 0 0 0 0], [ 1 1 8 9 10 1 4 11 12 0 0 0] ] The packed tensor has shape (batch size, maxlen), where maxlen is defined below. Each row contains concatenated constraint tokens for that sentence, with 0 appended after each constraint. The first item in each row is the number of constraints for that sentence. So maxlen is the maximum of (number of constraints) + (sum length of constraints) + 1. across all sentences in the batch. """ # The maximum word length of concatenated constraints for any sentence max_constraints_len = 1 for sentence_constraints in batch_constraints: if len(sentence_constraints): # number of constraints, plus sum of constrain lens, plus a zero after each constraints_len = ( 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints) ) max_constraints_len = max(max_constraints_len, constraints_len) batch_size = len(batch_constraints) constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long() for i, sentence_constraints in enumerate(batch_constraints): constraints_tensor[i, 0] = len(sentence_constraints) offset = 1 for j, constraint in enumerate(sentence_constraints): this_len = constraint.size(0) constraints_tensor[i, offset : offset + this_len] = constraint offset += this_len + 1 return constraints_tensor.long() def make_batches(lines, cfg, task, max_positions, encode_fn): def encode_fn_target(x): return encode_fn(x) if cfg.generation.constraints: # Strip (tab-delimited) contraints, if present, from input lines, # store them in batch_constraints batch_constraints = [list() for _ in lines] for i, line in enumerate(lines): if "\t" in line: lines[i], *batch_constraints[i] = line.split("\t") # Convert each List[str] to List[Tensor] for i, constraint_list in enumerate(batch_constraints): batch_constraints[i] = [ task.target_dictionary.encode_line( encode_fn_target(constraint), append_eos=False, add_if_not_exist=False, ) for constraint in constraint_list ] if cfg.generation.constraints: constraints_tensor = pack_constraints(batch_constraints) else: constraints_tensor = None tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn) itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference( tokens, lengths, constraints=constraints_tensor ), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, ).next_epoch_itr(shuffle=False) for batch in itr: ids = batch["id"] 
src_tokens = batch["net_input"]["src_tokens"] src_lengths = batch["net_input"]["src_lengths"] constraints = batch.get("constraints", None) yield Batch( ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints, )
null
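As a quick check of the packing format described in the snippet above, here is a minimal sketch that feeds the docstring's three-sentence example through the pack_constraints function defined there and notes the expected layout; it assumes only torch and that function.

import torch

batch_constraints = [
    [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
    [],  # second sentence has no constraints
    [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
]
packed = pack_constraints(batch_constraints)
# packed has shape (3, 12); each row starts with the constraint count,
# followed by the constraint tokens with a 0 terminating each constraint,
# right-padded with zeros up to maxlen:
#   [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0]
#   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
#   [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0]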
181,815
import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output def main(cfg: FairseqConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) start_time = time.time() total_translate_time = 0 utils.import_user_module(cfg.common) if cfg.interactive.buffer_size < 1: cfg.interactive.buffer_size = 1 if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.batch_size = 1 assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( not cfg.dataset.batch_size or cfg.dataset.batch_size <= cfg.interactive.buffer_size ), "--batch-size cannot be larger than --buffer-size" logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Setup task, e.g., translation task = tasks.setup_task(cfg.task) # Load ensemble overrides = ast.literal_eval(cfg.common_eval.model_overrides) logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Initialize generator generator = task.build_generator(models, cfg.generation) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if cfg.generation.constraints: logger.warning( "NOTE: Constrained decoding currently assumes a shared subword vocabulary." 
) if cfg.interactive.buffer_size > 1: logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info("Type the input sentence and press return:") start_id = 0 for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size): results = [] for batch in make_batches(inputs, cfg, task, max_positions, encode_fn): bsz = batch.src_tokens.size(0) src_tokens = batch.src_tokens src_lengths = batch.src_lengths constraints = batch.constraints if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() if constraints is not None: constraints = constraints.cuda() sample = { "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, } translate_start_time = time.time() translations = task.inference_step( generator, models, sample, constraints=constraints ) translate_time = time.time() - translate_start_time total_translate_time += translate_time list_constraints = [[] for _ in range(bsz)] if cfg.generation.constraints: list_constraints = [unpack_constraints(c) for c in constraints] for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) constraints = list_constraints[i] results.append( ( start_id + id, src_tokens_i, hypos, { "constraints": constraints, "time": translate_time / len(translations), }, ) ) # sort output to match input order for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]): src_str = "" if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) print("S-{}\t{}".format(id_, src_str)) print("W-{}\t{:.3f}\tseconds".format(id_, info["time"])) for constraint in info["constraints"]: print( "C-{}\t{}".format( id_, tgt_dict.string(constraint, cfg.common_eval.post_process), ) ) # Process top predictions for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print("H-{}\t{}\t{}".format(id_, score, hypo_str)) # detokenized hypothesis print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str)) print( "P-{}\t{}".format( id_, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"].div_(math.log(2)).tolist(), ) ), ) ) if cfg.generation.print_alignment: alignment_str = " ".join( ["{}-{}".format(src, tgt) for src, tgt in alignment] ) print("A-{}\t{}".format(id_, alignment_str)) # update running id_ counter start_id += len(inputs) logger.info( "Total time: {:.3f} seconds; translation time: {:.3f}".format( time.time() - start_time, total_translate_time ) ) def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, 
strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main(): parser = options.get_interactive_generation_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
null
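The interactive loop above consumes inputs in chunks via buffered_read, which is called but not shown in this snippet. Below is a hedged sketch of such a helper, reading from a file or stdin and yielding lists of at most buffer_size stripped lines; the name and signature are assumptions based only on how it is used above.

import fileinput

def buffered_read(input, buffer_size):
    # Yield lists of up to buffer_size stripped lines from `input`
    # ("-" for stdin, or a path), mirroring how the loop above consumes them.
    buffer = []
    with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as handle:
        for src_str in handle:
            buffer.append(src_str.strip())
            if len(buffer) >= buffer_size:
                yield buffer
                buffer = []
    if buffer:
        yield buffer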
181,816
import logging import os import sys from argparse import Namespace from itertools import chain import torch from omegaconf import DictConfig from fairseq import checkpoint_utils, distributed_utils, options, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import metrics, progress_bar from fairseq.utils import reset_logging def main(cfg: DictConfig, override_args=None): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) reset_logging() assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_world_size > 1: data_parallel_world_size = distributed_utils.get_data_parallel_world_size() data_parallel_rank = distributed_utils.get_data_parallel_rank() else: data_parallel_world_size = 1 data_parallel_rank = 0 if override_args is not None: overrides = vars(override_args) overrides.update(eval(getattr(override_args, "model_overrides", "{}"))) else: overrides = None # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=overrides, suffix=cfg.checkpoint.checkpoint_suffix, ) model = models[0] # Move models to GPU for model in models: model.eval() if use_fp16: model.half() if use_cuda: model.cuda() # Print args logger.info(saved_cfg) # Build criterion criterion = task.build_criterion(saved_cfg.criterion) criterion.eval() for subset in cfg.dataset.valid_subset.split(","): try: task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task) dataset = task.dataset(subset) except KeyError: raise Exception("Cannot find dataset: " + subset) # Initialize data iterator itr = task.get_batch_iterator( dataset=dataset, max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models], ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=data_parallel_world_size, shard_id=data_parallel_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) log_outputs = [] for i, sample in enumerate(progress): sample = utils.move_to_cuda(sample) if use_cuda else sample _loss, _sample_size, log_output = task.valid_step(sample, model, criterion) progress.log(log_output, step=i) log_outputs.append(log_output) if data_parallel_world_size > 1: log_outputs = distributed_utils.all_gather_list( log_outputs, max_size=cfg.common.all_gather_list_size, group=distributed_utils.get_data_parallel_group(), ) log_outputs = list(chain.from_iterable(log_outputs)) with metrics.aggregate() as agg: task.reduce_metrics(log_outputs, criterion) log_output = agg.get_smoothed_values() progress.print(log_output, tag=subset, step=i) def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: 
"""Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main(): parser = options.get_validation_parser() args = options.parse_args_and_arch(parser) # only override args that are explicitly given on the command line override_parser = options.get_validation_parser() override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True) distributed_utils.call_main( convert_namespace_to_omegaconf(args), main, override_args=override_args )
null
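When running with more than one data-parallel worker, the validation loop above gathers each worker's per-batch log outputs and flattens them before reducing metrics. The sketch below shows the same flatten step in isolation with plain Python lists; the dictionaries are placeholders, not real fairseq log outputs.

from itertools import chain

# one list of per-batch logging dicts per worker (placeholder values)
gathered = [
    [{"loss": 2.1, "ntokens": 1024}, {"loss": 1.9, "ntokens": 980}],  # worker 0
    [{"loss": 2.0, "ntokens": 1010}],                                  # worker 1
]
log_outputs = list(chain.from_iterable(gathered))
# log_outputs now holds all three dicts and can be passed to
# task.reduce_metrics(log_outputs, criterion) exactly as in main() above.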
181,825
import logging import os import hydra import torch from hydra.core.hydra_config import HydraConfig from omegaconf import OmegaConf, open_dict from fairseq import distributed_utils, metrics from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.initialize import add_defaults, hydra_init from fairseq.dataclass.utils import omegaconf_no_object_check from fairseq.utils import reset_logging from fairseq_cli.train import main as pre_main logger = logging.getLogger("fairseq_cli.hydra_train") def hydra_main(cfg: FairseqConfig) -> float: def hydra_init(cfg_name="config") -> None: def cli_main(): try: from hydra._internal.utils import get_args cfg_name = get_args().config_name or "config" except: logger.warning("Failed to get config name from hydra args") cfg_name = "config" hydra_init(cfg_name) hydra_main()
null
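The body of hydra_init is elided in the snippet above; in Hydra-based setups it typically registers the structured FairseqConfig schema under the chosen config name. Below is a hedged sketch of what such a registration can look like using Hydra's public ConfigStore API; the function name is made up here and the actual fairseq implementation may differ.

from hydra.core.config_store import ConfigStore
from fairseq.dataclass.configs import FairseqConfig

def register_fairseq_config(cfg_name: str = "config") -> None:
    # Make the structured FairseqConfig schema resolvable by
    # hydra.main()/compose() under the name `cfg_name`.
    cs = ConfigStore.instance()
    cs.store(name=cfg_name, node=FairseqConfig)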
181,826
import logging import math import os import sys from argparse import Namespace from typing import Iterable, List, Optional import torch import fairseq from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter from fairseq.sequence_scorer import SequenceScorer from omegaconf import DictConfig def main(cfg: DictConfig, **unused_kwargs): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) logger.info(cfg) if cfg.eval_lm.context_window > 0: # reduce tokens per sample by the required context window size cfg.task.tokens_per_sample -= cfg.eval_lm.context_window # Initialize the task using the current *cfg* task = tasks.setup_task(cfg.task) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, model_args, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=eval(cfg.common_eval.model_overrides), suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, task=task, ) use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) # Optimize ensemble for generation and set the source and dest dicts on the model # (required by scorer) for model in models: if use_fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) assert len(models) > 0 logger.info( "num. model params: {:,}".format(sum(p.numel() for p in models[0].parameters())) ) # Load dataset splits task.load_dataset(cfg.dataset.gen_subset) dataset = task.dataset(cfg.dataset.gen_subset) logger.info( "{} {} {:,} examples".format( cfg.task.data, cfg.dataset.gen_subset, len(dataset) ) ) itr = task.eval_lm_dataloader( dataset=dataset, max_tokens=cfg.dataset.max_tokens or 36000, batch_size=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( *[model.max_positions() for model in models] ), num_shards=max( cfg.dataset.num_shards, cfg.distributed_training.distributed_world_size, ), shard_id=max( cfg.dataset.shard_id, cfg.distributed_training.distributed_rank, ), num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, context_window=cfg.eval_lm.context_window, ) itr = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) results = eval_lm( models=models, source_dictionary=task.source_dictionary, batch_iterator=itr, post_process=cfg.common_eval.post_process, output_word_probs=cfg.eval_lm.output_word_probs, output_word_stats=cfg.eval_lm.output_word_stats, target_dictionary=task.target_dictionary, softmax_batch=cfg.eval_lm.softmax_batch, remove_bos_token=getattr(cfg.task, "add_bos_token", False), ) logger.info( "Loss (base 2): {:.4f}, Perplexity: {:.2f}".format( results["loss"], results["perplexity"] ) ) return results def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in 
fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
null
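eval_lm above reports the loss in base 2 together with the corresponding perplexity; the relationship between the two printed numbers is simply perplexity = 2 ** loss, as the short check below illustrates (the loss value is made up for illustration).

import math

loss_base2 = 4.5            # placeholder value, not a real measurement
perplexity = 2 ** loss_base2
# Equivalent natural-log (nats) loss, for comparison with other tooling:
loss_nats = loss_base2 * math.log(2)
print(round(perplexity, 2), round(loss_nats, 4))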
181,827
import ast import logging import math import os import sys from argparse import Namespace from itertools import chain import numpy as np import torch from omegaconf import DictConfig from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter def get_symbols_to_strip_from_output(generator): if hasattr(generator, "symbols_to_strip_from_output"): return generator.symbols_to_strip_from_output else: return {generator.eos} def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, log_file: Optional[str] = None, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = default_log_format if log_file is not None: handler = logging.FileHandler(filename=log_file) logger.addHandler(handler) if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar class TimeMeter(Meter): """Computes the average occurrence of some event per second""" def __init__( self, init: int = 0, n: int = 0, round: Optional[int] = None, ): self.round = round self.reset(init, n) def reset(self, init=0, n=0): self.init = init self.start = time.perf_counter() self.n = n self.i = 0 def update(self, val=1): self.n = type_as(self.n, val) + val self.i += 1 def state_dict(self): return { "init": self.elapsed_time, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): if "start" in state_dict: # backwards compatibility for old state_dicts self.reset(init=state_dict["init"]) else: self.reset(init=state_dict["init"], n=state_dict["n"]) self.round = state_dict.get("round", None) def avg(self): return self.n / self.elapsed_time def elapsed_time(self): return self.init + (time.perf_counter() - self.start) def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was 
active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) def avg(self): return self.sum / self.n if self.n > 0 else self.sum def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val def _main(cfg: DictConfig, output_file): logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=output_file, ) logger = logging.getLogger("fairseq_cli.generate") utils.import_user_module(cfg.common) if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 12000 logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Load dataset splits task = tasks.setup_task(cfg.task) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary overrides = ast.literal_eval(cfg.common_eval.model_overrides) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task) if cfg.generation.lm_path is not None: overrides["data"] = cfg.task.data try: lms, _ = checkpoint_utils.load_model_ensemble( [cfg.generation.lm_path], arg_overrides=overrides, task=None ) except: logger.warning( f"Failed to load language model! 
Please make sure that the language model dict is the same " f"as target dict and is located in the data dir ({cfg.task.data})" ) raise assert len(lms) == 1 else: lms = [None] # Optimize ensemble for generation for model in chain(models, lms): if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(cfg.dataset.gen_subset), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models] ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=cfg.distributed_training.distributed_world_size, shard_id=cfg.distributed_training.distributed_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) # Initialize generator gen_timer = StopwatchMeter() extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight} generator = task.build_generator( models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer = scoring.build_scorer(cfg.scoring, tgt_dict) num_sentences = 0 has_target = True wps_meter = TimeMeter() for sample in progress: sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if cfg.generation.prefix_size > 0: prefix_tokens = sample["target"][:, : cfg.generation.prefix_size] constraints = None if "constraints" in sample: constraints = sample["constraints"] gen_timer.start() hypos = task.inference_step( generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): has_target = sample["target"] is not None # Remove padding if "src_tokens" in sample["net_input"]: src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) else: src_tokens = None target_tokens = None if has_target: target_tokens = ( utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu() ) # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text( sample_id ) target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) else: src_str = "" if has_target: target_str = tgt_dict.string( target_tokens, cfg.common_eval.post_process, escape_unk=True, extra_symbols_to_ignore=get_symbols_to_strip_from_output( generator ), ) src_str = decode_fn(src_str) if has_target: target_str = decode_fn(target_str) if not cfg.common_eval.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str), file=output_file) if has_target: print("T-{}\t{}".format(sample_id, target_str), file=output_file) # Process top predictions for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) if not cfg.common_eval.quiet: score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print( "H-{}\t{}\t{}".format(sample_id, score, hypo_str), file=output_file, ) # detokenized hypothesis print( "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str), file=output_file, ) print( "P-{}\t{}".format( sample_id, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"] .div_(math.log(2)) .tolist(), ) ), ), file=output_file, ) if cfg.generation.print_alignment == "hard": print( "A-{}\t{}".format( sample_id, " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment ] ), ), file=output_file, ) if cfg.generation.print_alignment == "soft": print( "A-{}\t{}".format( sample_id, " ".join( [",".join(src_probs) for src_probs in alignment] ), ), file=output_file, ) if cfg.generation.print_step: print( "I-{}\t{}".format(sample_id, hypo["steps"]), file=output_file, ) if cfg.generation.retain_iter_history: for step, h in enumerate(hypo["history"]): _, h_str, _ = utils.post_process_prediction( hypo_tokens=h["tokens"].int().cpu(), src_str=src_str, alignment=None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=None, ) print( "E-{}_{}\t{}".format(sample_id, step, h_str), file=output_file, ) # Score only the top hypothesis if has_target and j == 0: if ( align_dict is not None or cfg.common_eval.post_process is not None ): # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line( target_str, add_if_not_exist=True ) hypo_tokens = tgt_dict.encode_line( detok_hypo_str, add_if_not_exist=True ) if hasattr(scorer, "add_string"): scorer.add_string(target_str, detok_hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) progress.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info( "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) if has_target: if cfg.bpe and not cfg.generation.sacrebleu: if cfg.common_eval.post_process: 
logger.warning( "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization" ) else: logger.warning( "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization" ) # use print to be consistent with other main outputs: S-, H-, T-, D- and so on print( "Generate {} with beam={}: {}".format( cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string() ), file=output_file, ) return scorer
null
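The generation loop above prints hypothesis and positional scores in base 2; since the model returns natural-log probabilities, the conversion is a division by ln 2. The isolated sketch below shows that step; the tensor values are placeholders.

import math
import torch

log_prob_nats = torch.tensor([-0.6931, -1.3863, -0.2231])  # placeholder log-probs in nats
log_prob_base2 = log_prob_nats / math.log(2)
sentence_score_base2 = log_prob_base2.sum().item()
# This mirrors hypo["score"] / math.log(2) and
# hypo["positional_scores"].div_(math.log(2)) in _main() above.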
181,829
import os import gzip from sre_parse import SPECIAL_CHARS import numpy as np from random import Random from typing import Any, Callable, Dict, Generator, Iterable, Iterator, List, Optional, Tuple, Union import collections from infinibatch import iterators def apply_to_sample(f, sample): if hasattr(sample, "__len__") and len(sample) == 0: return {} def _apply(x): if isinstance(x, np.ndarray): return f(x) elif isinstance(x, collections.OrderedDict): # OrderedDict has attributes that needs to be preserved od = collections.OrderedDict((key, _apply(value)) for key, value in x.items()) od.__dict__ = x.__dict__ return od elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] elif isinstance(x, tuple): return tuple(_apply(x) for x in x) elif isinstance(x, set): return {_apply(x) for x in x} else: return x return _apply(sample)
null
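apply_to_sample above walks an arbitrarily nested sample (dicts, OrderedDicts, lists, tuples, sets) and applies a function to every numpy array it finds, leaving other leaves untouched. A small usage sketch, assuming only numpy and the function defined in the snippet:

import numpy as np

sample = {
    "net_input": {"src_tokens": np.array([1, 2, 3]), "lengths": [np.array([3])]},
    "id": 7,  # non-array leaves are returned unchanged
}
as_float32 = apply_to_sample(lambda a: a.astype(np.float32), sample)
# as_float32 has the same nesting as sample, with every ndarray cast to float32.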
181,830
import copy import logging from dataclasses import dataclass, field import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( roberta_large_architecture, RobertaModel, ) from fairseq.models.transformer_lm import ( base_gpt3_architecture, ) from fairseq.utils import safe_getattr from torchscale.architecture.config import EncoderConfig from torchscale.model.BEiT3 import BEiT3 from unilm.models.aligner import Aligner, Aligner_encoder from unilm.models.connector import build_connector from unilm.models.diffusion import Diffusionmodel, VAE from unilm.models.gpt import GPTmodel, GPTModelConfig def slice_tokens_for_mlm(A, indx, num_elem=2): all_indx = indx[:, None] + torch.arange(num_elem) return A[torch.arange(all_indx.shape[0])[:, None], all_indx]
null
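slice_tokens_for_mlm above gathers, for each row of A, a window of num_elem consecutive tokens starting at the position given in indx. The sketch below shows the shapes on a tiny tensor, using only the function defined in the snippet.

import torch

A = torch.arange(12).reshape(3, 4)   # batch of 3 sequences, each of length 4
indx = torch.tensor([0, 1, 2])       # start position per row
windows = slice_tokens_for_mlm(A, indx, num_elem=2)
# windows == tensor([[ 0,  1],
#                    [ 5,  6],
#                    [10, 11]])  -> shape (3, 2)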
181,831
import copy import logging from dataclasses import dataclass, field import torch import torch.nn as nn import torch.nn.functional as F from fairseq import checkpoint_utils from fairseq import utils from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.roberta import ( roberta_large_architecture, RobertaModel, ) from fairseq.models.transformer_lm import ( base_gpt3_architecture, ) from fairseq.utils import safe_getattr from torchscale.architecture.config import EncoderConfig from torchscale.model.BEiT3 import BEiT3 from unilm.models.aligner import Aligner, Aligner_encoder from unilm.models.connector import build_connector from unilm.models.diffusion import Diffusionmodel, VAE from unilm.models.gpt import GPTmodel, GPTModelConfig def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def kosmosg_xl(args): # 1.3B params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) args.dropout = safe_getattr(args, "dropout", 0.1) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) base_gpt3_architecture(args) roberta_large_architecture(args)
null
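safe_getattr above treats a None value stored in an OmegaConf config as unset and falls back to the default, which is how the architecture functions layer their defaults on top of user overrides. A small check of that contract, with made-up field values:

from omegaconf import OmegaConf

cfg = OmegaConf.create({"decoder_layers": None, "dropout": 0.2})
safe_getattr(cfg, "decoder_layers", 24)        # -> 24 (None counts as unset)
safe_getattr(cfg, "dropout", 0.1)              # -> 0.2 (explicit value wins)
safe_getattr(cfg, "decoder_embed_dim", 2048)   # -> 2048 (missing key)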
181,832
import torch import torch.nn as nn from fairseq.modules import MultiheadAttention from fairseq import utils class SimpleConnector(nn.Module): """Connector model of GPT and MLM.""" def __init__(self, input_dim, output_dim): super().__init__() self.dense = nn.Linear(input_dim, output_dim) def forward(self, features, **kwargs): x = self.dense(features) return x class ComplexConnector(nn.Module): """Connector model of GPT and MLM.""" def __init__(self, input_dim, output_dim, activation_fn): super().__init__() self.dense = nn.Linear(input_dim, input_dim) self.activation_fn = utils.get_activation_fn(activation_fn) self.predict = nn.Linear(input_dim, output_dim) def forward(self, features, **kwargs): x = self.dense(features) x = self.activation_fn(x) x = self.predict(x) return x class XConnector(nn.Module): """Connector model of GPT and MLM.""" def __init__(self, input_dim, output_dim, args,): super().__init__() self.dense = nn.Linear(input_dim, output_dim) self.latent_query = torch.nn.Parameter(torch.randn(args.latent_query_num, output_dim)) self.x_attn = MultiheadAttention( output_dim, args.decoder_attention_heads, kdim=output_dim, vdim=output_dim, dropout=args.attention_dropout, encoder_decoder_attention=True, ) def forward(self, features, **kwargs): x = self.dense(features) # x = attention_i(q=latent_query, kv=concat([x, latent_query])) # shape of x is [batch_size * seq_len, output_dim] -> [seq_len, batch_size, output_dim] x = x.view(-1, kwargs['src_len'], x.size(-1)).transpose(0, 1) bsz = x.size(1) latent_query = self.latent_query.unsqueeze(1).expand(-1, bsz, -1) x, _ = self.x_attn(latent_query, torch.cat([x, latent_query]), torch.cat([x, latent_query])) return x.transpose(0, 1).contiguous().view(-1, x.size(-1)) # [batch_size * seq_len, output_dim] def build_connector(args, input_dim, output_dim): connector_name = args.text_connector if hasattr(args, "text_connector") else args.connector if connector_name == "none": connector = None elif connector_name == "complex": connector = ComplexConnector(input_dim, output_dim, args.activation_fn) elif connector_name == "simple": connector = SimpleConnector(input_dim, output_dim) elif connector_name == "xconnector": connector = XConnector(input_dim, output_dim, args) else: raise ValueError("Invalid text connector type: {}".format(connector_name)) return connector
null
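The connectors above all map GPT hidden states into the MLM embedding space; the simplest one is a single linear projection. A minimal usage sketch of that case, with made-up dimensions and batch shape:

import torch

connector = SimpleConnector(input_dim=2048, output_dim=1024)
features = torch.randn(8 * 32, 2048)   # (batch_size * seq_len, gpt_dim), placeholder sizes
projected = connector(features)        # -> (8 * 32, 1024), the MLM embedding dimension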
181,833
from dataclasses import dataclass, field from typing import Any, Dict, List, Optional from fairseq.dataclass import ChoiceEnum, FairseqDataclass from torch import Tensor import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import distributed_utils, utils from fairseq import checkpoint_utils from fairseq import utils from fairseq.utils import safe_getattr, safe_hasattr from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.transformer_lm import ( TransformerLanguageModelConfig, TransformerLanguageModel, base_gpt3_architecture, ) from fairseq.models.transformer.transformer_decoder import TransformerDecoder from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding from fairseq.modules import PositionalEmbedding from omegaconf import II from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import TextEmbedding def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def base_gpt3_architecture(args): args.decoder_input_dim = args.decoder_embed_dim args.decoder_output_dim = args.decoder_embed_dim args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) # GPT-3 used learned positional embeddings, rather than sinusoidal args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) args.dropout = safe_getattr(args, "dropout", 0.0) args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) args.activation_fn = safe_getattr(args, "activation_fn", "gelu") args.share_decoder_input_output_embed = True base_lm_architecture(args) def gptmodel_small(args): # 125M params args.decoder_layers = safe_getattr(args, "decoder_layers", 12) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) base_gpt3_architecture(args)
null
181,834
from dataclasses import dataclass, field from typing import Any, Dict, List, Optional from fairseq.dataclass import ChoiceEnum, FairseqDataclass from torch import Tensor import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import distributed_utils, utils from fairseq import checkpoint_utils from fairseq import utils from fairseq.utils import safe_getattr, safe_hasattr from fairseq.models import ( BaseFairseqModel, register_model, register_model_architecture, ) from fairseq.models.transformer_lm import ( TransformerLanguageModelConfig, TransformerLanguageModel, base_gpt3_architecture, ) from fairseq.models.transformer.transformer_decoder import TransformerDecoder from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding from fairseq.modules import PositionalEmbedding from omegaconf import II from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder from torchscale.component.embedding import TextEmbedding def safe_getattr(obj, k, default=None): def base_gpt3_architecture(args): def gptmodel_medium(args): # 350M params args.decoder_layers = safe_getattr(args, "decoder_layers", 24) args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) base_gpt3_architecture(args)
null
181,835
from fairseq.dataclass import FairseqDataclass from fairseq.models import ( BaseFairseqModel, ) from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.utils import safe_getattr import torch import torch.nn.functional as F import torch.utils.checkpoint from diffusers import AutoencoderKL, DDPMScheduler, DPMSolverMultistepScheduler, UNet2DConditionModel from diffusers.utils.torch_utils import randn_tensor import inspect from PIL import Image from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import LoraLoaderMixin from diffusers.configuration_utils import FrozenDict def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def diffusionmodel_base(args): args.pretrained_model_name_or_path = safe_getattr(args, "pretrained_model_name_or_path", "runwayml/stable-diffusion-v1-5")
null
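diffusionmodel_base above only pins the default pretrained_model_name_or_path; the surrounding model presumably builds its Stable Diffusion components from that path. Below is a hedged sketch of loading the usual components with the diffusers classes imported in the snippet; this is illustrative, not the actual Diffusionmodel constructor.

from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel

path = "runwayml/stable-diffusion-v1-5"
vae = AutoencoderKL.from_pretrained(path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(path, subfolder="unet")
noise_scheduler = DDPMScheduler.from_pretrained(path, subfolder="scheduler")
# The VAE encodes images to latents, the UNet predicts noise given the
# conditioning embeddings, and the scheduler defines the diffusion timesteps.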
181,836
from fairseq.dataclass import FairseqDataclass from fairseq.models import ( BaseFairseqModel, ) from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.utils import safe_getattr import torch import torch.nn.functional as F import torch.utils.checkpoint from diffusers import AutoencoderKL, DDPMScheduler, DPMSolverMultistepScheduler, UNet2DConditionModel from diffusers.utils.torch_utils import randn_tensor import inspect from PIL import Image from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import LoraLoaderMixin from diffusers.configuration_utils import FrozenDict def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default) def vae_base(args): args.pretrained_model_name_or_path = safe_getattr(args, "pretrained_model_name_or_path", "runwayml/stable-diffusion-v1-5")
null
181,837
import logging import os import torch from copy import deepcopy from typing import Tuple, Union, Callable, Optional from torch import nn from torch.nn import functional as F from open_clip.model import CLIP, CLIPVisionCfg, QuickGELU, TimmModel, ModifiedResNet, VisualTransformer, to_2tuple, LayerNorm, Transformer from open_clip.factory import _MODEL_CONFIGS, list_models, load_checkpoint, get_pretrained_url, download_pretrained, load_state_dict logger = logging.getLogger(__name__) class ClipVisualOnly(nn.Module): # text_cfg for compatibility with original CLIP def __init__(self, embed_dim, vision_cfg, text_cfg, quick_gelu=False): super().__init__() if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: raise NotImplementedError self.visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, embed_dim=embed_dim, image_size=vision_cfg.image_size) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): raise NotImplementedError vision_heads = vision_cfg.width * 32 // vision_cfg.head_width self.visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width) else: vision_heads = vision_cfg.width // vision_cfg.head_width self.visual = VisualTransformer4Seq2Seq( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, output_dim=embed_dim, act_layer=act_layer,) self.init_parameters() def init_parameters(self): if hasattr(self.visual, 'init_parameters'): self.visual.init_parameters() def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) def encode_image(self, image): return self.visual(image) def forward(self, image): image_features = self.encode_image(image) image_features = F.normalize(image_features, dim=-1) return image_features def load_checkpoint4vision_only(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: str = '', jit: bool = False, force_quick_gelu: bool = False, pretrained_image: bool = False,): model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names if pretrained.lower() == 'openai': raise NotImplementedError else: if model_name in _MODEL_CONFIGS: logger.info(f'Loading {model_name} model config.') model_cfg = deepcopy(_MODEL_CONFIGS[model_name]) else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only 
supported for timm models' model = ClipVisualOnly(**model_cfg) if pretrained: checkpoint_path = '' url = get_pretrained_url(model_name, pretrained) if url: checkpoint_path = download_pretrained(url) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') # NOTE TODO remove, strict=True is only for debug load_checkpoint4vision_only(model, checkpoint_path, strict=False) # reload attn weights into ts attn dim = model.visual.transformer.resblocks[0].attn.in_proj_weight.shape[0] // 3 for resblock in model.visual.transformer.resblocks: resblock.ts_attn.q_proj.weight = nn.Parameter(resblock.attn.in_proj_weight[:dim].clone()) resblock.ts_attn.q_proj.bias = nn.Parameter(resblock.attn.in_proj_bias[:dim].clone()) resblock.ts_attn.k_proj.weight = nn.Parameter(resblock.attn.in_proj_weight[dim:2*dim].clone()) resblock.ts_attn.k_proj.bias = nn.Parameter(resblock.attn.in_proj_bias[dim:2*dim].clone()) resblock.ts_attn.v_proj.weight = nn.Parameter(resblock.attn.in_proj_weight[2*dim:].clone()) resblock.ts_attn.v_proj.bias = nn.Parameter(resblock.attn.in_proj_bias[2*dim:].clone()) resblock.ts_attn.out_proj.weight = nn.Parameter(resblock.attn.out_proj.weight.clone()) resblock.ts_attn.out_proj.bias = nn.Parameter(resblock.attn.out_proj.bias.clone()) resblock.attn = None else: logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.') raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.') if jit: model = torch.jit.script(model) return model
null
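create_model above copies each pretrained CLIP block's fused attention projection into separate q/k/v projections for the torchscale attention module; the slicing convention (query first, then key, then value, each of width dim) is shown in isolation below on a dummy weight.

import torch
import torch.nn as nn

embed_dim = 4
# fused q/k/v weight, laid out as in nn.MultiheadAttention.in_proj_weight
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_weight = nn.Parameter(in_proj_weight[:embed_dim].clone())
k_weight = nn.Parameter(in_proj_weight[embed_dim:2 * embed_dim].clone())
v_weight = nn.Parameter(in_proj_weight[2 * embed_dim:].clone())
# The same split is applied to in_proj_bias and repeated for every resblock above.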
181,838
from app_utils import * def create_demo_segmentation(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=512, step=1) preprocessor_name = gr.Radio(label='Preprocessor', choices=['UPerNet', 'None'], type='value', value='UPerNet') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
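The demos in this group rely on two small helpers imported from app_utils that are not shown here, variable_images and randomize_seed_fn. The sketch below is an assumption about what they plausibly look like, based only on how they are wired into the Blocks above; the constants are placeholders for the real ones defined in app_utils.

import random
import gradio as gr

MAX_INPUT_IMAGES = 4       # placeholder; the real constant lives in app_utils
MAX_SEED = 2 ** 32 - 1     # placeholder; the real constant lives in app_utils

def variable_images(n):
    # Show the first n image inputs and hide the rest.
    return [gr.update(visible=(i < n)) for i in range(MAX_INPUT_IMAGES)]

def randomize_seed_fn(seed, randomize_seed):
    # Replace the seed with a random one when the checkbox is ticked.
    return random.randint(0, MAX_SEED) if randomize_seed else seed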
181,839
from app_utils import * def create_demo_softedge(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=512, step=1) preprocessor_name = gr.Radio(label='Preprocessor', choices=[ 'HED', 'PidiNet', 'HED safe', 'PidiNet safe', 'None', ], type='value', value='PidiNet') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,840
from app_utils import * def create_demo_normal(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=384, step=1) preprocessor_name = gr.Radio(label='Preprocessor', choices=['NormalBae', 'None'], type='value', value='NormalBae') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,841
from app_utils import * def create_canvas(w, h): return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255 def create_demo_scribble_interactive(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): canvas_width = gr.Slider(label='Canvas width', minimum=256, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=1) canvas_height = gr.Slider(label='Canvas height', minimum=256, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=1) create_button = gr.Button('Open drawing canvas!') image = gr.Image(tool='sketch', brush_radius=10) prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') create_button.click( fn=create_canvas, inputs=[canvas_width, canvas_height], outputs=image, queue=False, api_name=False, ) ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,842
from app_utils import * def create_demo_canny(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) canny_low_threshold = gr.Slider(label='Canny low threshold', minimum=1, maximum=255, value=100, step=1) canny_high_threshold = gr.Slider(label='Canny high threshold', minimum=1, maximum=255, value=200, step=1) with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, canny_low_threshold, canny_high_threshold, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,843
import cv2 import numpy as np def resize_image(input_image, resolution, interpolation=None): H, W, C = input_image.shape H = float(H) W = float(W) k = float(resolution) / max(H, W) H *= k W *= k H = int(np.round(H / 64.0)) * 64 W = int(np.round(W / 64.0)) * 64 if interpolation is None: interpolation = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA img = cv2.resize(input_image, (W, H), interpolation=interpolation) return img
null
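A quick worked example of the resizing rule above: both sides are scaled by `resolution / max(H, W)` and then snapped to multiples of 64, and the interpolation mode follows the scale factor. The input array here is synthetic.

import numpy as np

img = np.zeros((768, 512, 3), dtype=np.uint8)  # H=768, W=512
out = resize_image(img, resolution=512)
# k = 512/768 < 1, so cv2.INTER_AREA is used;
# H -> round(512/64)*64 = 512, W -> round(341.3/64)*64 = 320
print(out.shape)  # (512, 320, 3)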
181,844
from app_utils import * def create_demo_openpose(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=512, step=1) preprocessor_name = gr.Radio(label='Preprocessor', choices=['Openpose', 'None'], type='value', value='Openpose') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,845
from app_utils import * def create_demo_mlsd(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=512, step=1) mlsd_value_threshold = gr.Slider(label='Hough value threshold (MLSD)', minimum=0.01, maximum=2.0, value=0.1, step=0.01) mlsd_distance_threshold = gr.Slider(label='Hough distance threshold (MLSD)', minimum=0.01, maximum=20.0, value=0.1, step=0.01) with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, mlsd_value_threshold, mlsd_distance_threshold, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,846
from app_utils import * def create_demo_depth(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=384, step=1) preprocessor_name = gr.Radio( label='Preprocessor', choices=['Midas', 'DPT', 'None'], type='value', value='DPT') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,847
from app_utils import * def create_demo_ip2p(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,848
from app_utils import * def create_demo_scribble(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=512, step=1) preprocessor_name = gr.Radio( label='Preprocessor', choices=['HED', 'PidiNet', 'None'], type='value', value='HED') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,849
from app_utils import * def create_demo_shuffle(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocessor_name = gr.Radio( label='Preprocessor', choices=['ContentShuffle', 'None'], type='value', value='ContentShuffle') with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,850
from app_utils import * def create_demo_lineart(generation_fn): with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=1): image = gr.Image(label="Control image") prompt = gr.Textbox(label="Prompt", max_lines=1, placeholder="Use <i> to represent the images in prompt") num_input_images = gr.Slider(1, MAX_INPUT_IMAGES, value=DEFAULT_INPUT_IMAGES, step=1, label="Number of input images:") input_images = [ gr.Image(label=f'img{i}', type="pil", visible=True if i < DEFAULT_INPUT_IMAGES else False) for i in range(MAX_INPUT_IMAGES)] num_input_images.change(variable_images, num_input_images, input_images) seed = gr.Slider(label="Seed", minimum=MIN_SEED, maximum=MAX_SEED, step=1, value=0) randomize_seed = gr.Checkbox(label='Randomize seed', value=True) run_button = gr.Button(label="Run") with gr.Accordion("Advanced options", open=False): num_inference_steps = gr.Slider(label="num_inference_steps", minimum=10, maximum=100, value=50, step=5) text_guidance_scale = gr.Slider(1, 15, value=6, step=0.5, label="Text Guidance Scale") negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, value="") num_images_per_prompt = gr.Slider(1, MAX_IMAGES_PER_PROMPT, value=DEFAULT_IMAGES_PER_PROMPT, step=1, label="Number of Images") image_resolution = gr.Slider(label='Image resolution', minimum=MIN_IMAGE_RESOLUTION, maximum=MAX_IMAGE_RESOLUTION, value=DEFAULT_IMAGE_RESOLUTION, step=256) preprocess_resolution = gr.Slider(label='Preprocess resolution', minimum=128, maximum=512, value=512, step=1) preprocessor_name = gr.Radio( label='Preprocessor', choices=[ 'Lineart', 'Lineart coarse', 'None', 'Lineart (anime)', 'None (anime)', ], type='value', value='Lineart', info= 'Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.' ) with gr.Column(scale=2): result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=2, height='100%') ips = [prompt, num_inference_steps, text_guidance_scale, negative_prompt, num_images_per_prompt, image, image_resolution, preprocess_resolution, preprocessor_name, *input_images] prompt.submit( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) run_button.click( fn=randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False ).then(fn=generation_fn, inputs=ips, outputs=result_gallery) gr.Examples( examples=controlnet_example, inputs=[image, prompt, input_images[0], input_images[1]], cache_examples=False, examples_per_page=100 ) return demo
null
181,851
import base64 import io import json import multiprocessing import os import random from argparse import ArgumentParser from multiprocessing import Process import numpy as np import requests import torch import torch.nn.functional as F from PIL import Image from scipy.ndimage import label, find_objects, grey_dilation from torch.utils.data import Dataset, DataLoader from tqdm import tqdm from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, Blip2Processor, Blip2ForConditionalGeneration, \ CLIPSegProcessor, CLIPSegForImageSegmentation Image.MAX_IMAGE_PIXELS = 1000000000 PROMPT_FOR_GENERATION_FORMAT = """{intro} {instruction_key} Extract all objects mentioned in the caption and separate them using commas. Exclude background elements (site, location, environment) and only include foreground objects. Ensure that only nouns are included and exclude adjectives entirely. {input_key} {input} {response_key} """.format( intro=INTRO_BLURB, instruction_key=INSTRUCTION_KEY, instruction="{instruction}", input_key=INPUT_KEY, input="{input}", response_key=RESPONSE_KEY, ) def save_tsv(args, shard_id, shard, device): os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2, 3, 4, 5, 6, 7" random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.set_device(device) model_dtype = torch.float16 # blip2 blip2_processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-6.7b") blip2_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=model_dtype) blip2_model.eval().to(device) # mpt mpt_config = AutoConfig.from_pretrained('mosaicml/mpt-7b-instruct', trust_remote_code=True) mpt_config.init_device = device mpt_config.attn_config['attn_impl'] = args.attn_impl mpt_tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b') mpt_tokenizer.pad_token = mpt_tokenizer.eos_token mpt_tokenizer.padding_side = 'left' mpt_model = AutoModelForCausalLM.from_pretrained('mosaicml/mpt-7b-instruct', config=mpt_config, torch_dtype=model_dtype, trust_remote_code=True) mpt_model.eval() mpt_generate_kwargs = { 'max_new_tokens': args.max_new_tokens, 'temperature': args.temperature, 'top_p': args.top_p, 'top_k': args.top_k, 'repetition_penalty': args.repetition_penalty, 'no_repeat_ngram_size': args.no_repeat_ngram_size, 'use_cache': args.use_cache, 'do_sample': False if args.temperature == 0 else args.do_sample, 'eos_token_id': mpt_tokenizer.eos_token_id, 'pad_token_id': mpt_tokenizer.pad_token_id, } # clipseg clipseg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") clipseg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined", torch_dtype=model_dtype) clipseg_model.eval().to(device) cnt = 0 for image in tqdm(shard): if image is None: continue if cnt % 1000 == 0: # close previous file if any if cnt > 0: f.close() f = open(os.path.join(args.output_dir, "data", f"cnt_{args.machine_id}_{shard_id}_{cnt // 1000}.tsv"), "w", encoding='utf-8') cnt += 1 blip2_input = blip2_processor(images=image, return_tensors="pt").to(device, model_dtype) blip2_gen = blip2_model.generate(**blip2_input) caption = blip2_processor.batch_decode(blip2_gen, skip_special_tokens=True)[0] \ .replace('\t', '').replace('\n', '').strip() # tag extraction prompt = PROMPT_FOR_GENERATION_FORMAT.format(input=caption) # Run HF generate mpt_input = mpt_tokenizer(prompt, return_tensors='pt', padding=True) for key, value in mpt_input.items(): mpt_input[key] = value.to(device) mpt_gen = mpt_model.generate( input_ids=mpt_input['input_ids'], 
attention_mask=mpt_input['attention_mask'], **mpt_generate_kwargs, ) tags = mpt_tokenizer.batch_decode(mpt_gen, skip_special_tokens=True)[0][len(prompt):] if '#' in tags: continue tags = tags.split(",") tags = [tag.replace('\t', '').replace('\n', '').strip() for tag in tags] tags = [tag for tag in tags if len(tag) > 0 and tag in caption] if len(tags) == 0: continue clipseg_input = clipseg_processor(text=tags, images=[image] * len(tags), padding=True, return_tensors="pt") for key, value in clipseg_input.items(): clipseg_input[key] = value.to(device) if value.dtype == torch.float32: clipseg_input[key] = value.to(device, model_dtype) # predict clipseg_gen = clipseg_model(**clipseg_input).logits if len(tags) == 1: clipseg_gen = clipseg_gen.unsqueeze(0) image_size = image.height # interpolate to original size clipseg_gen = F.interpolate(clipseg_gen.unsqueeze(1), size=image_size, mode='bilinear') masks = torch.sigmoid(clipseg_gen).squeeze(1) masks = masks.cpu().numpy() sub_images = [] tags_to_keep = [] # save the masked image for mask_id, mask in enumerate(masks): image_array = np.array(image) thresholded_mask = mask > args.threshold if thresholded_mask.max() == 0: continue thresholded_mask = grey_dilation(thresholded_mask, size=(image_size // 100, image_size // 100)) labeled_matrix, num_features = label(thresholded_mask) regions = find_objects(labeled_matrix) sizes = [np.sum(thresholded_mask[region]) for region in regions] max_index = np.argmax(sizes) max_region = regions[max_index] thresholded_mask[labeled_matrix != (max_index + 1)] = False tags_to_keep.append(tags[mask_id]) # Determine the dimensions of the region y_start, y_stop = max_region[0].start, max_region[0].stop x_start, x_stop = max_region[1].start, max_region[1].stop height = y_stop - y_start width = x_stop - x_start # Calculate the desired side length for a square region side_length = max(height, width) # Calculate the center of the region center_y = (y_start + y_stop) // 2 center_x = (x_start + x_stop) // 2 # Calculate the new boundaries for the region new_y_start = center_y - (side_length // 2) new_y_stop = new_y_start + side_length new_x_start = center_x - (side_length // 2) new_x_stop = new_x_start + side_length # Adjust the boundaries if they exceed the image boundaries if new_y_start < 0: new_y_start = 0 new_y_stop = side_length elif new_y_stop > image_array.shape[0]: new_y_start = image_array.shape[0] - side_length new_y_stop = image_array.shape[0] if new_x_start < 0: new_x_start = 0 new_x_stop = side_length elif new_x_stop > image_array.shape[1]: new_x_start = image_array.shape[1] - side_length new_x_stop = image_array.shape[1] # Create a new mask with the adjusted boundaries object_image = image_array[new_y_start:new_y_stop, new_x_start:new_x_stop] max_region_mask = thresholded_mask[new_y_start:new_y_stop, new_x_start:new_x_stop] masked_image = object_image.copy() masked_image[~max_region_mask] = 255 object_image = Image.fromarray(object_image).resize((512, 512)) masked_image = Image.fromarray(masked_image).resize((512, 512)) sub_images.extend([object_image, masked_image]) if len(sub_images) == 0: continue image = image.resize((512, 512)) # encode image using base64 buffer = io.BytesIO() image.save(buffer, format='PNG') image = base64.b64encode(buffer.getvalue()).decode('utf-8') for j, im in enumerate(sub_images): buffer = io.BytesIO() im.save(buffer, format='PNG') sub_images[j] = base64.b64encode(buffer.getvalue()).decode('utf-8') # write to tsv file f.write('\t'.join([ caption, ','.join(tags_to_keep), image, *sub_images 
]) + '\n')
null
181,852
import base64 import io import json import multiprocessing import os import random from argparse import ArgumentParser from multiprocessing import Process import numpy as np import requests import torch import torch.nn.functional as F from PIL import Image from scipy.ndimage import label, find_objects, grey_dilation from torch.utils.data import Dataset, DataLoader from tqdm import tqdm from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, Blip2Processor, Blip2ForConditionalGeneration, \ CLIPSegProcessor, CLIPSegForImageSegmentation def collate_fn(batch): return batch[0] if batch is not None else None
null
181,853
import argparse import base64 import io import json import os from multiprocessing import Process from PIL import Image from tqdm import tqdm def save_tsv(args, i, sub_seeds_list): with open(os.path.join(args.output_dir, 'data', f'{str(i).zfill(4)}.tsv'), 'w') as f: for name, seeds in tqdm(sub_seeds_list, desc=f'processing {i}th tsv file', leave=False): # load prompt prompt = json.load(open(os.path.join(args.data_dir, name, 'prompt.json'))) # load images images = [Image.open(os.path.join(args.data_dir, name, f'{seed}_{j}.jpg')).convert('RGB') for seed in seeds for j in range(2)] # resize the shorter side to 512 images = [im.resize((512, int(512 / im.size[0] * im.size[1])) if im.size[0] < im.size[1] else (int(512 / im.size[1] * im.size[0]), 512)) for im in images] # encode image using base64 for j, im in enumerate(images): buffer = io.BytesIO() im.save(buffer, format='PNG') images[j] = base64.b64encode(buffer.getvalue()).decode('utf-8') # write to tsv file f.write('\t'.join([ prompt['input'].replace('\t', '').replace('\n', '').strip(), prompt['edit'].replace('\t', '').replace('\n', '').strip(), *images ]) + '\n')
null
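For reference, a small sketch of how one of these rows could be read back; the column layout follows the writer above (input caption, edit instruction, then base64-encoded PNGs), and the file name is illustrative.

import base64, io
from PIL import Image

with open('data/0000.tsv', encoding='utf-8') as f:
    for line in f:
        fields = line.rstrip('\n').split('\t')
        input_caption, edit_instruction = fields[0], fields[1]
        images = [Image.open(io.BytesIO(base64.b64decode(x))) for x in fields[2:]]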
181,854
import random import gradio as gr import numpy as np import torch MAX_SEED = np.iinfo(np.int32).max def randomize_seed_fn(seed, randomize_seed): if randomize_seed: seed = random.randint(0, MAX_SEED) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) return seed
null
181,855
import random import gradio as gr import numpy as np import torch MAX_INPUT_IMAGES = 10 def variable_images(k): k = int(k) return [gr.update(visible=True)] * k + [gr.update(visible=False)] * (MAX_INPUT_IMAGES - k)
null
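Called from the `num_input_images.change(...)` hooks in the demos above, this returns one visibility update per image slot; for example, with `MAX_INPUT_IMAGES = 10` as defined here:

updates = variable_images(3)
# first 3 slots become visible, the remaining 7 are hidden
assert len(updates) == 10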
181,857
import collections from random import Random from typing import Dict, Iterable, Optional import numpy as np from infinibatch import iterators The provided code snippet includes necessary dependencies for implementing the `safe_getattr` function. Write a Python function `def safe_getattr(obj, k, default=None)` to solve the following problem: Returns obj[k] if it exists and is not None, otherwise returns default. Here is the function: def safe_getattr(obj, k, default=None): """Returns obj[k] if it exists and is not None, otherwise returns default.""" from omegaconf import OmegaConf if OmegaConf.is_config(obj): return obj[k] if k in obj and obj[k] is not None else default return getattr(obj, k, default)
Returns obj[k] if it exists and is not None, otherwise returns default.
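Behaviour in the two supported cases, an OmegaConf config and a plain object (values here are arbitrary):

from argparse import Namespace
from omegaconf import OmegaConf

cfg = OmegaConf.create({"dropout": None, "lr": 0.1})
print(safe_getattr(cfg, "dropout", 0.0))  # 0.0 (key present but None, so the default is used)
print(safe_getattr(cfg, "lr", 0.0))       # 0.1

ns = Namespace(dropout=0.3)
print(safe_getattr(ns, "dropout", 0.0))   # 0.3
print(safe_getattr(ns, "missing", "x"))   # 'x'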
181,858
import collections from random import Random from typing import Dict, Iterable, Optional import numpy as np from infinibatch import iterators The provided code snippet includes necessary dependencies for implementing the `safe_hasattr` function. Write a Python function `def safe_hasattr(obj, k)` to solve the following problem: Returns True if the given key exists and is not None. Here is the function: def safe_hasattr(obj, k): """Returns True if the given key exists and is not None.""" return getattr(obj, k, None) is not None
Returns True if the given key exists and is not None.
181,859
import collections from random import Random from typing import Dict, Iterable, Optional import numpy as np from infinibatch import iterators def image_code_to_token(code): return "<image{}>".format(code)
null
181,860
import logging from typing import Dict, List, Optional, Tuple import torch from fairseq import distributed_utils, utils from fairseq.distributed import utils as fsdp_wrap from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding from fairseq.modules import PositionalEmbedding from torch import Tensor from torchscale.architecture.config import DecoderConfig, EncoderConfig from torchscale.architecture.encoder import Encoder from .language_modeling import LMDecoder as MTDecoder def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8) args.quant_noise_scalar = 
getattr(args, "quant_noise_scalar", 0) args.is_moe = getattr(args, "is_moe", False) args.selected_expert_count = getattr(args, "selected_expert_count", 2)
null
181,861
import logging from dataclasses import dataclass, field from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from apex.normalization import FusedLayerNorm as LayerNorm from fairseq import utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import BaseFairseqModel, register_model, register_model_architecture from fairseq.models.squad import SQuADHead from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding from fairseq.modules import PositionalEmbedding from omegaconf import II from torchscale.architecture.config import EncoderConfig from .machine_translation import MTEncoder as Encoder def base_unilm_architecture(args): if hasattr(args, "encoder_final_norm"): args.no_encoder_final_norm = not args.encoder_final_norm args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072) args.encoder_layers = getattr(args, "encoder_layers", 12) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.activation_fn = getattr(args, "activation_fn", "gelu") args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0) args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None) # args.add_bos_token = getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.encoder_output_dim = getattr( args, "encoder_output_dim", args.encoder_embed_dim ) args.encoder_input_dim = getattr(args, "encoder_input_dim", args.encoder_embed_dim) # Model training is not stable without this args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.no_encoder_final_norm = getattr(args, "no_encoder_final_norm", False) args.no_scale_embedding = getattr(args, "no_scale_embedding", True) args.layernorm_embedding = getattr(args, "layernorm_embedding", True) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True
null
181,862
import logging from dataclasses import dataclass, field from typing import Optional import torch from fairseq import distributed_utils, utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding from fairseq.modules import PositionalEmbedding from omegaconf import II from torchscale.architecture.config import DecoderConfig from torchscale.architecture.decoder import Decoder def base_lm_architecture(args): # backward compatibility for older model checkpoints if hasattr(args, "no_tie_adaptive_proj"): # previous models defined --no-tie-adaptive-proj, so use the existence of # that option to determine if this is an "old" model checkpoint args.no_decoder_final_norm = True # old models always set this to True if args.no_tie_adaptive_proj is False: args.tie_adaptive_proj = True if hasattr(args, "decoder_final_norm"): args.no_decoder_final_norm = not args.decoder_final_norm args.dropout = getattr(args, "dropout", 0.1) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.activation_fn = getattr(args, "activation_fn", "relu") args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0) args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None) args.base_layers = getattr(args, "base_layers", 0) args.base_sublayers = getattr(args, "base_sublayers", 1) args.base_shuffle = getattr(args, "base_shuffle", False) args.add_bos_token = getattr(args, "add_bos_token", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.character_embeddings = getattr(args, "character_embeddings", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # Model training is not stable without this args.decoder_normalize_before = True args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False) args.adaptive_input = getattr(args, "adaptive_input", False) args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4) args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None) args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False) args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) args.offload_activations = getattr(args, "offload_activations", False) if args.offload_activations: args.checkpoint_activations = True
null
181,863
import math import warnings import torch import torch.distributed as dist from fairseq.utils import multi_tensor_l2norm_available, multi_tensor_total_norm def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor: def clip_grad_norm_( params, max_norm, moe_expert_count, aggregate_norm_fn=None ) -> torch.Tensor: def grad_exists(p): return p is not None and getattr(p, "grad", None) is not None if isinstance(params, torch.Tensor): params = [params] params = list(params) params = list(filter(grad_exists, params)) grads, expert_grads, base_expert_grads, sharded_grads = [], [], [], [] denom = math.sqrt(max(dist.get_global_world_size(), moe_expert_count)) for p in params: if hasattr(p, "expert"): expert_grads.append(p.grad.detach() / denom) elif hasattr(p, "base_expert"): base_expert_grads.append(p.grad.detach()) elif hasattr(p, "_is_sharded"): sharded_grads.append(p.grad.detach()) else: grads.append(p.grad.detach()) if len(grads) == 0: if len(params) > 0: total_norm = params[0].new_tensor(0.0) else: total_norm = torch.tensor(0.0) elif len(grads) == 1: total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) else: if multi_tensor_l2norm_available: total_norm = multi_tensor_total_norm(grads) else: if torch.cuda.is_available(): warnings.warn( "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; " "you may get better performance by installing NVIDIA's apex library" ) device = torch.cuda.current_device() elif grads[0].device.type == "xla": device = grads[0].device else: device = torch.device("cpu") total_norm = torch.norm( torch.stack( [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads] ) ) # calculate split_norm and all_reduce with other workers norms = [total_norm] for split_grads in [expert_grads, sharded_grads]: if len(split_grads) == 0: continue split_norm = torch.norm( torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in split_grads]) ) if dist.is_initialized(): split_norm.pow_(2) dist.all_reduce(split_norm) split_norm.sqrt_() norms.append(split_norm) if len(norms) > 1: total_norm = torch.norm(torch.stack(norms)) if aggregate_norm_fn is not None: total_norm = aggregate_norm_fn(total_norm) if max_norm > 0: max_norm = float(max_norm) clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1) for g in grads + expert_grads + sharded_grads + base_expert_grads: g.mul_(clip_coef) return total_norm
null
181,864
import torch import torch.nn as nn import torch.nn.functional as F from apex.normalization import FusedLayerNorm as LayerNorm class set_torch_seed(object): def __init__(self, seed): assert isinstance(seed, int) self.rng_state = self.get_rng_state() torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def get_rng_state(self): state = {"torch_rng_state": torch.get_rng_state()} if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(self, state): torch.set_rng_state(state["torch_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) def __enter__(self): return self def __exit__(self, *exc): self.set_rng_state(self.rng_state) class FeedForwardNetwork(nn.Module): def __init__( self, embed_dim, ffn_dim, activation_fn, dropout, activation_dropout, subln=False, ): super().__init__() self.embed_dim = embed_dim self.activation_fn = get_activation_fn(activation=str(activation_fn)) self.activation_dropout_module = torch.nn.Dropout( activation_dropout, inplace=True ) self.dropout_module = torch.nn.Dropout(dropout, inplace=True) self.fc1 = nn.Linear(self.embed_dim, ffn_dim) self.fc2 = nn.Linear(ffn_dim, self.embed_dim) self.ffn_layernorm = LayerNorm(ffn_dim) if subln else None def reset_parameters(self): self.fc1.reset_parameters() self.fc2.reset_parameters() if self.ffn_layernorm is not None: self.ffn_layernorm.reset_parameters() def forward(self, x): x_shape = x.shape x = x.reshape(-1, x.size(-1)) x = self.fc1(x) x = self.activation_fn(x.float()).type_as(x) x = self.activation_dropout_module(x) if self.ffn_layernorm is not None: x = self.ffn_layernorm(x) x = self.fc2(x) x = x.view(x_shape) x = self.dropout_module(x) return x def make_experts(args, embed_dim, expert_ffn_dim): world_size = ( 1 if not torch.distributed.is_initialized() else torch.distributed.get_world_size() ) expert_list = [] ddp_rank = args.ddp_rank start_seed = torch.randint(1000000, (1,)).item() # at least as many experts than gpus if args.moe_expert_count >= world_size: assert ( args.moe_expert_count % world_size == 0 ), f"{args.moe_expert_count}, {world_size}" local_moe_expert_count = args.moe_expert_count // world_size for i in range(local_moe_expert_count): with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + i): expert_list.append( FeedForwardNetwork( embed_dim, expert_ffn_dim, args.activation_fn, args.dropout, args.activation_dropout, args.subln, ) ) else: assert ( world_size % args.moe_expert_count == 0 ), f"{world_size}, {args.moe_expert_count}" with set_torch_seed(start_seed + ddp_rank % args.moe_expert_count): expert_list.append( FeedForwardNetwork( embed_dim, expert_ffn_dim, args.activation_fn, args.dropout, args.activation_dropout, args.subln, ) ) experts = nn.ModuleList(expert_list) return experts
null
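The expert-to-rank assignment in `make_experts` reduces to simple index arithmetic; a tiny sketch of the seeds each rank would use (pure Python, no GPUs required, numbers are illustrative):

def expert_seeds(world_size, moe_expert_count, ddp_rank, start_seed=0):
    # mirrors the seeding logic in make_experts above
    if moe_expert_count >= world_size:  # several experts per rank
        local = moe_expert_count // world_size
        return [start_seed + ddp_rank * local + i for i in range(local)]
    else:                               # several ranks share one expert
        return [start_seed + ddp_rank % moe_expert_count]

print(expert_seeds(world_size=2, moe_expert_count=4, ddp_rank=1))  # [2, 3]
print(expert_seeds(world_size=4, moe_expert_count=2, ddp_rank=3))  # [1]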
181,865
import torch import torch.nn as nn import torch.nn.functional as F from apex.normalization import FusedLayerNorm as LayerNorm def get_activation_fn(activation): if activation == "relu": return F.relu elif activation == "gelu": return F.gelu else: raise NotImplementedError
null
181,866
import math import torch import torch.nn.functional as F from apex.normalization import FusedLayerNorm as LayerNorm from torch import nn from diffusers.models.attention_processor import LoRALinearLayer from .multiway_network import MultiwayWrapper from xformers.ops import memory_efficient_attention, LowerTriangularMask, MemoryEfficientAttentionCutlassOp def rotate_every_two(x): x1 = x[:, :, ::2] x2 = x[:, :, 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')\ def duplicate_interleave(m): """ A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy. """ dim0 = m.shape[0] m = m.view(-1, 1) # flatten the matrix m = m.repeat(1, 2) # repeat all elements into the 2nd dimension m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy return m def apply_rotary_pos_emb(x, sin, cos, scale=1): sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos)) # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2) return (x * cos) + (rotate_every_two(x) * sin)
null
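A concrete view of the two helpers on tiny tensors (values arbitrary): `duplicate_interleave` repeats each column in place, and `rotate_every_two` swaps and negates adjacent pairs, which is what `apply_rotary_pos_emb` combines with the sin/cos tables.

import torch

m = torch.tensor([[1., 2.],
                  [3., 4.]])
print(duplicate_interleave(m))
# [[1., 1., 2., 2.],
#  [3., 3., 4., 4.]]

x = torch.tensor([[[1., 2., 3., 4.]]])  # (batch=1, seq=1, dim=4)
print(rotate_every_two(x))
# [[[-2., 1., -4., 3.]]]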
181,867
import logging import time from typing import Any, Tuple, cast import torch import torch.distributed as dist from torch import Tensor from torch.nn import Module, ModuleList def _find_my_group_index(grouped_ranks): my_rank = dist.get_rank() for i, group in enumerate(grouped_ranks): if my_rank in group: return i raise RuntimeError def get_moe_group(moe_expert_count): if dist.is_initialized(): if not hasattr(get_moe_group, "_moe_groups"): world_size = dist.get_world_size() if world_size <= moe_expert_count: assert moe_expert_count % world_size == 0 moe_groups = [[i] for i in range(world_size)] else: assert world_size % moe_expert_count == 0 ranks_per_group = world_size // moe_expert_count moe_groups = [ [i + j * moe_expert_count for j in range(ranks_per_group)] for i in range(moe_expert_count) ] get_moe_group._moe_group_idx = moe_groups get_moe_group._moe_groups = [dist.new_group(g) for g in moe_groups] my_group_idx = _find_my_group_index(get_moe_group._moe_group_idx) return get_moe_group._moe_groups[my_group_idx]
null
181,868
import logging import time from typing import Any, Tuple, cast import torch import torch.distributed as dist from torch import Tensor from torch.nn import Module, ModuleList def _find_my_group_index(grouped_ranks): def get_all2all_group(moe_expert_count): if dist.is_initialized(): if not hasattr(get_all2all_group, "_all2all_groups"): world_size = dist.get_world_size() # more experts than world size if world_size <= moe_expert_count: assert moe_expert_count % world_size == 0 all2all_groups = [[i for i in range(world_size)]] # larger world than num experts else: assert world_size % moe_expert_count == 0 ranks_per_group = world_size // moe_expert_count all2all_groups = [ [i * moe_expert_count + j for j in range(moe_expert_count)] for i in range(ranks_per_group) ] get_all2all_group._all2all_group_idx = all2all_groups get_all2all_group._all2all_groups = [ dist.new_group(g) for g in all2all_groups ] my_group_idx = _find_my_group_index(get_all2all_group._all2all_group_idx) return get_all2all_group._all2all_groups[my_group_idx]
null
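The process groups built by `get_moe_group` and `get_all2all_group` follow a fixed layout; for example with world_size = 8 and moe_expert_count = 4 (a pure-Python reproduction of the index arithmetic, no torch.distributed needed):

world_size, moe_expert_count = 8, 4
ranks_per_group = world_size // moe_expert_count

moe_groups = [[i + j * moe_expert_count for j in range(ranks_per_group)]
              for i in range(moe_expert_count)]
all2all_groups = [[i * moe_expert_count + j for j in range(moe_expert_count)]
                  for i in range(ranks_per_group)]

print(moe_groups)      # [[0, 4], [1, 5], [2, 6], [3, 7]]  (ranks holding the same expert)
print(all2all_groups)  # [[0, 1, 2, 3], [4, 5, 6, 7]]      (ranks exchanging tokens)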
181,871
import copy import torch import torch.nn as nn class MultiwayNetwork(nn.Module): def __init__(self, module, dim=0): super().__init__() self.dim = dim self.A = module self.B = copy.deepcopy(module) self.B.reset_parameters() self.split_position = -1 def forward(self, x, **kwargs): if self.split_position == -1: return self.A(x, **kwargs) if self.split_position == 0: return self.B(x, **kwargs) x1, x2 = torch.split( x, [self.split_position, x.size(self.dim) - self.split_position], dim=self.dim, ) # x1, x2 = x[:self.split_position], x[self.split_position:] y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs) return torch.cat([y1, y2], dim=self.dim) def MultiwayWrapper(args, module, dim=0): if args.multiway: return MultiwayNetwork(module, dim=dim) return module
null
181,873
import numpy as np from scipy.optimize import minimize import torch import torch.nn as nn def fixed_pos_embedding(x): seq_len, dim = x.shape inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim)) sinusoid_inp = ( torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x) ) return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
null
181,874
import torch.nn as nn from torchscale.component.multihead_attention import MultiheadAttention from torchscale.component.multiway_network import MultiwayNetwork def init_bert_params(module): def normal_(data): data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) if isinstance(module, nn.Linear): normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): if isinstance(module.q_proj, MultiwayNetwork): normal_(module.q_proj.A.weight.data) normal_(module.q_proj.B.weight.data) normal_(module.k_proj.A.weight.data) normal_(module.k_proj.B.weight.data) normal_(module.v_proj.A.weight.data) normal_(module.v_proj.B.weight.data) else: normal_(module.q_proj.weight.data) normal_(module.k_proj.weight.data) normal_(module.v_proj.weight.data)
null
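init_bert_params is written to be passed to Module.apply, which visits every submodule. A hedged sketch, assuming the torchscale imports above resolve; the toy model is made up, and only the nn.Linear / nn.Embedding branches fire here because it contains no torchscale attention module.

# Sketch: apply the BERT-style init to a toy model.
import torch.nn as nn

toy = nn.Sequential(nn.Embedding(100, 32, padding_idx=0), nn.Linear(32, 32))
toy.apply(init_bert_params)
print(toy[0].weight.data[0].abs().sum())  # padding row is re-zeroed after the init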
181,875
from typing import Any, List, Optional, Sequence, Union, Tuple

import torch
from PIL import Image
from torch import Tensor
from torchmetrics import Metric
from torchmetrics.functional.multimodal.clip_score import _get_model_and_processor
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE, _TRANSFORMERS_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
from typing_extensions import Literal

_DEFAULT_MODEL: str = "openai/clip-vit-large-patch14"


def _download_clip() -> None:
    # _CLIPModel and _CLIPProcessor are expected to be imported elsewhere in this
    # module (typically from transformers, guarded by _TRANSFORMERS_AVAILABLE).
    _CLIPModel.from_pretrained(_DEFAULT_MODEL)
    _CLIPProcessor.from_pretrained(_DEFAULT_MODEL)
null
181,876
import os

import torch
from PIL import Image
from accelerate import Accelerator
from omegaconf import OmegaConf
from torch.nn.utils.rnn import pad_sequence
from torchmetrics.multimodal.clip_score import CLIPScore as CLIP_TScore
from tqdm import tqdm

from app_model import AppModel
from app_utils import randomize_seed_fn
from eval.clip_score import CLIPIScore as CLIP_IScore
from eval.clip_score import CLIPTScore as CLIP_TScore
from eval.dino_score import DINOScore as DINO_Score
from eval.dreambench_prompts import *
from fairseq import options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def image_collate_fn(batch):
    image = [x[0] for x in batch]
    real_image = [x[1] for x in batch]
    prompt = [x[2] for x in batch]
    return image, real_image, prompt
null
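Since image_collate_fn just regroups the tuple fields of a batch, it can be handed directly to a DataLoader; a minimal sketch with made-up in-memory samples in place of real images:

# Sketch: a toy list-backed dataset of (image, real_image, prompt) tuples.
from torch.utils.data import DataLoader

samples = [(f"gen_{i}.png", f"real_{i}.png", f"prompt {i}") for i in range(4)]
loader = DataLoader(samples, batch_size=2, collate_fn=image_collate_fn)

for images, real_images, prompts in loader:
    print(images, real_images, prompts)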
181,877
import os

import torch
from PIL import Image
from accelerate import Accelerator
from omegaconf import OmegaConf
from torch.nn.utils.rnn import pad_sequence
from torchmetrics.multimodal.clip_score import CLIPScore as CLIP_TScore
from tqdm import tqdm

from app_model import AppModel
from app_utils import randomize_seed_fn
from eval.clip_score import CLIPIScore as CLIP_IScore
from eval.clip_score import CLIPTScore as CLIP_TScore
from eval.dino_score import DINOScore as DINO_Score
from eval.dreambench_prompts import *
from fairseq import options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf


def dreambench_collate_fn(batch):
    src_tokens = [x[0] for x in batch]
    gpt_img_src_tokens = torch.cat([x[1] for x in batch])
    img_gpt_input_mask = [x[2] for x in batch]
    negative_tokens = batch[0][3].unsqueeze(0)
    src_tokens = pad_sequence(src_tokens, batch_first=True, padding_value=1)
    img_gpt_input_mask = pad_sequence(img_gpt_input_mask, batch_first=True, padding_value=0)
    object_name = [x[4] for x in batch]
    object_id = [x[5] for x in batch]
    real_image = [x[6] for x in batch]
    prompt = [x[7] for x in batch]
    return src_tokens, gpt_img_src_tokens, img_gpt_input_mask, negative_tokens, object_name, object_id, real_image, prompt
null
181,878
import os
import subprocess
import sys

from setuptools import setup, find_packages, Extension
from setuptools import Extension, find_packages, setup
import site


version = write_version_py()

with open("README.md") as f:
    readme = f.read()


if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]

    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []


if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")


def write_version_py():
    with open(os.path.join("fairseq", "version.txt")) as f:
        version = f.read().strip()

    # append latest commit hash to version string
    try:
        sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"])
            .decode("ascii")
            .strip()
        )
        version += "+" + sha[:7]
    except Exception:
        pass

    # write version info to fairseq/version.py
    with open(os.path.join("fairseq", "version.py"), "w") as f:
        f.write('__version__ = "{}"\n'.format(version))
    return version
null
181,879
import os
import subprocess
import sys

from setuptools import setup, find_packages, Extension
from setuptools import Extension, find_packages, setup
import site


version = write_version_py()


extensions = [
    Extension(
        "fairseq.libbleu",
        sources=[
            "fairseq/clib/libbleu/libbleu.cpp",
            "fairseq/clib/libbleu/module.cpp",
        ],
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.data_utils_fast",
        sources=["fairseq/data/data_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.token_block_utils_fast",
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]


cmdclass = {}

extra_packages = []


def do_setup(package_data):
    setup(
        name="fairseq",
        version=version,
        description="Facebook AI Research Sequence-to-Sequence Toolkit",
        url="https://github.com/pytorch/fairseq",
        classifiers=[
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
        ],
        long_description=readme,
        long_description_content_type="text/markdown",
        setup_requires=[
            "cython",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "setuptools>=18.0",
        ],
        install_requires=[
            "cffi",
            "cython",
            'dataclasses; python_version<"3.7"',
            "hydra-core>=1.0.7,<1.1",
            "omegaconf<2.1",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "regex",
            "sacrebleu>=1.4.12",
            "torch",
            "tqdm",
            "bitarray",
            "torchaudio>=0.8.0",
        ],
        dependency_links=dependency_links,
        packages=find_packages(
            exclude=[
                "examples",
                "examples.*",
                "scripts",
                "scripts.*",
                "tests",
                "tests.*",
            ]
        )
        + extra_packages,
        package_data=package_data,
        ext_modules=extensions,
        test_suite="tests",
        entry_points={
            "console_scripts": [
                "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
                "fairseq-generate = fairseq_cli.generate:cli_main",
                "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
                "fairseq-interactive = fairseq_cli.interactive:cli_main",
                "fairseq-preprocess = fairseq_cli.preprocess:cli_main",
                "fairseq-score = fairseq_cli.score:cli_main",
                "fairseq-train = fairseq_cli.train:cli_main",
                "fairseq-validate = fairseq_cli.validate:cli_main",
            ],
        },
        cmdclass=cmdclass,
        zip_safe=False,
    )
null
181,880
import os
import subprocess
import sys

from setuptools import setup, find_packages, Extension
from setuptools import Extension, find_packages, setup
import site


if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]

    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []


if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")


def get_files(path, relative_to="fairseq"):
    all_files = []
    for root, _dirs, files in os.walk(path, followlinks=True):
        root = os.path.relpath(root, relative_to)
        for file in files:
            if file.endswith(".pyc"):
                continue
            all_files.append(os.path.join(root, file))
    return all_files
null
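A hedged sketch of how the setup.py pieces above typically fit together at the bottom of the file; the exact package_data paths are assumptions for illustration, not taken from this source:

# Sketch: bundle non-Python files via get_files and hand them to do_setup.
if __name__ == "__main__":
    package_data = {
        "fairseq": (
            get_files(os.path.join("fairseq", "config"))
            + get_files(os.path.join("fairseq", "examples"))
        )
    }
    do_setup(package_data)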
181,938
from fairseq import options


def add_reranking_args(parser):
    group = parser.add_argument_group("Reranking")
    # fmt: off
    group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True,
                       help='path to first model or ensemble of models for rescoring')
    group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False,
                       help='path to second model or ensemble of models for rescoring')
    group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10,
                       help='the number of candidate hypothesis to rescore')
    group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128,
                       help='batch size for generating the nbest list')
    group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'],
                       help='data subset to generate (train, valid, test)')
    group.add_argument('--gen-model', default=None, metavar='FILE',
                       help='the model to generate translations')
    group.add_argument('-b1', '--backwards1', action='store_true',
                       help='whether or not the first model group is backwards')
    group.add_argument('-b2', '--backwards2', action='store_true',
                       help='whether or not the second model group is backwards')
    group.add_argument('-a', '--weight1', default=1, nargs='+', type=float,
                       help='the weight(s) of the first model')
    group.add_argument('-b', '--weight2', default=1, nargs='+', type=float,
                       help='the weight(s) of the second model, or the gen model if using nbest from interactive.py')
    group.add_argument('-c', '--weight3', default=1, nargs='+', type=float,
                       help='the weight(s) of the third model')

    # lm arguments
    group.add_argument('-lm', '--language-model', default=None, metavar='FILE',
                       help='language model for target language to rescore translations')
    group.add_argument('--lm-dict', default=None, metavar='FILE',
                       help='the dict of the language model for the target language')
    group.add_argument('--lm-name', default=None,
                       help='the name of the language model for the target language')
    group.add_argument('--lm-bpe-code', default=None, metavar='FILE',
                       help='the bpe code for the language model for the target language')
    group.add_argument('--data-dir-name', default=None,
                       help='name of data directory')
    group.add_argument('--lenpen', default=1, nargs='+', type=float,
                       help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
    group.add_argument('--score-dict-dir', default=None,
                       help='the directory with dictionaries for the scoring models')
    group.add_argument('--right-to-left1', action='store_true',
                       help='whether the first model group is a right to left model')
    group.add_argument('--right-to-left2', action='store_true',
                       help='whether the second model group is a right to left model')
    group.add_argument('--post-process', '--remove-bpe', default='@@ ',
                       help='the bpe symbol, used for the bitext and LM')
    group.add_argument('--prefix-len', default=None, type=int,
                       help='the length of the target prefix to use in rescoring (in terms of words wo bpe)')
    group.add_argument('--sampling', action='store_true',
                       help='use sampling instead of beam search for generating n best list')
    group.add_argument('--diff-bpe', action='store_true',
                       help='bpe for rescoring and nbest list not the same')
    group.add_argument('--rescore-bpe-code', default=None,
                       help='bpe code for rescoring models')
    group.add_argument('--nbest-list', default=None,
                       help='use predefined nbest list in interactive.py format')
    group.add_argument('--write-hypos', default=None,
                       help='filename prefix to write hypos to')
    group.add_argument('--ref-translation', default=None,
                       help='reference translation to use with nbest list from interactive.py')
    group.add_argument('--backwards-score-dict-dir', default=None,
                       help='the directory with dictionaries for the backwards model,'
                            'if None then it is assumed the fw and backwards models share dictionaries')

    # extra scaling args
    group.add_argument('--gen-model-name', default=None,
                       help='the name of the models that generated the nbest list')
    group.add_argument('--model1-name', default=None,
                       help='the name of the set for model1 group ')
    group.add_argument('--model2-name', default=None,
                       help='the name of the set for model2 group')
    group.add_argument('--shard-id', default=0, type=int,
                       help='the id of the shard to generate')
    group.add_argument('--num-shards', default=1, type=int,
                       help='the number of shards to generate across')
    group.add_argument('--all-shards', action='store_true',
                       help='use all shards')
    group.add_argument('--target-prefix-frac', default=None, type=float,
                       help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)')
    group.add_argument('--source-prefix-frac', default=None, type=float,
                       help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)')
    group.add_argument('--normalize', action='store_true',
                       help='whether to normalize by src and target len')
    # fmt: on
    return group


def get_reranking_parser(default_task="translation"):
    parser = options.get_parser("Generation and reranking", default_task)
    add_reranking_args(parser)
    return parser
null
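A hedged usage sketch for the reranking parser, assuming fairseq is importable; the checkpoint path and flag values are placeholders, and parse_known_args is used so that flags contributed elsewhere (task, dataset, generation) can be ignored in this minimal example:

# Sketch: build the combined generation + reranking parser and parse toy flags.
parser = get_reranking_parser()
args, _unknown = parser.parse_known_args([
    "--score-model1", "checkpoints/fw_model.pt",   # hypothetical path
    "--num-rescore", "50",
])
print(args.score_model1, args.num_rescore, args.lenpen)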