# Excerpts from src/transformers/trainer.py (Hugging Face Transformers).
        if self.optimizer is None:
            decay_parameters = self.get_decay_parameter_names(opt_model)
            optimizer_grouped_parameters = [
                {
                    "params": [
                        p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
                    ],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [
                        p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                    ],
                    "weight_decay": 0.0,
                },
            ]

            if self.optimizer_cls_and_kwargs is not None:
                optimizer_cls, optimizer_kwargs = self.optimizer_cls_and_kwargs
            else:
                optimizer_cls, optimizer_kwargs = self.get_optimizer_cls_and_kwargs(self.args, opt_model)
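            # `get_decay_parameter_names` (used above) returns every parameter name except biases and
            # layer-norm weights, so the first parameter group receives weight decay and the second does not.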
            # Overwrite `params` in case it's created by `get_optimizer_cls_and_kwargs`
            # e.g. for GaLore optimizer.
            if "params" in optimizer_kwargs:
                optimizer_grouped_parameters = optimizer_kwargs.pop("params")

            # Overwrite `model` in case it's created by `get_optimizer_cls_and_kwargs`
            # e.g. for LOMO optimizer.
            if "model" in optimizer_kwargs:
                optimizer_grouped_parameters = optimizer_kwargs.pop("model")

            # For layer-wise dummy optimizers we overwrite optimizer_grouped_parameters with `optimizer_dict`
            # to avoid argument conflicts.
            if "optimizer_dict" in optimizer_kwargs:
                optimizer_grouped_parameters = optimizer_kwargs.pop("optimizer_dict")

            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

            if optimizer_cls.__name__ == "Adam8bit":
                import bitsandbytes
                manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                skipped = 0
                for module in opt_model.modules():
                    if isinstance(module, nn.Embedding):
                        skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                        logger.info(f"skipped {module}: {skipped/2**20}M params")
                        manager.register_module_override(module, "weight", {"optim_bits": 32})
                        logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                logger.info(f"skipped: {skipped/2**20}M params")

        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(self.optimizer)

        return self.optimizer

    def get_num_trainable_parameters(self):
        """
        Get the number of trainable parameters.
        """
        return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
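    # Illustrative usage (a sketch; `trainer` is assumed to be an already-constructed Trainer):
    #
    #     n_params = trainer.get_num_trainable_parameters()
    #     print(f"trainable parameters: {n_params / 1e6:.1f}M")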
    def get_learning_rates(self):
        """
        Returns the learning rate of each parameter group from self.optimizer.
        """
        if self.optimizer is None:
            raise ValueError("Trainer optimizer is None, please make sure you have set up the optimizer before.")
        return [group["lr"] for group in self.optimizer.param_groups]

    def get_optimizer_group(self, param: Optional[Union[str, torch.nn.parameter.Parameter]] = None):
        """
        Returns optimizer group for a parameter if given, else returns all optimizer groups for params.
        Args:
            param (`str` or `torch.nn.parameter.Parameter`, *optional*):
                The parameter for which the optimizer group needs to be returned.
        """
        if self.optimizer is None:
            raise ValueError("Trainer optimizer is None, please make sure you have set up the optimizer before.")
        if param is not None:
            for group in self.optimizer.param_groups:
                if param in group["params"]:
                    return group
        return [group["params"] for group in self.optimizer.param_groups]

    @staticmethod
    def get_optimizer_cls_and_kwargs(
        args: TrainingArguments, model: Optional[PreTrainedModel] = None
    ) -> Tuple[Any, Any]:
        """
        Returns the optimizer class and optimizer parameters based on the training arguments.

        Args:
            args (`transformers.training_args.TrainingArguments`):
                The training arguments for the training session.
        """
        # parse args.optim_args
        optim_args = {}
        if args.optim_args:
            for mapping in args.optim_args.replace(" ", "").split(","):
                key, value = mapping.split("=")
                optim_args[key] = value

        optimizer_kwargs = {"lr": args.learning_rate}

        adam_kwargs = {
            "betas": (args.adam_beta1, args.adam_beta2),
            "eps": args.adam_epsilon,
        }
        if args.optim == OptimizerNames.ADAFACTOR:
            optimizer_cls = Adafactor
            optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
        elif args.optim == OptimizerNames.ADAMW_HF:
            from .optimization import AdamW

            optimizer_cls = AdamW
            optimizer_kwargs.update(adam_kwargs)
        elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
            from torch.optim import AdamW
            optimizer_cls = AdamW
            optimizer_kwargs.update(adam_kwargs)
            if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
                optimizer_kwargs.update({"fused": True})
        elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
            try:
                from torch_xla.amp.syncfree import AdamW

                optimizer_cls = AdamW
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
        elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED:
            try:
                from torch_npu.optim import NpuFusedAdamW
                optimizer_cls = NpuFusedAdamW
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer failed to import FusedAdamW from torch_npu.")
        elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
            try:
                from apex.optimizers import FusedAdam
                optimizer_cls = FusedAdam
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
        elif args.optim in [
            OptimizerNames.ADAMW_BNB,
            OptimizerNames.ADAMW_8BIT,
            OptimizerNames.PAGED_ADAMW,
            OptimizerNames.PAGED_ADAMW_8BIT,
            OptimizerNames.ADEMAMIX,
            OptimizerNames.ADEMAMIX_8BIT,
            OptimizerNames.PAGED_ADEMAMIX,
            OptimizerNames.PAGED_ADEMAMIX_8BIT,
            OptimizerNames.LION,
            OptimizerNames.LION_8BIT,
            OptimizerNames.PAGED_LION,
            OptimizerNames.PAGED_LION_8BIT,
            OptimizerNames.RMSPROP_BNB,
            OptimizerNames.RMSPROP_8BIT,
            OptimizerNames.RMSPROP_32BIT,
        ]:
            try:
                from bitsandbytes.optim import AdamW, Lion, RMSprop
                is_paged = False
                optim_bits = 32
                optimizer_cls = None
                additional_optim_kwargs = adam_kwargs
                if "paged" in args.optim:
                    is_paged = True
                if "8bit" in args.optim:
                    optim_bits = 8
                if "adam" in args.optim:
                    optimizer_cls = AdamW
                elif "lion" in args.optim:
                    optimizer_cls = Lion
                    additional_optim_kwargs = {"betas": (args.adam_beta1, args.adam_beta2)}
                elif "rmsprop" in args.optim:
                    optimizer_cls = RMSprop
                    # Above we pass all `adam_kwargs` to the optimizer, here
                    # we only pass `optim_args` which can be passed by the user.
                    additional_optim_kwargs = optim_args
                elif "ademamix" in args.optim:
                    if is_bitsandbytes_available() and version.parse(
                        importlib.metadata.version("bitsandbytes")
                    ) < version.parse("0.44.0"):
                        raise ValueError(
                            "The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. "
                            "Please install `bitsandbytes` >= 0.44.0."
                        )
                    from bitsandbytes.optim import AdEMAMix

                    optimizer_cls = AdEMAMix
                    additional_optim_kwargs = {
                        "betas": (
                            float(optim_args.get("beta1", args.adam_beta1)),
                            float(optim_args.get("beta2", args.adam_beta2)),
                            float(optim_args.get("beta3", 0.9999)),
                        ),
                        "alpha": float(optim_args.get("alpha", 5.0)),
                        "eps": float(optim_args.get("eps", args.adam_epsilon)),
                    }

                    if "t_alpha" in optim_args:
                        additional_optim_kwargs["t_alpha"] = int(optim_args["t_alpha"])

                    if "t_beta3" in optim_args:
                        additional_optim_kwargs["t_beta3"] = int(optim_args["t_beta3"])
                bnb_kwargs = {"optim_bits": optim_bits}
                if "rmsprop" not in args.optim:
                    bnb_kwargs["is_paged"] = is_paged

                optimizer_kwargs.update(additional_optim_kwargs)
                optimizer_kwargs.update(bnb_kwargs)
            except ImportError:
                raise ValueError("Trainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!")
            if is_bitsandbytes_available() and version.parse(
                importlib.metadata.version("bitsandbytes")
            ) < version.parse("0.41.1"):
                logger.warning(
                    "You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. "
                    "It is recommended to update your version as a major bug has been fixed in 8-bit optimizers."
                )
        elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
            try:
                from torchdistx.optimizers import AnyPrecisionAdamW
                optimizer_cls = AnyPrecisionAdamW
                optimizer_kwargs.update(adam_kwargs)
                # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
                optimizer_kwargs.update(
                    {
                        "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
                        "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
                        "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
                        "compensation_buffer_dtype": getattr(
                            torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
                        ),
                    }
                )
            except ImportError:
                raise ValueError("Please install https://github.com/pytorch/torchdistx")
        elif args.optim == OptimizerNames.SGD:
            optimizer_cls = torch.optim.SGD
        elif args.optim == OptimizerNames.ADAGRAD:
            optimizer_cls = torch.optim.Adagrad
        elif args.optim == OptimizerNames.RMSPROP:
            optimizer_cls = torch.optim.RMSprop
        elif args.optim in [
            OptimizerNames.GALORE_ADAMW,
            OptimizerNames.GALORE_ADAMW_8BIT,
            OptimizerNames.GALORE_ADAFACTOR,
            OptimizerNames.GALORE_ADAMW_LAYERWISE,
            OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE,
            OptimizerNames.GALORE_ADAFACTOR_LAYERWISE,
        ]:
            if not is_galore_torch_available():
                raise ImportError(
                    "You need to install `galore_torch` in order to use GaLore optimizers. "
                    "Install it with `pip install git+https://github.com/jiaweizzhao/GaLore`."
                )
            from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit
            is_layerwise = args.optim.lower().endswith("layerwise")
            if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED:
                raise NotImplementedError("Layer-wise GaLore does not support DDP at this time")

            optimizer_mapping = {
                OptimizerNames.GALORE_ADAMW: GaLoreAdamW,
                OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit,
                OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor,
                OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW,
                OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit,
                OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor,
            }

            optimizer_cls = optimizer_mapping[args.optim]

            if args.optim_target_modules is None:
                raise ValueError(
                    "You need to define `optim_target_modules` in order to properly use GaLore optimizers"
                )
            if not isinstance(args.optim_target_modules, (list, str)):
                raise ValueError(
                    f"`optim_target_modules` has to be a list of strings, a string corresponding to a regex, a specific module name, or 'all-linear'; you passed {args.optim_target_modules}"
                )

            if model is None:
                raise ValueError("You need to pass a model in order to correctly initialize a GaLore optimizer.")

            logger.warning(
                "Activated GaLore fine-tuning. Depending on your model size and hardware, the training might take a while before starting. Please be patient!"
            )

            all_linear = (
                isinstance(args.optim_target_modules, str)
                and args.optim_target_modules.replace("_", "-") == "all-linear"
            )
            galore_params = []
            galore_params_names = []
            for module_name, module in model.named_modules():
                target_module_exists, is_regex = check_target_module_exists(
                    args.optim_target_modules, module_name, return_is_regex=True
                )

                if not isinstance(module, nn.Linear):
                    # Warn in case we match but it's not a linear layer
                    if target_module_exists and not is_regex:
                        logger.warning(
                            f"{module_name} has been matched but ignored as GaLore only supports linear layers. Please double check your `optim_target_modules`!"
                        )
                    continue

                if not target_module_exists and not all_linear:
                    continue

                galore_params.append(module.weight)
                galore_params_names.append(module_name + ".weight")
            if len(galore_params) == 0:
                raise ValueError(
                    f"None of the target modules were found! ({args.optim_target_modules}). Please make sure to pass a valid `optim_target_modules`."
                )

            non_galore_params = [p for n, p in model.named_parameters() if n not in galore_params_names]

            galore_optim_kwargs = {
                "rank": int(optim_args.pop("rank", 128)),
                "update_proj_gap": int(optim_args.pop("update_proj_gap", 200)),
                "scale": float(optim_args.pop("scale", 0.25)),
                "proj_type": optim_args.pop("proj_type", "std"),
            }

            # The default args are from the official repository: https://github.com/jiaweizzhao/GaLore
            param_groups = [
                {"params": non_galore_params},
                {"params": galore_params, **galore_optim_kwargs},
            ]
            if is_layerwise:
                # For layer-wise optimizers, the optimization step is done through post-accumulation
                # gradient hooks. The trick is to first attach these hooks to the model parameters, then
                # create a dummy optimizer that will perform no-ops in the Trainer.
                # See the original implementation or the nice implementation from @hiyouga
                # here: https://github.com/hiyouga/LLaMA-Factory/commit/8664262cde3919e10eaecbd66e8c5d356856362e#diff-ebe08ab14496dfb9e06075f0fdd36799ef6d1535cc4dd4715b74c4e3e06fe3ba
                if args.gradient_accumulation_steps != 1:
                    raise ValueError("Layer-wise GaLore optimizers do not support gradient accumulation!")
                optimizer_dict = {}
                for param in non_galore_params:
                    param_groups = [{"params": [param]}]
                    optimizer_dict[param] = optimizer_cls(param_groups, **optimizer_kwargs)
                for param in galore_params:
                    param_groups = [{"params": [param], **galore_optim_kwargs}]
                    optimizer_dict[param] = optimizer_cls(param_groups, **optimizer_kwargs)

                def optimizer_hook(param):
                    if param.grad is not None:
                        optimizer_dict[param].step()
                        optimizer_dict[param].zero_grad()

                for param in model.parameters():
                    if param.requires_grad:
                        param.register_post_accumulate_grad_hook(optimizer_hook)

                optimizer_cls = LayerWiseDummyOptimizer
                optimizer_kwargs.update({"optimizer_dict": optimizer_dict})

            optimizer_kwargs.update({"params": param_groups})
            if args.optim == OptimizerNames.GALORE_ADAFACTOR:
                optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
        elif args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            if not is_lomo_available():
                raise ImportError(
                    "You need to install `lomo_optim` in order to use LOMO optimizers. "
                    "Install it with `pip install lomo-optim`."
                )
            if not is_accelerate_available("0.30.0"):
                raise ImportError("You need to have `accelerate>=0.30.0` to be able to use LOMO optimizers")

            if model is None:
                raise ValueError("You need to pass a `model` in order to correctly initialize a LOMO optimizer.")

            from lomo_optim import AdaLomo, Lomo

            if "ada" in args.optim:
                optimizer_cls = AdaLomo
            else:
                optimizer_cls = Lomo
optimizer_kwargs.update({"model": model}) elif args.optim == OptimizerNames.GROKADAMW: if not is_grokadamw_available(): raise ValueError("Please install grokadamw with `pip install grokadamw`") from grokadamw import GrokAdamW
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
            optimizer_cls = GrokAdamW
            optimizer_kwargs.update(
                {
                    "alpha_init": float(optim_args.get("alpha_init", 0.98)),
                    "lamb": float(optim_args.get("lamb", 2.0)),
                    "gamma": float(optim_args.get("gamma", 0.1)),
                    "grokking_signal_decay_rate": float(optim_args.get("grokking_signal_decay_rate", 0.1)),
                    "gradient_clipping": float(optim_args.get("gradient_clipping", 1.0)),
                }
            )
        elif args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
            if not is_torchao_available() or version.parse(importlib.metadata.version("torchao")) < version.parse(
                "0.4.0"
            ):
                raise ImportError(
                    "You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers. "
                    "Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/ao"
                )
            if version.parse(importlib.metadata.version("torch")) <= version.parse("2.4"):
                raise ImportError(
                    "You need to have `torch>2.4` in order to use torch 4-bit optimizers. "
                    "Install it with `pip install --upgrade torch`; it is available on PyPI. Otherwise, you need to install torch nightly."
                )

            from torchao.prototype.low_bit_optim import AdamW4bit
            optimizer_cls = AdamW4bit
            optimizer_kwargs.update(adam_kwargs)
        elif args.optim in [
            OptimizerNames.SCHEDULE_FREE_ADAMW,
            OptimizerNames.SCHEDULE_FREE_SGD,
        ]:
            if not is_schedulefree_available():
                raise ImportError(
                    "You need to install `schedulefree` in order to use schedulefree optimizers. "
                    "Install it with `pip install schedulefree`."
                )
            if not is_accelerate_available("0.30.0"):
                raise ImportError("You need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers")
            from schedulefree import AdamWScheduleFree, SGDScheduleFree
            additional_optim_kwargs = {}
            if args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW:
                optimizer_cls = AdamWScheduleFree
                additional_optim_kwargs = adam_kwargs
            elif args.optim == OptimizerNames.SCHEDULE_FREE_SGD:
                optimizer_cls = SGDScheduleFree
            else:
                raise ValueError("Invalid schedulefree optimizer")
            additional_optim_kwargs["weight_decay"] = args.weight_decay
            additional_optim_kwargs["warmup_steps"] = args.warmup_steps
            additional_optim_kwargs.update(
                {
                    "weight_lr_power": float(optim_args.get("weight_lr_power", 2.0)),
                    "r": float(optim_args.get("r", 0.0)),
                }
            )
            optimizer_kwargs.update(additional_optim_kwargs)
        else:
            raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
        return optimizer_cls, optimizer_kwargs
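    # Illustrative usage (a sketch; the argument values are hypothetical, defaults assumed from
    # TrainingArguments: adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8):
    #
    #     args = TrainingArguments(output_dir="out", optim="adamw_torch", learning_rate=5e-5)
    #     optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)
    #     # optimizer_cls    -> torch.optim.AdamW
    #     # optimizer_kwargs -> {"lr": 5e-5, "betas": (0.9, 0.999), "eps": 1e-8}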
    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
        """
        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called
        or passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        """
        if self.lr_scheduler is None:
            self.lr_scheduler = get_scheduler(
                self.args.lr_scheduler_type,
                optimizer=self.optimizer if optimizer is None else optimizer,
                num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
                num_training_steps=num_training_steps,
                scheduler_specific_kwargs=self.args.lr_scheduler_kwargs,
            )
            self._created_lr_scheduler = True
        return self.lr_scheduler
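    # Illustrative usage (a sketch; assumes the optimizer has already been created):
    #
    #     trainer.create_optimizer()
    #     scheduler = trainer.create_scheduler(num_training_steps=1000)
    #     # With lr_scheduler_type="linear" and warmup_steps=100, the LR warms up over the first
    #     # 100 steps and then decays linearly to 0 over the remaining 900.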
    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can.
        """
        try:
            dataset = dataloader.dataset
            # Special case for IterableDatasetShard, we need to dig deeper
            if isinstance(dataset, IterableDatasetShard):
                return len(dataloader.dataset.dataset)
            return len(dataloader.dataset)
        except (NameError, AttributeError, TypeError):  # no dataset or length, estimate by length of dataloader
            return len(dataloader) * self.args.per_device_train_batch_size
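    # Fallback arithmetic (a sketch): if the dataset has no usable length, a dataloader of
    # 125 batches with per_device_train_batch_size=8 is estimated as 125 * 8 = 1000 examples.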
    @staticmethod
    def num_tokens(train_dl: DataLoader, max_steps: Optional[int] = None) -> int:
        """
        Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating the dataloader.
        """
        train_tokens = 0
        try:
            for batch in train_dl:
                tokens = batch["input_ids"].numel()
                if max_steps is not None:
                    return tokens * max_steps
                train_tokens += tokens
        except KeyError:
            logger.warning("Cannot get num_tokens from dataloader")
        return train_tokens

    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """HP search setup code"""
        self._trial = trial
        if self.hp_search_backend is None or trial is None:
            return
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            params = self.hp_space(trial)
        elif self.hp_search_backend == HPSearchBackend.RAY:
            params = trial
            params.pop("wandb", None)
        elif self.hp_search_backend == HPSearchBackend.SIGOPT:
            params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
        elif self.hp_search_backend == HPSearchBackend.WANDB:
            params = trial
        for key, value in params.items():
            if not hasattr(self.args, key):
                logger.warning(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
                    " `TrainingArguments`."
                )
                continue
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            if old_attr is not None:
                value = type(old_attr)(value)
            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info(f"Trial: {trial.params}")
        if self.hp_search_backend == HPSearchBackend.SIGOPT:
            logger.info(f"SigOpt Assignments: {trial.assignments}")
        if self.hp_search_backend == HPSearchBackend.WANDB:
            logger.info(f"W&B Sweep parameters: {trial}")
        if self.is_deepspeed_enabled:
            if self.args.deepspeed is None:
                raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set")

            self.accelerator.free_memory()
            # Rebuild the deepspeed config to reflect the updated training parameters
            from accelerate.utils import DeepSpeedPlugin

            from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig
            self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
            self.args.hf_deepspeed_config.trainer_config_process(self.args)
            self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config)

            # From 1.0 on, we need to fully wipe the DS plugin when doing sweeps.
            # Simply calling `_reset_state` is enough and doesn't need a version pin.
            AcceleratorState()._reset_state()

        self.create_accelerator_and_postprocess()

    def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
        if self.hp_search_backend is None or trial is None:
            return
        metrics = metrics.copy()
        self.objective = self.compute_objective(metrics)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            import optuna
if hasattr(trial, "study") and not trial.study._is_multi_objective(): trial.report(self.objective, step) if trial.should_prune(): self.callback_handler.on_train_end(self.args, self.state, self.control) raise optuna.TrialPruned() elif self.hp_search_backend == HPSearchBackend.RAY: import ray.train with tempfile.TemporaryDirectory() as temp_checkpoint_dir: checkpoint = None if self.control.should_save: self._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir) checkpoint = ray.train.Checkpoint.from_directory(temp_checkpoint_dir) metrics["objective"] = self.objective ray.train.report(metrics, checkpoint=checkpoint)
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
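    # Illustrative use of the HP-search machinery above (a sketch; assumes `model_init` was passed
    # to the Trainer and that optuna is installed):
    #
    #     def hp_space(trial):
    #         return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}
    #
    #     best_run = trainer.hyperparameter_search(
    #         hp_space=hp_space, backend="optuna", n_trials=10, direction="minimize"
    #     )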
    def _tune_save_checkpoint(self, checkpoint_dir: str):
        output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
        self.save_model(output_dir, _internal_call=True)
        if self.args.should_save:
            # Update the `TrainerControl` state to where we are currently
            self.state.stateful_callbacks["TrainerControl"] = self.control.state()
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
    def call_model_init(self, trial=None):
        model_init_argcount = number_of_arguments(self.model_init)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")

        return model
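    # Illustrative `model_init` shapes accepted above (a sketch; the checkpoint name is hypothetical):
    #
    #     def model_init():                      # 0 arguments
    #         return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    #
    #     def model_init(trial):                 # 1 argument: receives the HP-search trial
    #         return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")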
    def torch_jit_model_eval(self, model, dataloader, training=False):
        if not training:
            if dataloader is None:
                logger.warning("failed to use PyTorch jit mode because the current dataloader is None.")
                return model
            example_batch = next(iter(dataloader))
            example_batch = self._prepare_inputs(example_batch)
            try:
                jit_model = copy.copy(model)
                jit_model.eval()
                original_forward = jit_model.__dict__.pop("_original_forward", None)
                # remove mixed precision hooks from the model
                if original_forward:
                    jit_model.forward = original_forward
                autocast_handler = AutocastKwargs(cache_enabled=False)
                with self.accelerator.autocast(autocast_handler=autocast_handler), torch.no_grad():
                    if version.parse(version.parse(torch.__version__).base_version) >= version.parse("2.0.0"):
                        if isinstance(example_batch, dict):
                            jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
                        else:
                            jit_model = torch.jit.trace(
                                jit_model,
                                example_kwarg_inputs={key: example_batch[key] for key in example_batch},
                                strict=False,
                            )
                    else:
                        jit_inputs = []
                        for key in example_batch:
                            example_tensor = torch.ones_like(example_batch[key])
                            jit_inputs.append(example_tensor)
                        jit_inputs = tuple(jit_inputs)
                        jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
                jit_model = torch.jit.freeze(jit_model)
                with torch.no_grad():
                    # two warm-up passes so the frozen/traced module can finish applying its JIT optimizations
                    jit_model(**example_batch)
                    jit_model(**example_batch)
                model = jit_model
                self.use_cpu_amp = False
            except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
                logger.warning(f"failed to use PyTorch jit mode due to: {e}.")
        return model

    def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
        if not is_ipex_available():
            raise ImportError(
                "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
                " to https://github.com/intel/intel-extension-for-pytorch."
            )

        import intel_extension_for_pytorch as ipex
        if not training:
            model.eval()
            dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
            # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
            model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
        else:
            if not model.training:
                model.train()
            model, self.optimizer = ipex.optimize(
                model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
            )

        return model

    def compare_trainer_and_checkpoint_args(self, training_args, trainer_state):
        attributes_map = {
            "logging_steps": "logging_steps",
            "eval_steps": "eval_steps",
            "save_steps": "save_steps",
        }
        has_warning = False
        warning_str = "Warning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: "
        for arg_attr, state_attr in attributes_map.items():
            arg_value = getattr(training_args, arg_attr, None)
            state_value = getattr(trainer_state, state_attr, None)

            if arg_value is not None and state_value is not None and arg_value != state_value:
                warning_str += f"\n\t{arg_attr}: {arg_value} (from args) != {state_value} (from trainer_state.json)"
                has_warning = True

        # train bs is special as we need to account for multi-GPU
        train_bs_args = training_args.per_device_train_batch_size
        train_bs_state = trainer_state.train_batch_size // max(1, training_args.n_gpu)
        if train_bs_args != train_bs_state:
            warning_str += f"\n\tper_device_train_batch_size: {train_bs_args} (from args) != {train_bs_state} (from trainer_state.json)"
            has_warning = True

        if has_warning:
            logger.warning_once(warning_str)

    def _wrap_model(self, model, training=True, dataloader=None):
        if self.args.use_ipex:
            dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
            model = self.ipex_optimize_model(model, training, dtype=dtype)

        if is_sagemaker_mp_enabled():
            # Wrapping the base model twice in a DistributedModel will raise an error.
            if isinstance(self.model_wrapped, smp.model.DistributedModel):
                return self.model_wrapped
            return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
        # train/eval could be run multiple times - if already wrapped, don't re-wrap it again
        if self.accelerator.unwrap_model(model) is not model:
            return model

        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex and training:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)

        # Multi-gpu training (should be after apex fp16 initialization) / 8-bit models do not support DDP
        if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False):
            model = nn.DataParallel(model)

        if self.args.jit_mode_eval:
            start_time = time.time()
            model = self.torch_jit_model_eval(model, dataloader, training)
            self.jit_compilation_time = round(time.time() - start_time, 4)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        if not training:
            return model

        # Distributed training (should be after apex fp16 initialization)
        # Distributed training using PyTorch FSDP
        if self.is_fsdp_xla_enabled:
            try:
                from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
                from torch_xla.distributed.fsdp import checkpoint_module
                from torch_xla.distributed.fsdp.wrap import (
                    size_based_auto_wrap_policy,
                    transformer_auto_wrap_policy,
                )
                if self.is_fsdp_xla_v2_enabled:
                    from torch_xla.experimental.spmd_fully_sharded_data_parallel import (
                        SpmdFullyShardedDataParallel as FSDPv2,
                    )
            except ImportError:
                raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")

            auto_wrap_policy = None
            auto_wrapper_callable = None
            default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None)
            fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get(
                "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap
            )
if self.args.fsdp_config["min_num_params"] > 0: auto_wrap_policy = functools.partial( size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["min_num_params"] ) elif fsdp_transformer_layer_cls_to_wrap is not None: transformer_cls_to_wrap = set() for layer_class in fsdp_transformer_layer_cls_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception("Could not find the transformer layer class to wrap in the model.") else: transformer_cls_to_wrap.add(transformer_cls)
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
                auto_wrap_policy = functools.partial(
                    transformer_auto_wrap_policy,
                    # Transformer layer class to wrap
                    transformer_layer_cls=transformer_cls_to_wrap,
                )
            fsdp_kwargs = self.args.xla_fsdp_config
            if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
                if model.config.use_cache:
                    logger.warning_once(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
                    )
                    model.config.use_cache = False

                # Apply gradient checkpointing to auto-wrapped sub-modules if specified
                def auto_wrapper_callable(m, *args, **kwargs):
                    target_cls = FSDP if not self.is_fsdp_xla_v2_enabled else FSDPv2
                    return target_cls(checkpoint_module(m), *args, **kwargs)
            # Wrap the base model with an outer FSDP wrapper
            if self.is_fsdp_xla_v2_enabled:

                def shard_output(output, mesh):
                    from .modeling_outputs import CausalLMOutputWithPast

                    real_output = None
                    if isinstance(output, torch.Tensor):
                        real_output = output
                    elif isinstance(output, tuple):
                        real_output = output[0]
                    elif isinstance(output, CausalLMOutputWithPast):
                        real_output = output.logits

                    if real_output is None:
                        raise ValueError("Something went wrong, the output of the model shouldn't be `None`")
                    xs.mark_sharding(real_output, mesh, ("fsdp", None, None))
                self.model = model = FSDPv2(
                    model,
                    shard_output=shard_output,
                    auto_wrap_policy=auto_wrap_policy,
                    auto_wrapper_callable=auto_wrapper_callable,
                )
            else:
                self.model = model = FSDP(
                    model,
                    auto_wrap_policy=auto_wrap_policy,
                    auto_wrapper_callable=auto_wrapper_callable,
                    **fsdp_kwargs,
                )

            # Patch `xm.optimizer_step` so it does not reduce gradients in this case,
            # as FSDP does not need gradient reduction over sharded parameters.
            def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
                loss = optimizer.step(**optimizer_args)
                if barrier:
                    xm.mark_step()
                return loss
            xm.optimizer_step = patched_optimizer_step
        elif is_sagemaker_dp_enabled():
            model = nn.parallel.DistributedDataParallel(
                model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
            )
        elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
            if is_torch_neuroncore_available():
                return model
            kwargs = {}
            if self.args.ddp_find_unused_parameters is not None:
                kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
            else:
                kwargs["find_unused_parameters"] = True
            if self.args.ddp_bucket_cap_mb is not None:
                kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb

            if self.args.ddp_broadcast_buffers is not None:
                kwargs["broadcast_buffers"] = self.args.ddp_broadcast_buffers

            self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs)

        return model

    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union["optuna.Trial", Dict[str, Any]] = None,
        ignore_keys_for_eval: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        Main training entry point.
        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous
                instance of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states
                loaded here.
            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments used to hide deprecated arguments.
        """
        if resume_from_checkpoint is False:
            resume_from_checkpoint = None
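        # Illustrative calls (a sketch; the checkpoint path is hypothetical):
        #
        #     trainer.train()                                               # fresh run
        #     trainer.train(resume_from_checkpoint=True)                    # resume from last checkpoint in args.output_dir
        #     trainer.train(resume_from_checkpoint="out/checkpoint-500")    # resume from a specific path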
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        args = self.args

        self.is_in_train = True

        # Attach NEFTune hooks if necessary
        if self.neftune_noise_alpha is not None:
            self.model = self._activate_neftune(self.model)

        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train and not self.is_model_parallel:
            self._move_model_to_device(self.model, args.device)
if "model_path" in kwargs: resume_from_checkpoint = kwargs.pop("model_path") warnings.warn( "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` " "instead.", FutureWarning, ) if len(kwargs) > 0: raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") # This might change the seed so needs to run first. self._hp_search_setup(trial) self._train_batch_size = self.args.train_batch_size
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None

        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
        if resume_from_checkpoint is not None:
            if not is_sagemaker_mp_enabled() and not self.is_deepspeed_enabled and not self.is_fsdp_enabled:
                self._load_from_checkpoint(resume_from_checkpoint)
            # In case of repeating the find_executable_batch_size, set `self._train_batch_size` properly
            state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            if state.train_batch_size is not None:
                self._train_batch_size = state.train_batch_size

        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model
        inner_training_loop = find_executable_batch_size(
            self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
        )
        if args.push_to_hub:
            try:
                # Disable progress bars when uploading models during checkpoints to avoid polluting stdout
                hf_hub_utils.disable_progress_bars()
                return inner_training_loop(
                    args=args,
                    resume_from_checkpoint=resume_from_checkpoint,
                    trial=trial,
                    ignore_keys_for_eval=ignore_keys_for_eval,
                )
            finally:
                hf_hub_utils.enable_progress_bars()
        else:
            return inner_training_loop(
                args=args,
                resume_from_checkpoint=resume_from_checkpoint,
                trial=trial,
                ignore_keys_for_eval=ignore_keys_for_eval,
            )
    def _inner_training_loop(
        self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
    ):
        self.accelerator.free_memory()
        self._train_batch_size = batch_size
        if self.args.auto_find_batch_size:
            if self.state.train_batch_size != self._train_batch_size:
                from accelerate.utils import release_memory

                (self.model_wrapped,) = release_memory(self.model_wrapped)
                self.model_wrapped = self.model
                # Check for DeepSpeed *after* the initial pass and modify the config
                if self.is_deepspeed_enabled:
                    # Temporarily unset `self.args.train_batch_size`
                    original_bs = self.args.per_device_train_batch_size
                    self.args.per_device_train_batch_size = self._train_batch_size // max(1, self.args.n_gpu)
                    self.propagate_args_to_deepspeed(True)
                    self.args.per_device_train_batch_size = original_bs
            self.state.train_batch_size = self._train_batch_size
        logger.debug(f"Currently training with a batch size of: {self._train_batch_size}")
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        if self.is_fsdp_xla_v2_enabled:
            train_dataloader = tpu_spmd_dataloader(train_dataloader)
        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = self._train_batch_size * args.gradient_accumulation_steps * args.world_size
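        # Worked example (a sketch): with _train_batch_size=8, gradient_accumulation_steps=4 and
        # world_size=2, the effective batch size is 8 * 4 * 2 = 64 samples per optimizer update.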
        len_dataloader = None
        num_train_tokens = None
        if has_length(train_dataloader):
            len_dataloader = len(train_dataloader)
            num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            num_examples = self.num_examples(train_dataloader)
            if args.max_steps > 0:
                max_steps = args.max_steps
                num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                    args.max_steps % num_update_steps_per_epoch > 0
                )
                # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
                # the best we can do.
                num_train_samples = args.max_steps * total_train_batch_size
                if args.include_tokens_per_second:
                    num_train_tokens = (
                        self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps
                    )
            else:
                max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(args.num_train_epochs)
                num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
                if args.include_tokens_per_second:
                    num_train_tokens = self.num_tokens(train_dataloader) * args.num_train_epochs
        elif args.max_steps > 0:  # Rely on max_steps when dataloader does not have a working size
            max_steps = args.max_steps
            # Setting a very large number of epochs so we go as many times as necessary over the iterator.
            num_train_epochs = sys.maxsize
            num_update_steps_per_epoch = max_steps
            num_examples = total_train_batch_size * args.max_steps
            num_train_samples = args.max_steps * total_train_batch_size
            if args.include_tokens_per_second:
                num_train_tokens = self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps
        else:
            raise ValueError(
                "args.max_steps must be set to a positive value if dataloader does not have a length, was"
                f" {args.max_steps}"
            )
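        # Worked example (a sketch): a sized dataloader of 1000 batches with
        # gradient_accumulation_steps=4 gives num_update_steps_per_epoch = 1000 // 4 = 250, so
        # num_train_epochs=3 yields max_steps = ceil(3 * 250) = 750 optimizer updates.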
        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model, creating new variables and module
                # references registered here no longer work on other gpus, breaking the module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
                    " (torchrun or torch.distributed.launch (deprecated))."
                )
            else:
                debug_overflow = DebugUnderflowOverflow(self.model)  # noqa

        delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled

        # We need to reset the scheduler, as its parameters may be different on subsequent calls
        if self._created_lr_scheduler:
            self.lr_scheduler = None
            self._created_lr_scheduler = False
        if self.is_deepspeed_enabled:
            self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps)

        if not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        self.state = TrainerState(
            stateful_callbacks=[
                cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
            ]
        )
        self.state.is_hyper_param_search = trial is not None
        self.state.train_batch_size = self._train_batch_size
        # Compute absolute values for logging, eval, and save if given as ratio
        if args.logging_steps is not None:
            if args.logging_steps < 1:
                self.state.logging_steps = math.ceil(max_steps * args.logging_steps)
            else:
                self.state.logging_steps = args.logging_steps
        if args.eval_steps is not None:
            if args.eval_steps < 1:
                self.state.eval_steps = math.ceil(max_steps * args.eval_steps)
            else:
                self.state.eval_steps = args.eval_steps
        if args.save_steps is not None:
            if args.save_steps < 1:
                self.state.save_steps = math.ceil(max_steps * args.save_steps)
            else:
                self.state.save_steps = args.save_steps

        # Activate gradient checkpointing if needed
        if args.gradient_checkpointing:
            self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs)
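        # Ratio semantics for the logging/eval/save steps computed above (a sketch): with
        # max_steps=750 and save_steps=0.1, checkpoints are saved every ceil(750 * 0.1) = 75 steps;
        # integer values >= 1 are used as-is.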
        model = self._wrap_model(self.model_wrapped)

        # as the model is wrapped, don't use `accelerator.prepare`
        # this is for unhandled cases such as
        # FSDP-XLA, SageMaker MP/DP, DataParallel, IPEX
        use_accelerator_prepare = True if model is self.model else False

        if use_accelerator_prepare and self.is_fsdp_enabled:
            # In case of auto_find_batch_size=True
            # Remove FSDP wrapping from sub-models.
            self.model = unwrap_model(self.model, recursive=True)

        if delay_optimizer_creation:
            if use_accelerator_prepare:
                # configure fsdp plugin for qlora if any
                self._fsdp_qlora_plugin_updates()
                if self.accelerator.mixed_precision != "fp8":
                    self.model = self.accelerator.prepare(self.model)
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        # prepare using `accelerator` prepare
        if use_accelerator_prepare:
            self.model.train()
            if hasattr(self.lr_scheduler, "step"):
                if self.use_apex:
                    model = self.accelerator.prepare(self.model)
                else:
                    model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)
            else:
                # to handle cases wherein we pass "DummyScheduler" such as when it is specified in DeepSpeed config.
                model, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
                    self.model, self.optimizer, self.lr_scheduler
                )
        elif self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
            # In this case we are in DDP + LOMO, which should be supported
            self.optimizer = self.accelerator.prepare(self.optimizer)

        if self.is_fsdp_enabled:
            self.model = self.model_wrapped = model
        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model

        # backward compatibility
        if self.is_deepspeed_enabled:
            self.deepspeed = self.model_wrapped

        # ckpt loading
        if resume_from_checkpoint is not None:
            if self.is_deepspeed_enabled:
                deepspeed_load_checkpoint(
                    self.model_wrapped, resume_from_checkpoint, load_module_strict=not _is_peft_model(self.model)
                )
            elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled:
                self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped)

        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(resume_from_checkpoint)
        # important: at this point:
        # self.model         is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model),
        #                    FSDP(Transformers Model), Dynamo Optimized Module(Transformers Model) etc.
        # Train!
        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {num_examples:,}")
        logger.info(f"  Num Epochs = {num_train_epochs:,}")
        logger.info(f"  Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}")
        if self.args.per_device_train_batch_size != self._train_batch_size:
            logger.info(f"  Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}")
        logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps:,}")
        logger.info(f"  Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}")
        self.state.epoch = 0
        start_time = time.time()
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        steps_trained_progress_bar = None

        # Check if continuing training from a checkpoint
        if resume_from_checkpoint is not None and os.path.isfile(
            os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            self.compare_trainer_and_checkpoint_args(self.args, self.state)
            self._load_callback_state()
            epochs_trained = int(self.state.global_step // num_update_steps_per_epoch)
            if not args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info(f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first" f" {steps_trained_in_current_epoch} batches in the first epoch." )
244
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/trainer.py
        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        if self.hp_name is not None and self._trial is not None:
            # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing
            # trial parameter to Train when using DDP.
            self.state.trial_name = self.hp_name(self._trial)
        if trial is not None:
            assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
            self.state.trial_params = hp_params(assignments)
        else:
            self.state.trial_params = None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()
        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0).to(args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()
        grad_norm: Optional[float] = None
        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

        if args.eval_on_start:
            self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True)

        for epoch in range(epochs_trained, num_train_epochs):
            epoch_dataloader = train_dataloader
            if hasattr(epoch_dataloader, "set_epoch"):
                epoch_dataloader.set_epoch(epoch)

            # Reset the past mems state at the beginning of each epoch if necessary.
            if args.past_index >= 0:
                self._past = None
            steps_in_epoch = (
                len(epoch_dataloader)
                if len_dataloader is not None
                else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)

            if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
                self._load_rng_state(resume_from_checkpoint)

            rng_to_sync = False
            steps_skipped = 0
            if steps_trained_in_current_epoch > 0:
                epoch_dataloader = skip_first_batches(epoch_dataloader, steps_trained_in_current_epoch)
                steps_skipped = steps_trained_in_current_epoch
                steps_trained_in_current_epoch = 0
                rng_to_sync = True
            step = -1
            epoch_iterator = iter(epoch_dataloader)
            # We chunk the epoch iterator into groups of `gradient_accumulation_steps` batches
            remainder = num_examples % args.gradient_accumulation_steps
            if remainder == 0:
                remainder = args.gradient_accumulation_steps
            update_step = -1
            total_updates = steps_in_epoch // args.gradient_accumulation_steps + 1
            for _ in range(total_updates):
                update_step += 1
                num_batches = args.gradient_accumulation_steps if update_step != (total_updates - 1) else remainder
                batch_samples, num_items_in_batch = self.get_batch_samples(epoch_iterator, num_batches)
                for i, inputs in enumerate(batch_samples):
                    step += 1
                    do_sync_step = (step + 1) % args.gradient_accumulation_steps == 0 or (step + 1) == steps_in_epoch
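                    # Accumulation example (a sketch): with gradient_accumulation_steps=4 and
                    # steps_in_epoch=10, do_sync_step is True after the 4th, 8th and 10th batch;
                    # the final partial chunk still triggers a sync because (step + 1) == steps_in_epoch.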
                    # Since we perform prefetching, we need to manually set sync_gradients
                    if not do_sync_step:
                        self.accelerator.gradient_state._set_sync_gradients(False)
                    else:
                        self.accelerator.gradient_state._set_sync_gradients(True)
                    if self.args.include_num_input_tokens_seen:
                        main_input_name = getattr(self.model, "main_input_name", "input_ids")
                        if main_input_name not in inputs:
                            logger.warning(
                                "Tried to track the number of tokens seen, however the current model is "
                                "not configured properly to know what item is the input. To fix this, add "
                                "a `main_input_name` attribute to the model class you are using."
                            )
                        else:
                            input_tokens = inputs[main_input_name].numel()
                            input_tokens = torch.tensor(input_tokens, device=self.args.device, dtype=torch.int64)
                            self.state.num_input_tokens_seen += (
                                self.accelerator.gather(input_tokens).sum().cpu().item()
                            )
                    if rng_to_sync:
                        self._load_rng_state(resume_from_checkpoint)
                        rng_to_sync = False
# Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
# We explicitly want to avoid relying on `accelerator.accumulate` for generation training
context = (
    functools.partial(self.accelerator.no_sync, model=model)
    if i != len(batch_samples) - 1
    and self.accelerator.distributed_type != DistributedType.DEEPSPEED
    else contextlib.nullcontext
)
with context():
    tr_loss_step = self.training_step(model, inputs, num_items_in_batch)
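# A minimal standalone sketch of the same pattern (illustrative, not part of the
# original source; `accelerator`, `model`, and `batches` are assumed to exist):
# gradients are only all-reduced on the final micro-batch of each accumulation
# window, and DeepSpeed is excluded above because it manages accumulation itself.
#
#     for i, batch in enumerate(batches):
#         ctx = accelerator.no_sync(model) if i < len(batches) - 1 else contextlib.nullcontext()
#         with ctx:
#             loss = model(**batch).loss
#             accelerator.backward(loss)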
if (
    args.logging_nan_inf_filter
    and not is_torch_xla_available()
    and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
):
    # if loss is nan or inf simply add the average of previous logged losses
    tr_loss = tr_loss + tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
else:
    if tr_loss.device != tr_loss_step.device:
        raise ValueError(
            f"Calculated loss must be on the original device: {tr_loss.device} but device in use is {tr_loss_step.device}"
        )
    tr_loss = tr_loss + tr_loss_step

self.current_flos += float(self.floating_point_ops(inputs))
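# Worked example of the NaN/inf substitution above (illustrative numbers): with
# self.state.global_step = 10, self._globalstep_last_logged = 8 and an accumulated
# tr_loss of 12.0, the bad step contributes 12.0 / (1 + 10 - 8) = 4.0, i.e. the
# average of the losses gathered since the last log, instead of NaN/inf.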
if do_sync_step:
    # Since we perform prefetching, we need to manually set sync_gradients to True
    self.accelerator.gradient_state._set_sync_gradients(True)

    # Gradient clipping
    if args.max_grad_norm is not None and args.max_grad_norm > 0:
        # deepspeed does its own clipping
        if is_sagemaker_mp_enabled() and args.fp16:
            _grad_norm = self.optimizer.clip_master_grads(args.max_grad_norm)
        elif self.use_apex:
            # Revert to normal clipping otherwise, handling Apex or full precision
            _grad_norm = nn.utils.clip_grad_norm_(
                amp.master_params(self.optimizer),
                args.max_grad_norm,
            )
        else:
            _grad_norm = self.accelerator.clip_grad_norm_(
                model.parameters(),
                args.max_grad_norm,
            )
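        # Note on clipping: all three branches rescale gradients so their global L2 norm
        # does not exceed max_grad_norm, and return the pre-clipping norm. Illustrative
        # numbers (not from the original source): with max_grad_norm = 1.0 and a gradient
        # norm of 4.0, every gradient is scaled by 1.0 / 4.0 = 0.25 and _grad_norm is 4.0.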
        if (
            is_accelerate_available()
            and self.accelerator.distributed_type == DistributedType.DEEPSPEED
        ):
            grad_norm = model.get_global_grad_norm()
            # In some cases the grad norm may not be a float
            if hasattr(grad_norm, "item"):
                grad_norm = grad_norm.item()
        else:
            grad_norm = _grad_norm

    self.control = self.callback_handler.on_pre_optimizer_step(args, self.state, self.control)

    self.optimizer.step()

    self.control = self.callback_handler.on_optimizer_step(args, self.state, self.control)
    optimizer_was_run = not self.accelerator.optimizer_step_was_skipped
    if optimizer_was_run:
        # Delay optimizer scheduling until metrics are generated
        if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            self.lr_scheduler.step()

    model.zero_grad()
    self.state.global_step += 1
    self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
    self.control = self.callback_handler.on_step_end(args, self.state, self.control)
    self._maybe_log_save_evaluate(
        tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time
    )
else:
    self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
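# Worked example of the fractional epoch bookkeeping above (illustrative numbers):
# with epoch = 2, step = 49, steps_skipped = 0 and steps_in_epoch = 200,
# self.state.epoch becomes 2 + 50 / 200 = 2.25.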
        # PyTorch/XLA relies on the data loader to insert the mark_step for
        # each step. Since we are breaking the loop early, we need to manually
        # insert the mark_step here.
        if self.control.should_epoch_stop or self.control.should_training_stop:
            if is_torch_xla_available():
                xm.mark_step()
            break

    # We also need to break out of the nested loop
    if self.control.should_epoch_stop or self.control.should_training_stop:
        if is_torch_xla_available():
            xm.mark_step()
        break

if step < 0:
    logger.warning(
        "There are no samples in your epoch_iterator; stopping training at step"
        f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time)

if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
    if is_torch_xla_available():
        # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
        xm.master_print(met.metrics_report())
    else:
        logger.warning(
            "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
            "configured. Check your training configuration if this is unexpected."
        )
if self.control.should_training_stop:
    break

if args.past_index and hasattr(self, "_past"):
    # Clean the state at the end of training
    delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
    # Wait for everyone to get here so we are sure the model has been saved by process 0.
    if is_torch_xla_available():
        xm.rendezvous("load_best_model_at_end")
    elif args.parallel_mode == ParallelMode.DISTRIBUTED:
        dist.barrier()
    elif is_sagemaker_mp_enabled():
        smp.barrier()

    self._load_best_model()

# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
effective_global_step = max(self.state.global_step, 0.001)  # Avoid ZeroDivisionError
train_loss = self._total_loss_scalar / effective_global_step
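# Worked example of the final averaging above (illustrative numbers): if
# _total_loss_scalar sums to 250.0 after global_step = 100 optimizer steps,
# train_loss = 250.0 / 100 = 2.5; the max(..., 0.001) floor only guards the
# degenerate case where no step ever ran.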
metrics = speed_metrics(
    "train",
    start_time,
    num_samples=num_train_samples,
    num_steps=self.state.max_steps,
    num_tokens=num_train_tokens,
)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss

self.is_in_train = False

self._memory_tracker.stop_and_update_metrics(metrics)

self.log(metrics)

run_dir = self._get_output_dir(trial)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)
# Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
    for checkpoint in checkpoints_sorted:
        if not os.path.samefile(checkpoint, self.state.best_model_checkpoint):
            logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
            shutil.rmtree(checkpoint, ignore_errors=True)

self.control = self.callback_handler.on_train_end(args, self.state, self.control)

# Wait for the checkpoint to be uploaded.
self._finish_current_push()

# After training we make sure to retrieve back the original forward pass method
# for the embedding layer by removing the forward post hook.
if self.neftune_noise_alpha is not None:
    self._deactivate_neftune(self.model)
    return TrainOutput(self.state.global_step, train_loss, metrics)

def _get_output_dir(self, trial):
    if self.hp_search_backend is not None and trial is not None:
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            run_id = trial.number
        elif self.hp_search_backend == HPSearchBackend.RAY:
            import ray.train

            run_id = ray.train.get_context().get_trial_id()
        elif self.hp_search_backend == HPSearchBackend.SIGOPT:
            run_id = trial.id
        elif self.hp_search_backend == HPSearchBackend.WANDB:
            import wandb

            run_id = wandb.run.id
        run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
        run_dir = os.path.join(self.args.output_dir, run_name)
    else:
        run_dir = self.args.output_dir
    return run_dir
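# Illustrative example of the directory resolution above (not part of the original
# source): during an Optuna sweep with args.output_dir = "out", no hp_name and
# trial.number = 7, this resolves to run_dir = os.path.join("out", "run-7");
# outside of hyperparameter search it is simply args.output_dir.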
def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
    if model is None:
        model = self.model