ignore_data_skip: bool = field(
default=False,
metadata={
"help": (
"When resuming training, whether or not to skip the first epochs and batches to get to the same"
" training data."
)
},
)
fsdp: Optional[Union[List[FSDPOption], str]] = field(
default="",
metadata={
"help": (
"Whether or not to use PyTorch Fully Sharded Data Parallel (FSDP) training (in distributed training"
" only). The base option should be `full_shard`, `shard_grad_op` or `no_shard` and you can add"
" CPU-offload to `full_shard` or `shard_grad_op` like this: `full_shard offload` or `shard_grad_op"
" offload`. You can add auto-wrap to `full_shard` or `shard_grad_op` with the same syntax: `full_shard"
" auto_wrap` or `shard_grad_op auto_wrap`."
),
},
)
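# Illustrative sketch (not from the original source): `fsdp` accepts a space-separated
# string of the options described above, so a CPU-offloaded, auto-wrapped full-shard run
# could be configured as (the output dir name is a placeholder):
#
#     args = TrainingArguments(output_dir="out", fsdp="full_shard offload auto_wrap")
#
# or from the CLI as `--fsdp "full_shard offload auto_wrap"`.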
fsdp_min_num_params: int = field(
default=0,
metadata={
"help": (
"This parameter is deprecated. FSDP's minimum number of parameters for default auto wrapping (useful"
" only when the `fsdp` field is passed)."
)
},
)
fsdp_config: Optional[Union[dict, str]] = field(
default=None,
metadata={
"help": (
"Config to be used with FSDP (PyTorch Fully Sharded Data Parallel). The value is either an "
"FSDP JSON config file (e.g., `fsdp_config.json`) or an already loaded JSON config as a `dict`."
)
},
)
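# A minimal sketch of the two accepted forms, assuming a hypothetical `fsdp_config.json`
# and placeholder values:
#
#     args = TrainingArguments(output_dir="out", fsdp="full_shard", fsdp_config="fsdp_config.json")
#     # or pass the already loaded config directly:
#     args = TrainingArguments(output_dir="out", fsdp="full_shard", fsdp_config={"min_num_params": 1_000_000})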
fsdp_transformer_layer_cls_to_wrap: Optional[str] = field(
default=None,
metadata={
"help": (
"This parameter is deprecated. Transformer layer class name (case-sensitive) to wrap, e.g.,"
" `BertLayer`, `GPTJBlock`, `T5Block`, ... (useful only when the `fsdp` flag is passed)."
)
},
)
accelerator_config: Optional[Union[dict, str]] = field(
default=None,
metadata={
"help": (
"Config to be used with the internal Accelerator object initialization. The value is either an "
"accelerator JSON config file (e.g., `accelerator_config.json`) or an already loaded JSON config as a `dict`."
)
},
)
deepspeed: Optional[Union[dict, str]] = field(
default=None,
metadata={
"help": (
"Enable DeepSpeed and pass either the path to a DeepSpeed JSON config file (e.g. `ds_config.json`) or an"
" already loaded JSON config as a `dict`."
)
},
)
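# A minimal sketch, assuming a hypothetical `ds_config.json` on disk (the inline dict
# uses a standard DeepSpeed key purely as an example):
#
#     args = TrainingArguments(output_dir="out", deepspeed="ds_config.json")
#     # or inline:
#     args = TrainingArguments(output_dir="out", deepspeed={"zero_optimization": {"stage": 2}})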
label_smoothing_factor: float = field(
default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."}
)
default_optim = "adamw_torch"
# XXX: enable when pytorch==2.1.0 comes out - we want to give it time to get all the bugs sorted out
# if is_torch_available() and version.parse(version.parse(torch.__version__).base_version) >= version.parse("2.1.0"):
# default_optim = "adamw_torch_fused"
# and update the doc above to:
# optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch_fused"` (for torch<2.1.0 `"adamw_torch"`):
optim: Union[OptimizerNames, str] = field(
default=default_optim,
metadata={"help": "The optimizer to use."},
)
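# Illustrative usage (assumed values): any `OptimizerNames` value can be passed as a
# plain string, e.g.
#
#     args = TrainingArguments(output_dir="out", optim="adamw_torch_fused")
#
# with extra optimizer arguments, if any, carried by `optim_args` below.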
optim_args: Optional[str] = field(default=None, metadata={"help": "Optional arguments to supply to optimizer."})
adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
group_by_length: bool = field(
default=False,
metadata={"help": "Whether or not to group samples of roughly the same length together when batching."},
)
length_column_name: Optional[str] = field(
default="length",
metadata={"help": "Column name with precomputed lengths to use when grouping by length."},
)
report_to: Union[None, str, List[str]] = field(
default=None, metadata={"help": "The list of integrations to report the results and logs to."}
)
ddp_find_unused_parameters: Optional[bool] = field(
default=None,
metadata={
"help": (
"When using distributed training, the value of the flag `find_unused_parameters` passed to "
"`DistributedDataParallel`."
)
},
)
ddp_bucket_cap_mb: Optional[int] = field(
default=None,
metadata={
"help": (
"When using distributed training, the value of the flag `bucket_cap_mb` passed to "
"`DistributedDataParallel`."
)
},
)
ddp_broadcast_buffers: Optional[bool] = field(
default=None,
metadata={
"help": (
"When using distributed training, the value of the flag `broadcast_buffers` passed to "
"`DistributedDataParallel`."
)
},
)
dataloader_pin_memory: bool = field(
default=True, metadata={"help": "Whether or not to pin memory for DataLoader."}
)
dataloader_persistent_workers: bool = field(
default=False,
metadata={
"help": "If True, the data loader will not shut down the worker processes after a dataset has been consumed once. This keeps the workers' Dataset instances alive. Can potentially speed up training, but will increase RAM usage."
},
)
skip_memory_metrics: bool = field(
default=True, metadata={"help": "Whether or not to skip adding of memory profiler reports to metrics."}
)
use_legacy_prediction_loop: bool = field(
default=False, metadata={"help": "Whether or not to use the legacy prediction_loop in the Trainer."}
)
push_to_hub: bool = field(
default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
)
resume_from_checkpoint: Optional[str] = field(
default=None,
metadata={"help": "The path to a folder with a valid checkpoint for your model."},
)
hub_model_id: Optional[str] = field(
default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
)
hub_strategy: Union[HubStrategy, str] = field(
default="every_save",
metadata={"help": "The hub strategy to use when `--push_to_hub` is activated."},
)
hub_token: Optional[str] = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})
hub_private_repo: Optional[bool] = field(
default=None,
metadata={
"help": "Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists."
},
)
hub_always_push: bool = field(
default=False,
metadata={"help": "Unless `True`, the Trainer will skip pushes if the previous one wasn't finished yet."},
)
gradient_checkpointing: bool = field(
default=False,
metadata={
"help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
},
)
gradient_checkpointing_kwargs: Optional[Union[dict, str]] = field(
default=None,
metadata={
"help": "Gradient checkpointing keyword arguments such as `use_reentrant`. Will be passed to `torch.utils.checkpoint.checkpoint` through `model.gradient_checkpointing_enable`."
},
)
include_inputs_for_metrics: bool = field(
default=False,
metadata={
"help": "This argument is deprecated and will be removed in version 5 of 🤗 Transformers. Use `include_for_metrics` instead."
},
)
include_for_metrics: List[str] = field(
default_factory=list,
metadata={
"help": "List of strings to specify additional data to include in the `compute_metrics` function. "
"Options: 'inputs', 'loss'."
},
)
eval_do_concat_batches: bool = field(
default=True,
metadata={
"help": "Whether to recursively concat inputs/losses/labels/predictions across batches. If `False`, will instead store them as lists, with each batch kept separate."
},
)
# Deprecated arguments
fp16_backend: str = field(
default="auto",
metadata={
"help": "Deprecated. Use half_precision_backend instead",
"choices": ["auto", "apex", "cpu_amp"],
},
)
evaluation_strategy: Union[IntervalStrategy, str] = field(
default=None,
metadata={"help": "Deprecated. Use `eval_strategy` instead"},
)
push_to_hub_model_id: Optional[str] = field(
default=None, metadata={"help": "The name of the repository to which to push the `Trainer`."}
)
push_to_hub_organization: Optional[str] = field(
default=None, metadata={"help": "The name of the organization to which to push the `Trainer`."}
)
push_to_hub_token: Optional[str] = field(
default=None, metadata={"help": "The token to use to push to the Model Hub."}
)
_n_gpu: int = field(init=False, repr=False, default=-1)
mp_parameters: str = field(
default="",
metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in Trainer"},
)
auto_find_batch_size: bool = field(
default=False,
metadata={
"help": (
"Whether to automatically halve the batch size and rerun the training loop each time a CUDA"
" out-of-memory error is encountered"
)
},
)
full_determinism: bool = field(
default=False,
metadata={
"help": (
"Whether to call enable_full_determinism instead of set_seed for reproducibility in distributed"
" training. Important: this will negatively impact the performance, so only use it for debugging."
)
},
)
torchdynamo: Optional[str] = field(
default=None,
metadata={
"help": "This argument is deprecated, use `--torch_compile_backend` instead.",
},
)
ray_scope: Optional[str] = field(
default="last",
metadata={
"help": (
'The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray'
" will then use the last checkpoint of all trials, compare those, and select the best one. However,"
" other options are also available. See the Ray documentation"
" (https://docs.ray.io/en/latest/tune/api_docs/analysis.html"
"#ray.tune.ExperimentAnalysis.get_best_trial)"
" for more options."
)
},
)
ddp_timeout: Optional[int] = field(
default=1800,
metadata={
"help": "Overrides the default timeout for distributed training (value should be given in seconds)."
},
)
torch_compile: bool = field(
default=False, metadata={"help": "If set to `True`, the model will be wrapped in `torch.compile`."}
)
torch_compile_backend: Optional[str] = field(
default=None,
metadata={
"help": "Which backend to use with `torch.compile`; passing one will trigger model compilation.",
},
)
torch_compile_mode: Optional[str] = field(
default=None,
metadata={
"help": "Which mode to use with `torch.compile`; passing one will trigger model compilation.",
},
)
dispatch_batches: Optional[bool] = field(
default=None,
metadata={"help": "Deprecated. Pass {'dispatch_batches':VALUE} to `accelerator_config`."},
)
split_batches: Optional[bool] = field(
default=None,
metadata={"help": "Deprecated. Pass {'split_batches':True} to `accelerator_config`."},
)
include_tokens_per_second: Optional[bool] = field(
default=False,
metadata={"help": "If set to `True`, the speed metrics will include `tgs` (tokens per second per device)."},
)
include_num_input_tokens_seen: Optional[bool] = field(
default=False,
metadata={
"help": "If set to `True`, will track the number of input tokens seen throughout training. (May be slower in distributed training)"
},
)
neftune_noise_alpha: Optional[float] = field(
default=None,
metadata={
"help": "Activates NEFTune noise embeddings for the model. NEFTune has been shown to drastically improve model performance for instruction fine-tuning. Check out the original paper here: https://arxiv.org/abs/2310.05914 and the original code here: https://github.com/neelsjain/NEFTune. Only supported for `PreTrainedModel` and `PeftModel` classes."
},
)
optim_target_modules: Union[None, str, List[str]] = field(
default=None,
metadata={
"help": "Target modules for the optimizer defined in the `optim` argument. Only used for the GaLore optimizer at the moment."
},
)
batch_eval_metrics: bool = field(
default=False,
metadata={"help": "Break eval metrics calculation into batches to save memory."},
)
eval_on_start: bool = field(
default=False,
metadata={
"help": "Whether to run through the entire `evaluation` step at the very beginning of training as a sanity check."
},
)
use_liger_kernel: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to enable the Liger Kernel for model training."},
)
eval_use_gather_object: Optional[bool] = field(
default=False,
metadata={
"help": "Whether to recursively gather objects in a nested list/tuple/dictionary of objects from all devices."
},
)
average_tokens_across_devices: Optional[bool] = field(
default=False,
metadata={
"help": "Whether or not to average tokens across devices. If enabled, will use all_reduce to "
"synchronize num_tokens_in_batch for precise loss calculation. Reference: "
"https://github.com/huggingface/transformers/issues/34242"
},
)
def __post_init__(self):
# Parse in args that could be `dict` sent in from the CLI as a string
for field in _VALID_DICT_FIELDS:
passed_value = getattr(self, field)
# We only want to do this if the str starts with a bracket to indicate a `dict`
# else it's likely a filename if supported
if isinstance(passed_value, str) and passed_value.startswith("{"):
loaded_dict = json.loads(passed_value)
# Convert str values to types if applicable
loaded_dict = _convert_str_dict(loaded_dict)
setattr(self, field, loaded_dict)
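# Illustrative round-trip of the parsing above (assumed values): a CLI flag such as
#
#     --fsdp_config '{"min_num_params": "2000", "xla": "true"}'
#
# arrives as a string, is detected by the leading "{", decoded with `json.loads`, and
# `_convert_str_dict` then coerces the string values, yielding roughly
# `{"min_num_params": 2000, "xla": True}` (the exact coercion rules live in `_convert_str_dict`).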
# expand paths, if not os.makedirs("~/bar") will make directory
# in the current directory instead of the actual home
# see https://github.com/huggingface/transformers/issues/10628
if self.output_dir is not None:
self.output_dir = os.path.expanduser(self.output_dir)
if self.logging_dir is None and self.output_dir is not None:
self.logging_dir = os.path.join(self.output_dir, default_logdir())
if self.logging_dir is not None:
self.logging_dir = os.path.expanduser(self.logging_dir)
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
if self.evaluation_strategy is not None:
warnings.warn(
"`evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead",
FutureWarning,
)
self.eval_strategy = self.evaluation_strategy
if isinstance(self.eval_strategy, EvaluationStrategy):
warnings.warn(
"using `EvaluationStrategy` for `eval_strategy` is deprecated and will be removed in version 5"
" of 🤗 Transformers. Use `IntervalStrategy` instead",
FutureWarning,
)
# Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it.
self.eval_strategy = self.eval_strategy.value
if self.no_cuda:
warnings.warn(
"using `no_cuda` is deprecated and will be removed in version 5.0 of 🤗 Transformers. "
"Use `use_cpu` instead",
FutureWarning,
)
self.use_cpu = self.no_cuda
self.eval_strategy = IntervalStrategy(self.eval_strategy)
self.logging_strategy = IntervalStrategy(self.logging_strategy)
self.save_strategy = SaveStrategy(self.save_strategy)
self.hub_strategy = HubStrategy(self.hub_strategy)
self.lr_scheduler_type = SchedulerType(self.lr_scheduler_type)
if self.do_eval is False and self.eval_strategy != IntervalStrategy.NO:
self.do_eval = True
if self.torch_empty_cache_steps is not None:
if not (isinstance(self.torch_empty_cache_steps, int) and self.torch_empty_cache_steps > 0):
raise ValueError(
f"`torch_empty_cache_steps` must be an integer bigger than 0, got {self.torch_empty_cache_steps}."
)
# eval_steps has to be defined and non-zero, falls back to logging_steps if the latter is non-zero
if self.eval_strategy == IntervalStrategy.STEPS and (self.eval_steps is None or self.eval_steps == 0):
if self.logging_steps > 0:
logger.info(f"using `logging_steps` to initialize `eval_steps` to {self.logging_steps}")
self.eval_steps = self.logging_steps
else:
raise ValueError(
f"evaluation strategy {self.eval_strategy} requires either non-zero --eval_steps or"
" --logging_steps"
)
# logging_steps must be non-zero for logging_strategy that is other than 'no'
if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps == 0:
raise ValueError(f"logging strategy {self.logging_strategy} requires non-zero --logging_steps")
if self.logging_strategy == IntervalStrategy.STEPS and self.logging_steps > 1:
if self.logging_steps != int(self.logging_steps):
raise ValueError(f"--logging_steps must be an integer if bigger than 1: {self.logging_steps}")
self.logging_steps = int(self.logging_steps)
if self.eval_strategy == IntervalStrategy.STEPS and self.eval_steps > 1:
if self.eval_steps != int(self.eval_steps):
raise ValueError(f"--eval_steps must be an integer if bigger than 1: {self.eval_steps}")
self.eval_steps = int(self.eval_steps)
if self.save_strategy == SaveStrategy.STEPS and self.save_steps > 1:
if self.save_steps != int(self.save_steps):
raise ValueError(f"--save_steps must be an integer if bigger than 1: {self.save_steps}")
self.save_steps = int(self.save_steps)
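# Worked example of the coercion above (assumed values): `--eval_steps 0.1` stays a float
# and is later interpreted as a ratio of the total training steps; `--eval_steps 500` or
# `--eval_steps 500.0` becomes the int 500; `--eval_steps 500.5` raises, since any value
# bigger than 1 must be a whole number of steps.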
# Sanity checks for load_best_model_at_end: we require save and eval strategies to be compatible.
if self.load_best_model_at_end and self.save_strategy != SaveStrategy.BEST:
if self.eval_strategy != self.save_strategy:
raise ValueError(
"--load_best_model_at_end requires the save and eval strategy to match, but found\n- Evaluation "
f"strategy: {self.eval_strategy}\n- Save strategy: {self.save_strategy}"
)
if self.eval_strategy == IntervalStrategy.STEPS and self.save_steps % self.eval_steps != 0:
if self.eval_steps < 1 or self.save_steps < 1:
if not (self.eval_steps < 1 and self.save_steps < 1):
raise ValueError(
"--load_best_model_at_end requires the saving steps to be a multiple of the evaluation "
"steps, which cannot get guaranteed when mixing ratio and absolute steps for save_steps "
f"{self.save_steps} and eval_steps {self.eval_steps}."
)
# Work around floating point precision issues
LARGE_MULTIPLIER = 1_000_000
if (self.save_steps * LARGE_MULTIPLIER) % (self.eval_steps * LARGE_MULTIPLIER) != 0:
raise ValueError(
"--load_best_model_at_end requires the saving steps to be a multiple of the evaluation "
f"steps, but found {self.save_steps}, which is not a multiple of {self.eval_steps}."
)
else:
raise ValueError(
"--load_best_model_at_end requires the saving steps to be a round multiple of the evaluation "
f"steps, but found {self.save_steps}, which is not a round multiple of {self.eval_steps}."
)
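# Worked example of the checks above (assumed values): `--save_steps 1000 --eval_steps 500`
# passes, since every checkpoint coincides with an evaluation, while `--save_steps 1000
# --eval_steps 300` raises. For the pure-ratio case, plain float modulo is unreliable
# (e.g. 0.3 % 0.1 != 0 in IEEE arithmetic), which is why both values are scaled by
# LARGE_MULTIPLIER before the modulo test.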
safetensors_available = is_safetensors_available()
if self.save_safetensors and not safetensors_available:
raise ValueError(f"--save_safetensors={self.save_safetensors} requires safetensors to be installed!")
if not self.save_safetensors and safetensors_available:
logger.info(
f"Found safetensors installation, but --save_safetensors={self.save_safetensors}. "
"Safetensors is the preferred weights-saving format for security and performance reasons. "
"If your model cannot be saved with safetensors, please feel free to open an issue at "
"https://github.com/huggingface/safetensors!"
)
if (
self.load_best_model_at_end or self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU
) and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = not (self.metric_for_best_model.endswith("loss"))
if self.run_name is None:
self.run_name = self.output_dir
if self.framework == "pt" and is_torch_available():
if self.fp16_backend and self.fp16_backend != "auto":
warnings.warn(
"`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use"
" `half_precision_backend` instead",
FutureWarning,
)
self.half_precision_backend = self.fp16_backend
if self.bf16 or self.bf16_full_eval:
if self.use_cpu and not is_torch_bf16_cpu_available() and not is_torch_xla_available():
# cpu
raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10")
elif not self.use_cpu:
if torch.cuda.is_available() and not is_torch_bf16_gpu_available():
# gpu
raise ValueError(
"Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0"
)
if self.fp16 and self.bf16:
raise ValueError("At most one of fp16 and bf16 can be True, but not both")
if self.fp16_full_eval and self.bf16_full_eval:
raise ValueError("At most one of fp16 and bf16 can be True for full eval, but not both")
if self.bf16:
if self.half_precision_backend == "apex":
raise ValueError(" `--half_precision_backend apex`: GPU bf16 is not supported by apex.")
if self.lr_scheduler_type == SchedulerType.REDUCE_ON_PLATEAU:
if self.eval_strategy == IntervalStrategy.NO:
raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires an eval strategy")
if not is_torch_available():
raise ValueError("lr_scheduler_type reduce_lr_on_plateau requires torch to be installed")
self.optim = OptimizerNames(self.optim)
if self.adafactor:
warnings.warn(
"`--adafactor` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--optim"
" adafactor` instead",
FutureWarning,
)
self.optim = OptimizerNames.ADAFACTOR
if self.optim == OptimizerNames.ADAMW_TORCH_FUSED and is_torch_available():
if version.parse(version.parse(torch.__version__).base_version) < version.parse("2.0.0"):
raise ValueError("--optim adamw_torch_fused requires PyTorch 2.0 or higher")
# there is a bug in fp16/AMP in pt-2.0.0
if version.parse(version.parse(torch.__version__).base_version) == version.parse("2.0.0") and self.fp16:
raise ValueError("--optim adamw_torch_fused with --fp16 requires PyTorch>2.0")
# We need to setup the accelerator config here *before* the first call to `self.device`
if is_accelerate_available():
if not isinstance(self.accelerator_config, AcceleratorConfig):
if self.accelerator_config is None:
self.accelerator_config = AcceleratorConfig()
elif isinstance(self.accelerator_config, dict):
self.accelerator_config = AcceleratorConfig(**self.accelerator_config)
# Check that a user didn't pass in the class instantiator
# such as `accelerator_config = AcceleratorConfig`
elif isinstance(self.accelerator_config, type):
raise NotImplementedError(
"Tried passing in a callable to `accelerator_config`, but this is not supported. "
"Please pass in a fully constructed `AcceleratorConfig` object instead."
)
else:
self.accelerator_config = AcceleratorConfig.from_json_file(self.accelerator_config)
if self.dispatch_batches is not None:
warnings.warn(
"Using `--dispatch_batches` is deprecated and will be removed in version 4.41 of 🤗 Transformers. Use"
" `--accelerator_config {'dispatch_batches':VALUE}` instead",
FutureWarning,
)
self.accelerator_config.dispatch_batches = self.dispatch_batches
if self.split_batches is not None:
warnings.warn(
"Using `--split_batches` is deprecated and will be removed in version 4.41 of 🤗 Transformers. Use"
" `--accelerator_config {'split_batches':VALUE}` instead",
FutureWarning,
)
self.accelerator_config.split_batches = self.split_batches
# Initialize device before we proceed
if self.framework == "pt" and is_torch_available():
self.device
# Disable average tokens when using single device
if self.average_tokens_across_devices:
try:
if self.world_size == 1:
logger.warning(
"average_tokens_across_devices is set to True but it is invalid when world size is"
" 1. Setting it to False automatically."
)
self.average_tokens_across_devices = False
except ImportError as e:
logger.warning(f"Cannot determine world size due to {e}. Setting average_tokens_across_devices to False.")
self.average_tokens_across_devices = False
if self.torchdynamo is not None:
warnings.warn(
"`torchdynamo` is deprecated and will be removed in version 5 of 🤗 Transformers. Use"
" `torch_compile_backend` instead",
FutureWarning,
)
self.torch_compile_backend = self.torchdynamo
if (self.torch_compile_mode is not None or self.torch_compile_backend is not None) and not self.torch_compile:
self.torch_compile = True
if self.torch_compile and self.torch_compile_backend is None:
self.torch_compile_backend = "inductor"
# accelerate integration for torch compile
if self.torch_compile:
# set env vars for accelerate
prefix = "ACCELERATE_DYNAMO_"
os.environ[prefix + "BACKEND"] = self.torch_compile_backend
if self.torch_compile_mode is not None:
os.environ[prefix + "MODE"] = self.torch_compile_mode
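# Illustrative effect of the block above (assumed flags): `--torch_compile
# --torch_compile_mode max-autotune` exports ACCELERATE_DYNAMO_BACKEND="inductor"
# (the fallback chosen above) and ACCELERATE_DYNAMO_MODE="max-autotune" for
# Accelerate to pick up.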
if self.framework == "pt" and is_torch_available() and self.torch_compile:
if is_torch_tf32_available():
if (self.tf32 is None and not self.fp16) or self.bf16:
logger.info(
"Setting TF32 in CUDA backends to speedup torch compile, you won't see any improvement"
" otherwise."
)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
else:
logger.warning(
"The speedups for torchdynamo mostly come with Ampere or newer GPUs, which were not detected here."
)
if self.framework == "pt" and is_torch_available() and self.tf32 is not None:
if self.tf32:
if is_torch_tf32_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
else:
raise ValueError("--tf32 requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7")
else:
if is_torch_tf32_available():
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
# no need to assert on else
# if training args is specified, it will override the one specified in the accelerate config
if self.half_precision_backend != "apex":
mixed_precision_dtype = os.environ.get("ACCELERATE_MIXED_PRECISION", "no")
if self.fp16:
mixed_precision_dtype = "fp16"
elif self.bf16:
mixed_precision_dtype = "bf16"
os.environ["ACCELERATE_MIXED_PRECISION"] = mixed_precision_dtype
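# Illustrative effect (assumed flag): passing `--bf16` exports
# ACCELERATE_MIXED_PRECISION="bf16", overriding whatever an accelerate config file
# specified; with neither `--fp16` nor `--bf16`, the pre-existing environment value
# (defaulting to "no") is re-exported unchanged.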
if self.report_to is None:
logger.info(
"The default value for the training argument `--report_to` will change in v5 (from all installed "
"integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as "
"now. You should start updating your code and make this info disappear :-)."
)
self.report_to = "all"
if self.report_to == "all" or self.report_to == ["all"]:
# Import at runtime to avoid a circular import.
from .integrations import get_available_reporting_integrations
self.report_to = get_available_reporting_integrations()
if "codecarbon" in self.report_to and torch.version.hip:
logger.warning(
"When using the Trainer, CodeCarbonCallback requires the `codecarbon` package, which is not compatible with AMD ROCm (https://github.com/mlco2/codecarbon/pull/490). Automatically disabling the codecarbon callback. Reference: https://huggingface.co/docs/transformers/v4.39.3/en/main_classes/trainer#transformers.TrainingArguments.report_to."
)
self.report_to.remove("codecarbon")
elif self.report_to == "none" or self.report_to == ["none"]:
self.report_to = []
elif not isinstance(self.report_to, list):
self.report_to = [self.report_to]
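# Illustrative resolution of the chain above (assumed environment): with, e.g.,
# tensorboard and wandb installed, `--report_to all` (or the current default of None)
# resolves to ["tensorboard", "wandb"]; `--report_to none` resolves to []; and a bare
# string such as `--report_to wandb` is normalized to ["wandb"].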
if self.warmup_ratio < 0 or self.warmup_ratio > 1:
raise ValueError("warmup_ratio must lie in range [0,1]")
elif self.warmup_ratio > 0 and self.warmup_steps > 0:
logger.info(
"Both warmup_ratio and warmup_steps given, warmup_steps will override any effect of warmup_ratio"
" during training"
)
if not isinstance(self.warmup_steps, int) or self.warmup_steps < 0:
raise ValueError("warmup_steps must be of type int and must be 0 or a positive integer.")
if isinstance(self.fsdp, bool):
self.fsdp = [FSDPOption.FULL_SHARD] if self.fsdp else ""
if isinstance(self.fsdp, str):
self.fsdp = [FSDPOption(s) for s in self.fsdp.split()]
if self.fsdp == [FSDPOption.OFFLOAD]:
raise ValueError(
"`--fsdp offload` can't work on its own. It needs to be added to `--fsdp full_shard` or "
'`--fsdp shard_grad_op`. For example, `--fsdp "full_shard offload"`.'
)
elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.fsdp:
raise ValueError("`--fsdp full_shard` is not compatible with `--fsdp shard_grad_op`.")
if self.gradient_checkpointing and (
FSDPOption.FULL_SHARD in self.fsdp or FSDPOption.HYBRID_SHARD in self.fsdp
):
logger.warning(
"When using FSDP full shard, instead of using `gradient_checkpointing` in TrainingArguments, please"
" use `activation_checkpointing` in `fsdp_config`. The former introduces a redundant AllGather"
" operation in backward pass. Reference: https://github.com/huggingface/transformers/issues/30404"
)
if self.fsdp_config is None:
self.fsdp_config = {}
if isinstance(self.fsdp_config, str):
if len(self.fsdp) == 0:
warnings.warn("`--fsdp_config` is useful only when `--fsdp` is specified.")
with io.open(self.fsdp_config, "r", encoding="utf-8") as f:
self.fsdp_config = json.load(f)
for k in list(self.fsdp_config.keys()):
if k.startswith("fsdp_"):
v = self.fsdp_config.pop(k)
self.fsdp_config[k[5:]] = v
if self.fsdp_min_num_params > 0:
warnings.warn("using `--fsdp_min_num_params` is deprecated. Use fsdp_config instead ", FutureWarning)
self.fsdp_config["min_num_params"] = max(self.fsdp_config.get("min_num_params", 0), self.fsdp_min_num_params)
# if fsdp_config["transformer_layer_cls_to_wrap"] is specified as a string, convert it to a list with a single object
if isinstance(self.fsdp_config.get("transformer_layer_cls_to_wrap", None), str):
self.fsdp_config["transformer_layer_cls_to_wrap"] = [self.fsdp_config["transformer_layer_cls_to_wrap"]]
if self.fsdp_transformer_layer_cls_to_wrap is not None:
warnings.warn(
"using `--fsdp_transformer_layer_cls_to_wrap` is deprecated. Use fsdp_config instead ", FutureWarning
)
self.fsdp_config["transformer_layer_cls_to_wrap"] = self.fsdp_config.get(
"transformer_layer_cls_to_wrap", []
) + [self.fsdp_transformer_layer_cls_to_wrap]
if len(self.fsdp) == 0 and self.fsdp_config["min_num_params"] > 0:
warnings.warn("`min_num_params` is useful only when `--fsdp` is specified.")
if len(self.fsdp) == 0 and self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None:
warnings.warn("`transformer_layer_cls_to_wrap` is useful only when `--fsdp` is specified.")
if (
len(self.fsdp) > 0
and self.fsdp_config["min_num_params"] > 0
and self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None
):
raise ValueError("`min_num_params` and `transformer_layer_cls_to_wrap` are mutually exclusive.")
self.fsdp_config["xla"] = self.fsdp_config.get("xla", False)
self.fsdp_config["xla_fsdp_v2"] = self.fsdp_config.get("xla_fsdp_v2", False)
self.fsdp_config["xla_fsdp_grad_ckpt"] = self.fsdp_config.get("xla_fsdp_grad_ckpt", False)
if self.fsdp_config["xla"]:
if len(self.fsdp) > 0:
# store XLA fsdp configuration parameters into a dictionary
# Copy the config to avoid modifying the original config (which may be used for JSON serialization)
self.xla_fsdp_config = self.fsdp_config.get("xla_fsdp_settings", {}).copy()
# apply appropriate string to torch.dtype conversions for parameters
if "compute_dtype" in self.xla_fsdp_config:
self.xla_fsdp_config["compute_dtype"] = getattr(torch, self.xla_fsdp_config["compute_dtype"])
if "buffer_dtype" in self.xla_fsdp_config:
self.xla_fsdp_config["buffer_dtype"] = getattr(torch, self.xla_fsdp_config["buffer_dtype"])
else:
warnings.warn("XLA FSDP can be used only when `--fsdp` is specified.")
else:
if self.fsdp_config["xla_fsdp_grad_ckpt"]:
warnings.warn("`--xla_fsdp_grad_ckpt` is useful only when `--xla` is set to true.")
# accelerate integration for FSDP
if len(self.fsdp) > 0 and not self.fsdp_config["xla"]:
os.environ["ACCELERATE_USE_FSDP"] = "true"
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_SHARDING_STRATEGY,
)
prefix = "FSDP_"
for fsdp_option in self.fsdp:
if fsdp_option.upper() in FSDP_SHARDING_STRATEGY:
# set environment variable for FSDP sharding strategy
os.environ[f"{prefix}SHARDING_STRATEGY"] = str(
FSDP_SHARDING_STRATEGY.index(fsdp_option.upper()) + 1
)
elif fsdp_option == FSDPOption.OFFLOAD:
os.environ[f"{prefix}OFFLOAD_PARAMS"] = "true"
elif fsdp_option == FSDPOption.AUTO_WRAP:
os.environ[f"{prefix}AUTO_WRAP_POLICY"] = FSDP_AUTO_WRAP_POLICY[0]
if self.fsdp_config["min_num_params"] > 0:
os.environ[f"{prefix}MIN_NUM_PARAMS"] = str(self.fsdp_config["min_num_params"])
os.environ[f"{prefix}AUTO_WRAP_POLICY"] = FSDP_AUTO_WRAP_POLICY[1]
elif self.fsdp_config.get("transformer_layer_cls_to_wrap", None) is not None:
os.environ[f"{prefix}TRANSFORMER_CLS_TO_WRAP"] = ",".join(
self.fsdp_config["transformer_layer_cls_to_wrap"]
)
prefetch_policy = self.fsdp_config.get("backward_prefetch", "NO_PREFETCH")
os.environ[f"{prefix}BACKWARD_PREFETCH"] = prefetch_policy.upper()
os.environ[f"{prefix}FORWARD_PREFETCH"] = str(self.fsdp_config.get("forward_prefetch", "false")).lower()
sync_module_states = str(self.fsdp_config.get("sync_module_states", "true")).lower()
cpu_ram_efficient_loading = str(self.fsdp_config.get("cpu_ram_efficient_loading", "false")).lower()
if sync_module_states == "false" and cpu_ram_efficient_loading == "true":
# In this case, all the processes except the main process would have random weights, leading
# to unexpected behaviour during training, so we raise an error here to prevent it.
raise ValueError('`sync_module_states` must be `"True"` if `cpu_ram_efficient_loading` is `"True"`')
os.environ[f"{prefix}SYNC_MODULE_STATES"] = sync_module_states
os.environ[f"{prefix}CPU_RAM_EFFICIENT_LOADING"] = cpu_ram_efficient_loading
os.environ[f"{prefix}USE_ORIG_PARAMS"] = str(self.fsdp_config.get("use_orig_params", "true")).lower()
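# Illustrative sketch of the resulting environment (assumed values): with
# `--fsdp "full_shard offload auto_wrap"` and fsdp_config={"min_num_params": 100000},
# the loop above exports roughly
#
#     FSDP_SHARDING_STRATEGY="1"                 # 1-based index of FULL_SHARD
#     FSDP_OFFLOAD_PARAMS="true"
#     FSDP_MIN_NUM_PARAMS="100000"
#     FSDP_AUTO_WRAP_POLICY="SIZE_BASED_WRAP"    # assuming accelerate's constant ordering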
if self.tpu_metrics_debug:
warnings.warn(
"using `--tpu_metrics_debug` is deprecated and will be removed in version 5 of 🤗 Transformers. Use"
" `--debug tpu_metrics_debug` instead",
FutureWarning,
)
if self.debug is None:
self.debug = "tpu_metrics_debug"
else:
self.debug += " tpu_metrics_debug"
self.tpu_metrics_debug = False
if isinstance(self.debug, str):
self.debug = [DebugOption(s) for s in self.debug.split()]
elif self.debug is None:
self.debug = []
self.deepspeed_plugin = None
if self.deepspeed:
# - must be run very last in arg parsing, since it will use a lot of these settings.
# - must be run before the model is created.
if not is_accelerate_available():
raise ValueError(
f"--deepspeed requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`."
)
from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig
# will be used later by the Trainer
# note: leave self.deepspeed unmodified in case a user relies on it not to be modified
self.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.deepspeed)
self.hf_deepspeed_config.trainer_config_process(self)
# Accelerate DeepSpeed Plugin
from accelerate.utils import DeepSpeedPlugin
os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
self.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.hf_deepspeed_config)
elif strtobool(os.environ.get("ACCELERATE_USE_DEEPSPEED", "false")):
# Accelerate DeepSpeed Plugin
from accelerate.utils import DeepSpeedPlugin
self.deepspeed_plugin = DeepSpeedPlugin()
mixed_precision = os.environ.get("ACCELERATE_MIXED_PRECISION", "no")
self.deepspeed_plugin.set_mixed_precision(mixed_precision)
self.deepspeed_plugin.set_deepspeed_weakref()
if self.use_cpu:
self.dataloader_pin_memory = False
if self.dataloader_num_workers == 0 and self.dataloader_prefetch_factor is not None:
raise ValueError(
"--dataloader_prefetch_factor can only be set when data is loaded in a different process, i.e."
" when --dataloader_num_workers > 0."
)
if self.push_to_hub_token is not None:
warnings.warn(
"`--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use "
"`--hub_token` instead.",
FutureWarning,
)
self.hub_token = self.push_to_hub_token
if self.push_to_hub_model_id is not None:
self.hub_model_id = get_full_repo_name(
self.push_to_hub_model_id, organization=self.push_to_hub_organization, token=self.hub_token
)
if self.push_to_hub_organization is not None:
warnings.warn(
"`--push_to_hub_model_id` and `--push_to_hub_organization` are deprecated and will be removed in "
"version 5 of 🤗 Transformers. Use `--hub_model_id` instead and pass the full repo name to this "
f"argument (in this case {self.hub_model_id}).",
FutureWarning,
)
else:
warnings.warn(
"`--push_to_hub_model_id` is deprecated and will be removed in version 5 of 🤗 Transformers. Use "
"`--hub_model_id` instead and pass the full repo name to this argument (in this case "
f"{self.hub_model_id}).",
FutureWarning,
)
elif self.push_to_hub_organization is not None:
self.hub_model_id = f"{self.push_to_hub_organization}/{Path(self.output_dir).name}"
warnings.warn(
"`--push_to_hub_organization` is deprecated and will be removed in version 5 of 🤗 Transformers. Use "
"`--hub_model_id` instead and pass the full repo name to this argument (in this case "
f"{self.hub_model_id}).",
FutureWarning,
)
if self.eval_use_gather_object and not is_accelerate_available("0.30.0"):
raise ValueError(
"--eval_use_gather_object requires `accelerate>=0.30.0`. "
"Please update your version of `accelerate`."
)
if self.data_seed is not None:
if not is_accelerate_available("1.1.0"):
raise NotImplementedError(
"data_seed requires `accelerate>=1.1.0`. "
"Please update your version of `accelerate`."
)
if self.include_inputs_for_metrics:
logger.warning(
"Using `include_inputs_for_metrics` is deprecated and will be removed in version 5 of 🤗 Transformers. Please use `include_for_metrics` list argument instead."
)
self.include_for_metrics.append("inputs")
def __str__(self):
self_as_dict = asdict(self)
# Remove deprecated arguments. That code should be removed once
# those deprecated arguments are removed from TrainingArguments. (TODO: v5)
del self_as_dict["per_gpu_train_batch_size"]
del self_as_dict["per_gpu_eval_batch_size"]
self_as_dict = {k: f"<{k.upper()}>" if k.endswith("_token") else v for k, v in self_as_dict.items()}
attrs_as_str = [f"{k}={v},\n" for k, v in sorted(self_as_dict.items())]
return f"{self.__class__.__name__}(\n{''.join(attrs_as_str)})"
__repr__ = __str__
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from `per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
return train_batch_size
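# Worked example (assumed setup): with per_device_train_batch_size=8 in a single process
# that sees 4 GPUs (nn.DataParallel, so n_gpu == 4), this returns 32; under DDP each
# process sees one device (n_gpu == 1), so it returns 8 per process.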
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from `per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
eval_batch_size = per_device_batch_size * max(1, self.n_gpu)
return eval_batch_size
@property
def ddp_timeout_delta(self) -> timedelta:
"""
The actual timeout for torch.distributed.init_process_group since it expects a timedelta variable.
"""
return timedelta(seconds=self.ddp_timeout)
@cached_property
def _setup_devices(self) -> "torch.device":
requires_backends(self, ["torch"])
logger.info("PyTorch: setting up devices")
if not is_sagemaker_mp_enabled():
if not is_accelerate_available():
raise ImportError(
f"Using the `Trainer` with `PyTorch` requires `accelerate>={ACCELERATE_MIN_VERSION}`: "
f"Please run `pip install transformers[torch]` or `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
)
# We delay the init of `PartialState` to the end for clarity
accelerator_state_kwargs = {"enabled": True, "use_configured_state": False}
if isinstance(self.accelerator_config, AcceleratorConfig):
accelerator_state_kwargs["use_configured_state"] = self.accelerator_config.pop(
"use_configured_state", False
)
if accelerator_state_kwargs["use_configured_state"]:
if PartialState._shared_state == {}:
raise ValueError(
"Passing `'use_configured_state':True` to the AcceleratorConfig requires a pre-configured "
"`AcceleratorState` or `PartialState` to be defined before calling `TrainingArguments`. "
)
# We rely on `PartialState` to yell if there's issues here (which it will)
self.distributed_state = PartialState(cpu=self.use_cpu)
if self.deepspeed and self.distributed_state.distributed_type != DistributedType.DEEPSPEED:
raise RuntimeError(
"Tried to use an already configured `Accelerator` or `PartialState` that was not initialized for DeepSpeed, "
"but also passed in a `deepspeed` configuration to the `TrainingArguments`. Please set "
"`use_configured_state:False` instead or setup your `Accelerator` or `PartialState` properly."
)
else:
AcceleratorState._reset_state(reset_partial_state=True)
self.distributed_state = None
if not self.use_ipex and "ACCELERATE_USE_IPEX" not in os.environ:
os.environ["ACCELERATE_USE_IPEX"] = "false"
self._n_gpu = 1
if self.use_cpu or strtobool(os.environ.get("ACCELERATE_USE_CPU", "False")):
accelerator_state_kwargs["cpu"] = True
accelerator_state_kwargs["backend"] = self.ddp_backend
self._n_gpu = 0
elif is_sagemaker_mp_enabled():
accelerator_state_kwargs["enabled"] = False
local_rank = smp.local_rank()
device = torch.device("cuda", local_rank)
torch.cuda.set_device(device)
elif is_sagemaker_dp_enabled():
accelerator_state_kwargs["_use_sagemaker_dp"] = True
elif self.deepspeed:
accelerator_state_kwargs["use_deepspeed"] = True
accelerator_state_kwargs["timeout"] = timedelta(seconds=self.ddp_timeout)
else:
accelerator_state_kwargs["backend"] = self.ddp_backend
accelerator_state_kwargs["timeout"] = timedelta(seconds=self.ddp_timeout)
# Now we pop everything
if accelerator_state_kwargs.pop("enabled", False) and not accelerator_state_kwargs.pop(
"use_configured_state", False
):
# We need to patch this env var when enabling to detect deepspeed
use_deepspeed = accelerator_state_kwargs.pop("use_deepspeed", False)
if use_deepspeed:
os.environ["ACCELERATE_USE_DEEPSPEED"] = "true"
self.distributed_state = PartialState(**accelerator_state_kwargs)
if use_deepspeed:
del os.environ["ACCELERATE_USE_DEEPSPEED"]
if not is_sagemaker_mp_enabled():
device = self.distributed_state.device
self.local_rank = self.distributed_state.local_process_index
if dist.is_available() and dist.is_initialized() and self.parallel_mode != ParallelMode.DISTRIBUTED:
logger.warning(
"torch.distributed process group is initialized, but parallel_mode != ParallelMode.DISTRIBUTED. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
)
if is_torch_xla_available():
device = self.distributed_state.device
self._n_gpu = 0
elif is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled():
# Already set _n_gpu
pass
elif self.distributed_state.distributed_type == DistributedType.NO:
if self.use_mps_device:
warnings.warn(
"`use_mps_device` is deprecated and will be removed in version 5.0 of 🤗 Transformers. "
"`mps` device will be used by default if available similar to the way `cuda` device is used. "
"Therefore, no action from user is required. "
)
if device.type != "mps":
raise ValueError(
"Either you do not have an MPS-enabled device on this machine or MacOS version is not 12.3+ "
"or current PyTorch install was not built with MPS enabled."
)
if self.use_cpu:
device = torch.device("cpu")
elif is_torch_mps_available():
device = torch.device("mps")
elif is_torch_xpu_available():
if not is_ipex_available() and not is_accelerate_available("0.32.0.dev"):
raise ImportError("Using the XPU PyTorch backend requires `accelerate>=0.32.0.dev`")
device = torch.device("xpu:0")
torch.xpu.set_device(device)
elif is_torch_mlu_available():
device = torch.device("mlu:0")
torch.mlu.set_device(device)
elif is_torch_musa_available():
device = torch.device("musa:0")
torch.musa.set_device(device)
elif is_torch_npu_available():
device = torch.device("npu:0")
torch.npu.set_device(device)
else:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device(
"cuda:0" if torch.cuda.is_available() else os.environ.get("ACCELERATE_TORCH_DEVICE", "cpu")
)
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
self._n_gpu = torch.cuda.device_count()
if device.type == "cuda":
torch.cuda.set_device(device)
return device
@property
def device(self) -> "torch.device":
"""
The device used by this process.
"""
requires_backends(self, ["torch"])
return self._setup_devices
@property
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
requires_backends(self, ["torch"])
# Make sure `self._n_gpu` is properly setup.
if not hasattr(self, "_n_gpu"):
_ = self._setup_devices
return self._n_gpu
@property
def parallel_mode(self):
"""
The current mode used for parallelism if multiple GPUs/TPU cores are available. One of:
- `ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).
- `ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses `torch.nn.DataParallel`).
- `ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses
`torch.nn.DistributedDataParallel`).
- `ParallelMode.TPU`: several TPU cores.
"""
requires_backends(self, ["torch"])
if is_torch_xla_available():
return ParallelMode.TPU
elif is_sagemaker_mp_enabled():
return ParallelMode.SAGEMAKER_MODEL_PARALLEL
elif is_sagemaker_dp_enabled():
return ParallelMode.SAGEMAKER_DATA_PARALLEL
elif (
self.distributed_state is not None and self.distributed_state.distributed_type != DistributedType.NO
) or (self.distributed_state is None and self.local_rank != -1):
return ParallelMode.DISTRIBUTED
elif self.n_gpu > 1:
return ParallelMode.NOT_DISTRIBUTED
else: | 179 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args.py |
return ParallelMode.NOT_PARALLEL
@property
def world_size(self):
"""
The number of processes used in parallel.
"""
requires_backends(self, ["torch"])
if self.distributed_state is not None:
return self.distributed_state.num_processes
elif is_sagemaker_mp_enabled():
return smp.dp_size() if not smp.state.cfg.prescaled_batch else smp.rdp_size()
return 1
@property
def process_index(self):
"""
The index of the current process used.
"""
requires_backends(self, ["torch"])
if self.distributed_state is not None:
return self.distributed_state.process_index
elif is_sagemaker_mp_enabled():
return smp.dp_rank() if not smp.state.cfg.prescaled_batch else smp.rdp_rank()
return 0
@property
def local_process_index(self):
"""
The index of the local process used.
"""
requires_backends(self, ["torch"])
if self.distributed_state is not None:
return self.distributed_state.local_process_index
elif is_sagemaker_mp_enabled():
return smp.local_rank()
return 0
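# Worked example (assumed setup): on 2 nodes with 4 GPUs each under DDP,
# world_size == 8, process_index runs from 0 to 7 across the job, and
# local_process_index runs from 0 to 3 on each node.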
@property
def should_log(self):
"""
Whether or not the current process should produce logs.
"""
if self.log_on_each_node:
return self.local_process_index == 0
else:
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.process_index == 0
@property
def should_save(self):
"""
Whether or not the current process should write to disk, e.g., to save models and checkpoints.
"""
if self.save_on_each_node:
return self.local_process_index == 0
else:
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.process_index == 0
def get_process_log_level(self):
"""
Returns the log level to be used depending on whether this process is the main process of node 0, main process
of node non-0, or a non-main process.
For the main process the log level defaults to the logging level set (`logging.WARNING` if you didn't do
anything) unless overridden by the `log_level` argument.
For the replica processes the log level defaults to `logging.WARNING` unless overridden by the
`log_level_replica` argument.
The choice between the main and replica process settings is made according to the return value of `should_log`.
"""
# convert to int
log_level = trainer_log_levels[self.log_level]
log_level_replica = trainer_log_levels[self.log_level_replica]
log_level_main_node = logging.get_verbosity() if log_level == -1 else log_level
log_level_replica_node = logging.get_verbosity() if log_level_replica == -1 else log_level_replica
return log_level_main_node if self.should_log else log_level_replica_node
@property
def place_model_on_device(self):
"""
Can be subclassed and overridden for some specific integrations.
"""
return not is_sagemaker_mp_enabled()
@property
def _no_sync_in_gradient_accumulation(self):
"""
Whether or not to use no_sync for the gradients when doing gradient accumulation.
"""
return not (
self.deepspeed or is_sagemaker_dp_enabled() or is_sagemaker_mp_enabled() or is_torch_neuroncore_available()
)
@contextlib.contextmanager
def main_process_first(self, local=True, desc="work"):
"""
A context manager for torch distributed environments where one needs to do something on the main process, while
blocking replicas, and when it's finished releasing the replicas.
One such use is for the `map` feature of `datasets`, which, to be efficient, should be run once on the main
process; upon completion it saves a cached version of the results, which then automatically gets loaded by
the replicas.
Args:
local (`bool`, *optional*, defaults to `True`):
if `True`, "first" means the process of rank 0 of each node; if `False`, it means the process of rank 0 of
node rank 0. In a multi-node environment with a shared filesystem you most likely will want to use
`local=False` so that only the main process of the first node does the processing. If, however, the
filesystem is not shared, then the main process of each node will need to do the processing, which is
the default behavior.
desc (`str`, *optional*, defaults to `"work"`):
a work description to be used in debug logs | 179 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args.py |
"""
if is_torch_available() and self.world_size > 1:
main_process_desc = "main local process" if local else "main process"
if self.distributed_state is not None:
is_main_process = (
self.distributed_state.is_local_main_process if local else self.distributed_state.is_main_process
)
elif is_sagemaker_mp_enabled():
is_main_process = smp.rank() == 0
try:
if not is_main_process:
# tell all replicas to wait
logger.debug(f"{self.process_index}: waiting for the {main_process_desc} to perform {desc}") | 179 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args.py |
if is_torch_xla_available():
xm.rendezvous(desc)
else:
dist.barrier()
yield
finally:
if is_main_process:
# the wait is over
logger.debug(f"{self.process_index}: {main_process_desc} completed {desc}, releasing all replicas")
if is_torch_xla_available():
xm.rendezvous(desc)
else:
dist.barrier()
else:
yield
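Typical usage, mirroring how the example scripts guard `datasets` preprocessing so the cache is built once and reused by the replicas; `training_args`, `raw_datasets`, and `tokenize_function` are assumed to already exist:
```py
with training_args.main_process_first(desc="dataset map pre-processing"):
    tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
```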
def get_warmup_steps(self, num_training_steps: int):
"""
Get number of steps used for a linear warmup.
"""
warmup_steps = (
self.warmup_steps if self.warmup_steps > 0 else math.ceil(num_training_steps * self.warmup_ratio)
)
return warmup_steps
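A quick worked example of the precedence: an explicit `warmup_steps` wins, otherwise the count is derived from `warmup_ratio`:
```py
from transformers import TrainingArguments

args = TrainingArguments("working_dir", warmup_ratio=0.1)
print(args.get_warmup_steps(1000))  # ceil(1000 * 0.1) -> 100

args = TrainingArguments("working_dir", warmup_steps=250, warmup_ratio=0.1)
print(args.get_warmup_steps(1000))  # explicit warmup_steps takes precedence -> 250
```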
def _dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and, if it's not None,
converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
string `"float32"`, which can then be stored in JSON format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self._dict_torch_dtype_to_str(value)
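For illustration, a small sketch of the conversion (the method is private; calling it directly is just for demonstration):
```py
import torch
from transformers import TrainingArguments

args = TrainingArguments("working_dir")
d = {"torch_dtype": torch.float32, "nested": {"torch_dtype": torch.bfloat16}}
args._dict_torch_dtype_to_str(d)
print(d)  # {'torch_dtype': 'float32', 'nested': {'torch_dtype': 'bfloat16'}}
```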
def to_dict(self):
"""
Serializes this instance while replacing `Enum` members by their values (for JSON serialization support). It
obfuscates token values by replacing them with a placeholder.
"""
# filter out fields that are defined as field(init=False)
d = {field.name: getattr(self, field.name) for field in fields(self) if field.init}
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
d[k] = [x.value for x in v]
if k.endswith("_token"):
d[k] = f"<{k.upper()}>"
# Handle the accelerator_config if passed
if is_accelerate_available() and isinstance(v, AcceleratorConfig):
d[k] = v.to_dict()
self._dict_torch_dtype_to_str(d)
return d
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard’s hparams
"""
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
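A short sketch of the serialization helpers; note how any field whose name ends in `_token` is masked with a placeholder instead of being written out:
```py
from transformers import TrainingArguments

args = TrainingArguments("working_dir", hub_token="hf_dummy_value")
print(args.to_dict()["hub_token"])  # <HUB_TOKEN> -- the real value is never serialized
print(args.to_json_string()[:20])  # JSON string with 2-space indentation
```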
# The following methods are there to simplify the instantiation of `TrainingArguments`
def set_training(
self,
learning_rate: float = 5e-5,
batch_size: int = 8,
weight_decay: float = 0,
num_epochs: float = 3,
max_steps: int = -1,
gradient_accumulation_steps: int = 1,
seed: int = 42,
gradient_checkpointing: bool = False,
):
"""
A method that regroups all basic arguments linked to training.
<Tip>
Calling this method will automatically set `self.do_train` to `True`.
</Tip>
Args:
learning_rate (`float`, *optional*, defaults to 5e-5):
The initial learning rate for the optimizer.
batch_size (`int`, *optional*, defaults to 8):
The batch size per device (GPU/TPU core/CPU...) used for training.
weight_decay (`float`, *optional*, defaults to 0):
The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in the
optimizer.
num_epochs (`float`, *optional*, defaults to 3.0):
Total number of training epochs to perform (if not an integer, will perform the decimal part as a
fraction of the last epoch before stopping training).
max_steps (`int`, *optional*, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides `num_epochs`.
For a finite dataset, training is reiterated through the dataset (if all data is exhausted) until
`max_steps` is reached.
gradient_accumulation_steps (`int`, *optional*, defaults to 1):
Number of update steps to accumulate the gradients for, before performing a backward/update pass.
<Tip warning={true}>
When using gradient accumulation, one step is counted as one step with a backward pass. Therefore,
logging, evaluation, and saving will be conducted every `gradient_accumulation_steps * xxx_step` training
examples.
</Tip>
seed (`int`, *optional*, defaults to 42):
Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use
the [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized
parameters.
gradient_checkpointing (`bool`, *optional*, defaults to `False`):
If `True`, use gradient checkpointing to save memory at the expense of a slower backward pass.
Example:
```py
>>> from transformers import TrainingArguments
>>> args = TrainingArguments("working_dir")
>>> args = args.set_training(learning_rate=1e-4, batch_size=32)
>>> args.learning_rate
0.0001
```
"""
self.do_train = True
self.learning_rate = learning_rate
self.per_device_train_batch_size = batch_size
self.weight_decay = weight_decay
self.num_train_epochs = num_epochs
self.max_steps = max_steps
self.gradient_accumulation_steps = gradient_accumulation_steps
self.seed = seed
self.gradient_checkpointing = gradient_checkpointing
return self
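Because each `set_*` helper returns `self`, the setters compose into a fluent chain; a small sketch combining training and evaluation settings:
```py
from transformers import TrainingArguments

args = (
    TrainingArguments("working_dir")
    .set_training(learning_rate=1e-4, batch_size=32)
    .set_evaluate(strategy="steps", steps=100)
)
print(args.do_train, args.do_eval)  # True True
```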
def set_evaluate(
self,
strategy: Union[str, IntervalStrategy] = "no",
steps: int = 500,
batch_size: int = 8,
accumulation_steps: Optional[int] = None,
delay: Optional[float] = None,
loss_only: bool = False,
jit_mode: bool = False,
):
"""
A method that regroups all arguments linked to evaluation.
Args:
strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"no"`):
The evaluation strategy to adopt during training. Possible values are:
- `"no"`: No evaluation is done during training.
- `"steps"`: Evaluation is done (and logged) every `steps`.
- `"epoch"`: Evaluation is done at the end of each epoch. | 179 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args.py |
Setting a `strategy` different from `"no"` will set `self.do_eval` to `True`.
steps (`int`, *optional*, defaults to 500):
Number of update steps between two evaluations if `strategy="steps"`.
batch_size (`int`, *optional*, defaults to 8):
The batch size per device (GPU/TPU core/CPU...) used for evaluation.
accumulation_steps (`int`, *optional*):
Number of prediction steps to accumulate the output tensors for, before moving the results to the CPU.
If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster
but requires more memory).
delay (`float`, *optional*):
Number of epochs or steps to wait for before the first evaluation can be performed, depending on the
`eval_strategy`.
loss_only (`bool`, *optional*, defaults to `False`):
Ignores all outputs except the loss.
jit_mode (`bool`, *optional*, defaults to `False`):
Whether or not to use PyTorch jit trace for inference.
Example:
```py
>>> from transformers import TrainingArguments
>>> args = TrainingArguments("working_dir")
>>> args = args.set_evaluate(strategy="steps", steps=100)
>>> args.eval_steps
100
```
"""
self.eval_strategy = IntervalStrategy(strategy)
if self.eval_strategy == IntervalStrategy.STEPS and steps <= 0:
raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.")
self.do_eval = self.eval_strategy != IntervalStrategy.NO
self.eval_steps = steps
self.per_device_eval_batch_size = batch_size
self.eval_accumulation_steps = accumulation_steps
self.eval_delay = delay
self.prediction_loss_only = loss_only
self.jit_mode_eval = jit_mode
return self
def set_testing(
self,
batch_size: int = 8,
loss_only: bool = False,
jit_mode: bool = False,
):
"""
A method that regroups all basic arguments linked to testing on a held-out dataset.
<Tip>
Calling this method will automatically set `self.do_predict` to `True`.
</Tip>
Args:
batch_size (`int`, *optional*, defaults to 8):
The batch size per device (GPU/TPU core/CPU...) used for testing.
loss_only (`bool`, *optional*, defaults to `False`):
Ignores all outputs except the loss.
jit_mode (`bool`, *optional*, defaults to `False`):
Whether or not to use PyTorch jit trace for inference.
Example:
```py
>>> from transformers import TrainingArguments
>>> args = TrainingArguments("working_dir")
>>> args = args.set_testing(batch_size=32)
>>> args.per_device_eval_batch_size
32
```
"""
self.do_predict = True
self.per_device_eval_batch_size = batch_size
self.prediction_loss_only = loss_only
self.jit_mode_eval = jit_mode
return self
def set_save(
self,
strategy: Union[str, IntervalStrategy] = "steps",
steps: int = 500,
total_limit: Optional[int] = None,
on_each_node: bool = False,
):
"""
A method that regroups all arguments linked to checkpoint saving.
Args:
strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`):
The checkpoint save strategy to adopt during training. Possible values are:
- `"no"`: No save is done during training.
- `"epoch"`: Save is done at the end of each epoch.
- `"steps"`: Save is done every `save_steps`.
steps (`int`, *optional*, defaults to 500):
Number of update steps between two checkpoint saves if `strategy="steps"`.
total_limit (`int`, *optional*):
If a value is passed, will limit the total number of checkpoints. Deletes the older checkpoints in
`output_dir`.
on_each_node (`bool`, *optional*, defaults to `False`):
When doing multi-node distributed training, whether to save models and checkpoints on each node, or
only on the main one.
This should not be activated when the different nodes use the same storage as the files will be saved
with the same names for each node.
Example:
```py
>>> from transformers import TrainingArguments
>>> args = TrainingArguments("working_dir")
>>> args = args.set_save(strategy="steps", steps=100)
>>> args.save_steps
100
```
"""
self.save_strategy = SaveStrategy(strategy)
if self.save_strategy == SaveStrategy.STEPS and steps <= 0:
raise ValueError("Setting `strategy` as 'steps' requires a positive value for `steps`.")
self.save_steps = steps
self.save_total_limit = total_limit
self.save_on_each_node = on_each_node
return self
def set_logging(
self,
strategy: Union[str, IntervalStrategy] = "steps",
steps: int = 500,
report_to: Union[str, List[str]] = "none",
level: str = "passive",
first_step: bool = False,
nan_inf_filter: bool = False,
on_each_node: bool = False,
replica_level: str = "passive",
):
"""
A method that regroups all arguments linked to logging.
Args:
strategy (`str` or [`~trainer_utils.IntervalStrategy`], *optional*, defaults to `"steps"`):
The logging strategy to adopt during training. Possible values are:
- `"no"`: No logging is done during training.
- `"epoch"`: Logging is done at the end of each epoch.
- `"steps"`: Logging is done every `logging_steps`. | 179 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/training_args.py |
steps (`int`, *optional*, defaults to 500):
Number of update steps between two logs if `strategy="steps"`.
level (`str`, *optional*, defaults to `"passive"`):
Logger log level to use on the main process. Possible choices are the log levels as strings: `"debug"`,
`"info"`, `"warning"`, `"error"` and `"critical"`, plus a `"passive"` level which doesn't set anything
and lets the application set the level.
report_to (`str` or `List[str]`, *optional*, defaults to `"none"`):
The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,
`"clearml"`, `"codecarbon"`, `"comet_ml"`, `"dagshub"`, `"dvclive"`, `"flyte"`, `"mlflow"`,
`"neptune"`, `"tensorboard"`, and `"wandb"`. Use `"all"` to report to all integrations installed,
`"none"` for no integrations.
first_step (`bool`, *optional*, defaults to `False`):
Whether to log and evaluate the first `global_step` or not.
nan_inf_filter (`bool`, *optional*, defaults to `False`):
Whether to filter `nan` and `inf` losses for logging. If set to `True` the loss of every step that is
`nan` or `inf` is filtered and the average loss of the current logging window is taken instead.