Matt300209 committed on
Commit
715ec12
·
verified ·
1 Parent(s): d76af48

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.10/site-packages/accelerate/__init__.py +51 -0
  2. venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/accelerate/__pycache__/parallelism_config.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/accelerate/accelerator.py +0 -0
  18. venv/lib/python3.10/site-packages/accelerate/big_modeling.py +789 -0
  19. venv/lib/python3.10/site-packages/accelerate/checkpointing.py +330 -0
  20. venv/lib/python3.10/site-packages/accelerate/commands/__init__.py +13 -0
  21. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/merge.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/to_fsdp2.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py +54 -0
  32. venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py +52 -0
  33. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py +917 -0
  42. venv/lib/python3.10/site-packages/accelerate/commands/config/config.py +89 -0
  43. venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py +256 -0
  44. venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py +122 -0
  45. venv/lib/python3.10/site-packages/accelerate/commands/config/default.py +163 -0
  46. venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py +274 -0
  47. venv/lib/python3.10/site-packages/accelerate/commands/config/update.py +63 -0
  48. venv/lib/python3.10/site-packages/accelerate/commands/env.py +131 -0
  49. venv/lib/python3.10/site-packages/accelerate/commands/estimate.py +312 -0
  50. venv/lib/python3.10/site-packages/accelerate/commands/launch.py +1245 -0
venv/lib/python3.10/site-packages/accelerate/__init__.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package entry point for `accelerate`: re-exports the public API from the submodules."""
__version__ = "1.10.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .inference import prepare_pippy
from .launchers import debug_launcher, notebook_launcher
from .parallelism_config import ParallelismConfig
from .state import PartialState
from .utils import (
    AutocastKwargs,
    DataLoaderConfiguration,
    DDPCommunicationHookType,
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    ProfileKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


# `rich` is an optional dependency; only expose the integration when it is installed.
if is_rich_available():
    from .utils import rich
venv/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc ADDED
Binary file (29.2 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc ADDED
Binary file (9.78 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc ADDED
Binary file (43.7 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (24.7 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc ADDED
Binary file (5.99 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc ADDED
Binary file (3.75 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc ADDED
Binary file (4.54 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc ADDED
Binary file (499 Bytes). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (7.39 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/parallelism_config.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (3.42 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc ADDED
Binary file (44.5 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc ADDED
Binary file (48.1 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/accelerator.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/accelerate/big_modeling.py ADDED
@@ -0,0 +1,789 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import os
17
+ import re
18
+ from contextlib import contextmanager
19
+ from functools import wraps
20
+ from typing import Optional, Union
21
+
22
+ import torch
23
+ import torch.nn as nn
24
+
25
+ from .hooks import (
26
+ AlignDevicesHook,
27
+ CpuOffload,
28
+ LayerwiseCastingHook,
29
+ UserCpuOffloadHook,
30
+ add_hook_to_module,
31
+ attach_align_device_hook,
32
+ attach_align_device_hook_on_blocks,
33
+ )
34
+ from .utils import (
35
+ OffloadedWeightsLoader,
36
+ check_cuda_p2p_ib_support,
37
+ check_device_map,
38
+ extract_submodules_state_dict,
39
+ find_tied_parameters,
40
+ get_balanced_memory,
41
+ infer_auto_device_map,
42
+ is_bnb_available,
43
+ is_mlu_available,
44
+ is_musa_available,
45
+ is_npu_available,
46
+ is_sdaa_available,
47
+ is_xpu_available,
48
+ load_checkpoint_in_model,
49
+ offload_state_dict,
50
+ parse_flag_from_env,
51
+ retie_parameters,
52
+ )
53
+ from .utils.constants import SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING
54
+ from .utils.other import recursive_getattr
55
+
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+
60
@contextmanager
def init_empty_weights(include_buffers: Optional[bool] = None):
    """
    A context manager under which models are initialized with all parameters on the meta device, therefore creating an
    empty model. Useful when just initializing the model would blow the available RAM.

    Args:
        include_buffers (`bool`, *optional*):
            Whether or not to also put all buffers on the meta device while initializing. When `None`, falls back to
            the `ACCELERATE_INIT_INCLUDE_BUFFERS` environment flag (itself defaulting to `False`).

    Example:

    ```python
    import torch.nn as nn
    from accelerate import init_empty_weights

    # Initialize a model with 100 billions parameters in no time and without using any RAM.
    with init_empty_weights():
        tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
    ```

    <Tip warning={true}>

    Any model created under this context manager has no weights. As such you can't do something like
    `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
    Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
    called.

    </Tip>
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
    # Delegate to the generic device-initialization context manager, pinned to the meta device.
    with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
        yield f
94
+
95
+
96
@contextmanager
def init_on_device(device: torch.device, include_buffers: Optional[bool] = None):
    """
    A context manager under which models are initialized with all parameters on the specified device.

    Args:
        device (`torch.device`):
            Device to initialize all parameters on.
        include_buffers (`bool`, *optional*):
            Whether or not to also put all buffers on the meta device while initializing. When `None`, falls back to
            the `ACCELERATE_INIT_INCLUDE_BUFFERS` environment flag (itself defaulting to `False`).

    Example:

    ```python
    import torch.nn as nn
    from accelerate import init_on_device

    with init_on_device(device=torch.device("cuda")):
        tst = nn.Linear(100, 100)  # on `cuda` device
    ```
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)

    if include_buffers:
        # `torch.device` works as a context manager that covers both parameters and buffers,
        # so no monkey-patching is needed in this case.
        with device:
            yield
        return

    # Past the early return above, `include_buffers` is necessarily falsy, so only
    # `nn.Module.register_parameter` needs to be patched. (The original code also carried
    # buffer/tensor-constructor patching branches here that could never execute — removed.)
    old_register_parameter = nn.Module.register_parameter

    def register_empty_parameter(module, name, param):
        # Register the parameter normally, then immediately move it to `device`, preserving
        # its parameter subclass and `requires_grad` flag.
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            kwargs["requires_grad"] = param.requires_grad
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)

    try:
        nn.Module.register_parameter = register_empty_parameter
        yield
    finally:
        # Always restore the original hook, even if the body raised.
        nn.Module.register_parameter = old_register_parameter
171
+
172
+
173
def cpu_offload(
    model: nn.Module,
    execution_device: Optional[torch.device] = None,
    offload_buffers: bool = False,
    state_dict: Optional[dict[str, torch.Tensor]] = None,
    preload_module_classes: Optional[list[str]] = None,
):
    """
    Activates full CPU offload for a model: only a single CPU copy of the model's state dict is kept. During the
    forward pass, each parameter is pulled from that state dict onto the execution device right when it is needed,
    then offloaded again afterwards.

    Args:
        model (`torch.nn.Module`):
            The model to offload.
        execution_device (`torch.device`, *optional*):
            The device on which the forward pass of the model will be executed (should be a GPU). Defaults to the
            device of the model's first parameter.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to offload the buffers with the model parameters.
        state_dict (`Dict[str, torch.Tensor]`, *optional*):
            The state dict of the model that will be kept on CPU.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    """
    if execution_device is None:
        # Fall back to wherever the model currently lives.
        execution_device = next(iter(model.parameters())).device
    if state_dict is None:
        # Build the single CPU-resident copy of the weights.
        state_dict = {name: tensor.to("cpu") for name, tensor in model.state_dict().items()}

    io_hook = AlignDevicesHook(io_same_device=True)
    add_hook_to_module(model, io_hook, append=True)
    attach_align_device_hook(
        model,
        execution_device=execution_device,
        offload=True,
        offload_buffers=offload_buffers,
        weights_map=state_dict,
        preload_module_classes=preload_module_classes,
    )
    return model
217
+
218
+
219
def cpu_offload_with_hook(
    model: torch.nn.Module,
    execution_device: Optional[Union[int, str, torch.device]] = None,
    prev_module_hook: Optional[UserCpuOffloadHook] = None,
):
    """
    Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
    [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
    the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.

    Args:
        model (`torch.nn.Module`):
            The model to offload.
        execution_device(`str`, `int` or `torch.device`, *optional*):
            The device on which the model should be executed. Will default to the MPS device if it's available, then
            GPU 0 if there is a GPU, and finally to the CPU.
        prev_module_hook (`UserCpuOffloadHook`, *optional*):
            The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
            offload method will be called just before the forward of the model to which this hook is attached.

    Returns:
        `Tuple[torch.nn.Module, UserCpuOffloadHook]`: the (hooked) model and a user-facing hook whose `offload`
        method moves the model back to the CPU.

    Example:

    ```py
    model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
    model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
    model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)

    hid_1 = model_1(input)
    for i in range(50):
        # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
        hid_2 = model_2(hid_1)
    # model2 is offloaded to the CPU just before this forward.
    hid_3 = model_3(hid_2)

    # For model3, you need to manually call the hook offload method.
    hook_3.offload()
    ```
    """
    # The CpuOffload hook moves the model to `execution_device` on forward; chaining
    # `prev_module_hook` lets the previous pipeline stage be offloaded first.
    hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
    add_hook_to_module(model, hook, append=True)
    # Wrap in a user-facing handle so callers can trigger the offload explicitly.
    user_hook = UserCpuOffloadHook(model, hook)
    return model, user_hook
261
+
262
+
263
def disk_offload(
    model: nn.Module,
    offload_dir: Union[str, os.PathLike],
    execution_device: Optional[torch.device] = None,
    offload_buffers: bool = False,
    preload_module_classes: Optional[list[str]] = None,
):
    """
    Activates full disk offload for a model: every parameter is stored as a memory-mapped array inside `offload_dir`.
    During the forward pass, parameters are read back from that folder onto the execution device as needed, then
    offloaded again.

    Args:
        model (`torch.nn.Module`): The model to offload.
        offload_dir (`str` or `os.PathLike`):
            The folder in which to offload the model weights (or where the model weights are already offloaded).
        execution_device (`torch.device`, *optional*):
            The device on which the forward pass of the model will be executed (should be a GPU). Defaults to the
            device of the model's first parameter.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to offload the buffers with the model parameters.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    """
    # Write the weights out to disk unless the folder already holds an offloaded checkpoint
    # (identified by its index.json).
    index_path = os.path.join(offload_dir, "index.json")
    if not (os.path.isdir(offload_dir) and os.path.isfile(index_path)):
        offload_state_dict(offload_dir, model.state_dict())
    if execution_device is None:
        execution_device = next(iter(model.parameters())).device
    weights_map = OffloadedWeightsLoader(save_folder=offload_dir)

    io_hook = AlignDevicesHook(io_same_device=True)
    add_hook_to_module(model, io_hook, append=True)
    attach_align_device_hook(
        model,
        execution_device=execution_device,
        offload=True,
        offload_buffers=offload_buffers,
        weights_map=weights_map,
        preload_module_classes=preload_module_classes,
    )
    return model
307
+
308
+
309
def dispatch_model(
    model: nn.Module,
    device_map: dict[str, Union[str, int, torch.device]],
    main_device: Optional[torch.device] = None,
    state_dict: Optional[dict[str, torch.Tensor]] = None,
    offload_dir: Optional[Union[str, os.PathLike]] = None,
    offload_index: Optional[dict[str, str]] = None,
    offload_buffers: bool = False,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    force_hooks: bool = False,
):
    """
    Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
    the CPU or even the disk.

    Args:
        model (`torch.nn.Module`):
            The model to dispatch.
        device_map (`Dict[str, Union[str, int, torch.device]]`):
            A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that
            `"disk"` is accepted even if it's not a proper value for `torch.device`.
        main_device (`str`, `int` or `torch.device`, *optional*):
            The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
            `"disk"`.
        state_dict (`Dict[str, torch.Tensor]`, *optional*):
            The state dict of the part of the model that will be kept on CPU.
        offload_dir (`str` or `os.PathLike`):
            The folder in which to offload the model weights (or where the model weights are already offloaded).
        offload_index (`Dict`, *optional*):
            A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
            to the index saved in `save_folder`.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to offload the buffers with the model parameters.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
        force_hooks (`bool`, *optional*, defaults to `False`):
            Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
            single device.

    Returns:
        `torch.nn.Module`: the dispatched model (modified in place; also sets `model.hf_device_map`).
    """
    # Error early if the device map is incomplete.
    check_device_map(model, device_map)

    # We need to force hook for quantized model that can't be moved with to()
    if getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes":
        # since bnb 0.43.2, we can move 4-bit model
        if getattr(model, "is_loaded_in_8bit", False) or (
            getattr(model, "is_loaded_in_4bit", False) and not is_bnb_available(min_version="0.43.2")
        ):
            force_hooks = True

    # We attach hooks if the device_map has at least 2 different devices or if
    # force_hooks is set to `True`. Otherwise, the model in already loaded
    # in the unique device and the user can decide where to dispatch the model.
    # If the model is quantized, we always force-dispatch the model
    if (len(set(device_map.values())) > 1) or force_hooks:
        if main_device is None:
            # Pick the first accelerator device in the map; fall back to CPU when the map
            # only contains "cpu"/"disk" entries.
            if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
                main_device = "cpu"
            else:
                main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]

        if main_device != "cpu":
            # Keep a CPU copy of the weights of the modules assigned to the CPU.
            cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
            if state_dict is None and len(cpu_modules) > 0:
                state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)

        disk_modules = [name for name, device in device_map.items() if device == "disk"]
        if offload_dir is None and offload_index is None and len(disk_modules) > 0:
            raise ValueError(
                "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
                f"need to be offloaded: {', '.join(disk_modules)}."
            )
        # Materialize the disk-offloaded weights unless the folder already contains an
        # offloaded checkpoint (index.json) or an explicit index was supplied.
        if (
            len(disk_modules) > 0
            and offload_index is None
            and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
        ):
            disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
            offload_state_dict(offload_dir, disk_state_dict)

        # Offloaded modules execute on the main device; others execute where they are placed.
        execution_device = {
            name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
        }
        execution_device[""] = main_device
        # When the main device is itself the CPU (or MPS), CPU-placed weights don't need offloading.
        offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
        offload = {name: device in offloaded_devices for name, device in device_map.items()}
        save_folder = offload_dir if len(disk_modules) > 0 else None
        if state_dict is not None or save_folder is not None or offload_index is not None:
            device = main_device if offload_index is not None else None
            weights_map = OffloadedWeightsLoader(
                state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
            )
        else:
            weights_map = None

        # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
        # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
        # original pointer) on each devices.
        tied_params = find_tied_parameters(model)

        tied_params_map = {}
        for group in tied_params:
            for param_name in group:
                # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
                # to care about views of tensors through storage_offset.
                data_ptr = recursive_getattr(model, param_name).data_ptr()
                tied_params_map[data_ptr] = {}

                # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
                # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.

        attach_align_device_hook_on_blocks(
            model,
            execution_device=execution_device,
            offload=offload,
            offload_buffers=offload_buffers,
            weights_map=weights_map,
            skip_keys=skip_keys,
            preload_module_classes=preload_module_classes,
            tied_params_map=tied_params_map,
        )

        # warn if there is any params on the meta device
        offloaded_devices_str = " and ".join(
            [device for device in set(device_map.values()) if device in ("cpu", "disk")]
        )
        if len(offloaded_devices_str) > 0:
            logger.warning(
                f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
            )

        # Attaching the hook may break tied weights, so we retie them
        retie_parameters(model, tied_params)

        # add warning to cuda and to method
        def add_warning(fn, model):
            # Wrap a device-moving method so that moving a dispatched model warns, and moving a
            # model with meta (offloaded) parameters raises instead of silently corrupting it.
            @wraps(fn)
            def wrapper(*args, **kwargs):
                warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
                if str(fn.__name__) == "to":
                    # `.to()` may be called only to change dtype; only warn when a device is targeted.
                    to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
                    if to_device is not None:
                        logger.warning(warning_msg)
                else:
                    logger.warning(warning_msg)
                for param in model.parameters():
                    if param.device == torch.device("meta"):
                        raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
                return fn(*args, **kwargs)

            return wrapper

        # Make sure to update _accelerate_added_attributes in hooks.py if you add any hook
        model.to = add_warning(model.to, model)
        if is_npu_available():
            model.npu = add_warning(model.npu, model)
        elif is_mlu_available():
            model.mlu = add_warning(model.mlu, model)
        elif is_sdaa_available():
            model.sdaa = add_warning(model.sdaa, model)
        elif is_musa_available():
            model.musa = add_warning(model.musa, model)
        elif is_xpu_available():
            model.xpu = add_warning(model.xpu, model)
        else:
            model.cuda = add_warning(model.cuda, model)

        # Check if we are using multi-gpus with RTX 4000 series
        use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
        if use_multi_gpu and not check_cuda_p2p_ib_support():
            logger.warning(
                "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
                "This can affect the multi-gpu inference when using accelerate device_map."
                "Please make sure to update your driver to the latest version which resolves this."
            )
    else:
        # Single device and no forced hooks: just move the whole model there.
        device = list(device_map.values())[0]
        # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
        if is_npu_available() and isinstance(device, int):
            device = f"npu:{device}"
        elif is_mlu_available() and isinstance(device, int):
            device = f"mlu:{device}"
        elif is_sdaa_available() and isinstance(device, int):
            device = f"sdaa:{device}"
        elif is_musa_available() and isinstance(device, int):
            device = f"musa:{device}"
        if device != "disk":
            model.to(device)
        else:
            raise ValueError(
                "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
            )
    # Convert OrderedDict back to dict for easier usage
    model.hf_device_map = dict(device_map)
    return model
510
+
511
+
512
def load_checkpoint_and_dispatch(
    model: nn.Module,
    checkpoint: Union[str, os.PathLike],
    device_map: Optional[Union[str, dict[str, Union[int, str, torch.device]]]] = None,
    max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[list[str]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_buffers: bool = False,
    dtype: Optional[Union[str, torch.dtype]] = None,
    offload_state_dict: Optional[bool] = None,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    force_hooks: bool = False,
    strict: bool = False,
    full_state_dict: bool = True,
    broadcast_from_rank0: bool = False,
):
    """
    Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
    loaded and adds the various hooks that will make this model run properly (even if split across devices).

    Args:
        model (`torch.nn.Module`): The model in which we want to load a checkpoint.
        checkpoint (`str` or `os.PathLike`):
            The folder checkpoint to load. It can be:
            - a path to a file containing a whole model state dict
            - a path to a `.json` file containing the index to a sharded checkpoint
            - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
        device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
            A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
            name, once a given module name is inside, every submodule of it will be sent to the same device.

            To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
            information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
            Defaults to None, which means [`dispatch_model`] will not be called.
        max_memory (`Dict`, *optional*):
            A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU
            and the available CPU RAM if unset.
        no_split_module_classes (`List[str]`, *optional*):
            A list of layer class names that should never be split across device (for instance any layer that has a
            residual connection).
        offload_folder (`str` or `os.PathLike`, *optional*):
            If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
            well as the parameters.
        dtype (`str` or `torch.dtype`, *optional*):
            If provided, the weights will be converted to that type when loaded.
        offload_state_dict (`bool`, *optional*):
            If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
            the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
            picked contains `"disk"` values.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
        force_hooks (`bool`, *optional*, defaults to `False`):
            Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
            single device.
        strict (`bool`, *optional*, defaults to `False`):
            Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
            state_dict.
        full_state_dict (`bool`, *optional*, defaults to `True`): if this is set to `True`, all the tensors in the
            loaded state_dict will be gathered. No ShardedTensor and DTensor will be in the loaded state_dict.
        broadcast_from_rank0 (`False`, *optional*, defaults to `False`): when the option is `True`, a distributed
            `ProcessGroup` must be initialized. rank0 should receive a full state_dict and will broadcast the tensors
            in the state_dict one by one to other ranks. Other ranks will receive the tensors and shard (if applicable)
            according to the local shards in the model.

    Example:

    ```python
    >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
    >>> from huggingface_hub import hf_hub_download
    >>> from transformers import AutoConfig, AutoModelForCausalLM

    >>> # Download the Weights
    >>> checkpoint = "EleutherAI/gpt-j-6B"
    >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")

    >>> # Create a model and initialize it with empty weights
    >>> config = AutoConfig.from_pretrained(checkpoint)
    >>> with init_empty_weights():
    ...     model = AutoModelForCausalLM.from_config(config)

    >>> # Load the checkpoint and dispatch it to the right devices
    >>> model = load_checkpoint_and_dispatch(
    ...     model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
    ... )
    ```
    """
    # Validate the string form of `device_map` before doing any expensive work.
    if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
        raise ValueError(
            "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or 'sequential'."
        )
    if isinstance(device_map, str):
        # "sequential" fills devices in order with no balancing pass; all other
        # strategies first compute a balanced per-device memory budget.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                max_memory=max_memory,
                no_split_module_classes=no_split_module_classes,
                dtype=dtype,
                low_zero=(device_map == "balanced_low_0"),
            )
        # Turn the strategy string into a concrete module -> device mapping.
        device_map = infer_auto_device_map(
            model,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
            dtype=dtype,
            offload_buffers=offload_buffers,
        )
    # Disk offload needs the CPU state dict spilled to disk by default to avoid OOM.
    if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
        offload_state_dict = True
    load_checkpoint_in_model(
        model,
        checkpoint,
        device_map=device_map,
        offload_folder=offload_folder,
        dtype=dtype,
        offload_state_dict=offload_state_dict,
        offload_buffers=offload_buffers,
        strict=strict,
        full_state_dict=full_state_dict,
        broadcast_from_rank0=broadcast_from_rank0,
    )
    # Without a device map there is nothing to dispatch: weights were loaded in place.
    if device_map is None:
        return model
    return dispatch_model(
        model,
        device_map=device_map,
        offload_dir=offload_folder,
        offload_buffers=offload_buffers,
        skip_keys=skip_keys,
        preload_module_classes=preload_module_classes,
        force_hooks=force_hooks,
    )
651
+
652
+
653
def attach_layerwise_casting_hooks(
    module: torch.nn.Module,
    storage_dtype: torch.dtype,
    compute_dtype: torch.dtype,
    skip_modules_pattern: Union[str, tuple[str, ...]] = None,
    skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None,
    non_blocking: bool = False,
) -> None:
    r"""
    Attach layerwise-casting hooks to `module` so that its supported leaf layers are kept in a low-precision
    `storage_dtype` (e.g. `torch.float8_e4m3fn`) and transparently upcast to `compute_dtype` (e.g. `torch.bfloat16`)
    for the duration of each forward pass. This reduces memory requirements without fully quantizing the model.

    Args:
        module (`torch.nn.Module`):
            The module whose leaf modules will be cast to a high precision dtype for computation, and to a low
            precision dtype for storage.
        storage_dtype (`torch.dtype`):
            The dtype to cast the module to before/after the forward pass for storage.
        compute_dtype (`torch.dtype`):
            The dtype to cast the module to during the forward pass for computation.
        skip_modules_pattern (`tuple[str, ...]`, defaults to `None`):
            A list of patterns to match the names of the modules to skip during the layerwise casting process. If set
            to `None` alongside `skip_modules_classes` being `None`, the layerwise casting is applied directly to the
            module instead of its internal submodules.
        skip_modules_classes (`tuple[type[torch.nn.Module], ...]`, defaults to `None`):
            A list of module classes to skip during the layerwise casting process.
        non_blocking (`bool`, defaults to `False`):
            If `True`, the weight casting operations are non-blocking.

    Example:

    ```python
    >>> from accelerate.hooks import attach_layerwise_casting_hooks
    >>> from transformers import AutoModelForCausalLM
    >>> import torch

    >>> model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    >>> attach_layerwise_casting_hooks(model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
    >>> with torch.no_grad():
    ...     model(...)
    ```

    To keep some submodules (e.g. norms) out of the downcasting, pass `skip_modules_pattern=["norm"]`.
    """
    # Public entry point: delegate to the recursive worker, which walks the
    # module tree and attaches a `LayerwiseCastingHook` to each supported leaf.
    _attach_layerwise_casting_hooks(
        module,
        storage_dtype,
        compute_dtype,
        skip_modules_pattern=skip_modules_pattern,
        skip_modules_classes=skip_modules_classes,
        non_blocking=non_blocking,
    )
712
+
713
+
714
def _attach_layerwise_casting_hooks(
    module: torch.nn.Module,
    storage_dtype: torch.dtype,
    compute_dtype: torch.dtype,
    skip_modules_pattern: Union[str, tuple[str, ...]] = None,
    skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None,
    non_blocking: bool = False,
    _prefix: str = "",
):
    """Recursive worker behind `attach_layerwise_casting_hooks`.

    `_prefix` is the dotted name of `module` within the root model; it is used
    both in debug log messages and to match against `skip_modules_pattern`.
    """
    # Skip this whole subtree when the module's class or its dotted name matches a filter.
    should_skip = (skip_modules_classes is not None and isinstance(module, skip_modules_classes)) or (
        skip_modules_pattern is not None and any(re.search(pattern, _prefix) for pattern in skip_modules_pattern)
    )
    if should_skip:
        logger.debug(f'Skipping layerwise casting for layer "{_prefix}"')
        return

    # Supported weight-holding layers get the casting hook; recursion stops here.
    if isinstance(module, SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING):
        logger.debug(f'Applying layerwise casting to layer "{_prefix}"')
        add_hook_to_module(
            module,
            LayerwiseCastingHook(storage_dtype=storage_dtype, compute_dtype=compute_dtype, non_blocking=non_blocking),
            append=True,
        )
        return

    # Otherwise recurse into children, extending the dotted name prefix.
    for name, submodule in module.named_children():
        layer_name = f"{_prefix}.{name}" if _prefix else name
        _attach_layerwise_casting_hooks(
            submodule,
            storage_dtype,
            compute_dtype,
            skip_modules_pattern,
            skip_modules_classes,
            non_blocking,
            _prefix=layer_name,
        )
750
+
751
+
752
+ def _attach_context_parallel_hooks(
753
+ model: nn.Module,
754
+ ):
755
+ """
756
+ Monkeypatch huggingface's `transformers` model to fix attention mask issues when using context parallelism.
757
+
758
+ This function attaches forward_pre_hooks to each self_attn module of the model, where each hook checks the
759
+ args/kwargs, if they contain an attention mask, if it does, it will remove this mask, check if it is a causal mask,
760
+ if yes, will add a kwarg `is_causal=True`, otherwise will raise an error. This is because context parallelism does
761
+ not support attention masks. This function modifies the model in place.
762
+
763
+ Args:
764
+ model (`nn.Module`):
765
+ The model to attach the hooks to.
766
+
767
+ """
768
+
769
+ def _self_attn_pre_forward_hook(_module, module_args, module_kwargs):
770
+ if "attention_mask" in module_kwargs:
771
+ module_kwargs["attention_mask"] = None
772
+ module_kwargs["is_causal"] = True
773
+
774
+ return module_args, module_kwargs
775
+
776
+ for name, module in model.named_modules():
777
+ # We hope (assume) that if user uses their own model (without this structure which transformers uses), they read the docs saying they can't pass in attention masks
778
+ # Then these cases can happen:
779
+ # 1) some modules end with a `self-attn` module, in which case we attach the hook, but the
780
+ # there's no attention mask kwarg -> hook is a no-op
781
+ # 2) some modules end with a `self-attn` module, in which case we attach the hook, and the
782
+ # attention mask kwarg is passed -> hook will remove the attention mask and add
783
+ # `is_causal=True` kwarg, which either crashes the training or fixes it
784
+ # (training would crash anyway as attention mask isn't supported)
785
+ # 3) no modules end with a `self-attn` module, in which case we don't attach the hook, this is
786
+ # a no-op as well
787
+ if name.endswith("self_attn"):
788
+ # we want the hook to be executed first, to avoid any other hooks doing work on the attention mask
789
+ module.register_forward_pre_hook(_self_attn_pre_forward_hook, with_kwargs=True, prepend=True)
venv/lib/python3.10/site-packages/accelerate/checkpointing.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import random
16
+ from pathlib import Path
17
+
18
+ import numpy as np
19
+ import torch
20
+ from safetensors.torch import load_model
21
+
22
+ from .utils import (
23
+ MODEL_NAME,
24
+ OPTIMIZER_NAME,
25
+ RNG_STATE_NAME,
26
+ SAFE_MODEL_NAME,
27
+ SAFE_WEIGHTS_NAME,
28
+ SAMPLER_NAME,
29
+ SCALER_NAME,
30
+ SCHEDULER_NAME,
31
+ WEIGHTS_NAME,
32
+ get_pretty_name,
33
+ is_cuda_available,
34
+ is_hpu_available,
35
+ is_mlu_available,
36
+ is_musa_available,
37
+ is_sdaa_available,
38
+ is_torch_version,
39
+ is_torch_xla_available,
40
+ is_xpu_available,
41
+ load,
42
+ save,
43
+ )
44
+
45
+
46
+ if is_torch_version(">=", "2.4.0"):
47
+ from torch.amp import GradScaler
48
+ else:
49
+ from torch.cuda.amp import GradScaler
50
+
51
+ if is_torch_xla_available():
52
+ import torch_xla.core.xla_model as xm
53
+
54
+ from .logging import get_logger
55
+ from .state import PartialState
56
+
57
+
58
+ logger = get_logger(__name__)
59
+
60
+
61
def save_accelerator_state(
    output_dir: str,
    model_states: list[dict],
    optimizers: list,
    schedulers: list,
    dataloaders: list,
    process_index: int,
    step: int,
    scaler: GradScaler = None,
    save_on_each_node: bool = False,
    safe_serialization: bool = True,
):
    """
    Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.

    <Tip>

    If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
    `pickle`.

    </Tip>

    Args:
        output_dir (`str` or `os.PathLike`):
            The name of the folder to save all relevant weights and states.
        model_states (`List[torch.nn.Module]`):
            A list of model states
        optimizers (`List[torch.optim.Optimizer]`):
            A list of optimizer instances
        schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
            A list of learning rate schedulers
        dataloaders (`List[torch.utils.data.DataLoader]`):
            A list of dataloader instances to save their sampler states
        process_index (`int`):
            The current process index in the Accelerator state
        step (`int`):
            The current step in the internal step tracker
        scaler (`torch.amp.GradScaler`, *optional*):
            An optional gradient scaler instance to save;
        save_on_each_node (`bool`, *optional*):
            Whether to save on every node, or only the main node.
        safe_serialization (`bool`, *optional*, defaults to `True`):
            Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
    """
    output_dir = Path(output_dir)
    # Model states: the first model keeps the canonical file name, later ones get an `_{i}` suffix.
    for i, state in enumerate(model_states):
        weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
        if i > 0:
            weights_name = weights_name.replace(".", f"_{i}.")
        output_model_file = output_dir.joinpath(weights_name)
        save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
        logger.info(f"Model weights saved in {output_model_file}")
    # Optimizer states (always pickled, never safetensors)
    for i, opt in enumerate(optimizers):
        state = opt.state_dict()
        optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
        output_optimizer_file = output_dir.joinpath(optimizer_name)
        save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
        logger.info(f"Optimizer state saved in {output_optimizer_file}")
    # Scheduler states
    for i, scheduler in enumerate(schedulers):
        state = scheduler.state_dict()
        scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
        output_scheduler_file = output_dir.joinpath(scheduler_name)
        save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
        logger.info(f"Scheduler state saved in {output_scheduler_file}")
    # DataLoader states
    for i, dataloader in enumerate(dataloaders):
        sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
        output_sampler_file = output_dir.joinpath(sampler_name)
        # Only save if we have our custom sampler
        # (imported lazily here to avoid a circular import with data_loader)
        from .data_loader import IterableDatasetShard, SeedableRandomSampler

        if isinstance(dataloader.dataset, IterableDatasetShard):
            sampler = dataloader.get_sampler()
            if isinstance(sampler, SeedableRandomSampler):
                save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
        # Stateful dataloaders additionally persist their full state dict.
        if getattr(dataloader, "use_stateful_dataloader", False):
            dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin"
            output_dataloader_state_dict_file = output_dir.joinpath(dataloader_state_dict_name)
            state_dict = dataloader.state_dict()
            torch.save(state_dict, output_dataloader_state_dict_file)
        logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")

    # GradScaler state
    if scaler is not None:
        state = scaler.state_dict()
        output_scaler_file = output_dir.joinpath(SCALER_NAME)
        torch.save(state, output_scaler_file)
        logger.info(f"Gradient scaler state saved in {output_scaler_file}")
    # Random number generator states, one file per process so each rank can
    # restore its own stream on resume.
    states = {}
    states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
    states["step"] = step
    states["random_state"] = random.getstate()
    states["numpy_random_seed"] = np.random.get_state()
    states["torch_manual_seed"] = torch.get_rng_state()
    if is_xpu_available():
        states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
    if is_mlu_available():
        states["torch_mlu_manual_seed"] = torch.mlu.get_rng_state_all()
    elif is_sdaa_available():
        states["torch_sdaa_manual_seed"] = torch.sdaa.get_rng_state_all()
    elif is_musa_available():
        states["torch_musa_manual_seed"] = torch.musa.get_rng_state_all()
    if is_hpu_available():
        states["torch_hpu_manual_seed"] = torch.hpu.get_rng_state_all()
    if is_cuda_available():
        states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
    if is_torch_xla_available():
        states["xm_seed"] = xm.get_rng_state()
    output_states_file = output_dir.joinpath(states_name)
    torch.save(states, output_states_file)
    logger.info(f"Random states saved in {output_states_file}")
    return output_dir
177
+
178
+
179
def load_accelerator_state(
    input_dir,
    models,
    optimizers,
    schedulers,
    dataloaders,
    process_index,
    scaler=None,
    map_location=None,
    load_kwargs=None,
    **load_model_func_kwargs,
):
    """
    Loads states of the models, optimizers, scaler, and RNG generators from a given directory.

    Args:
        input_dir (`str` or `os.PathLike`):
            The name of the folder to load all relevant weights and states.
        models (`List[torch.nn.Module]`):
            A list of model instances
        optimizers (`List[torch.optim.Optimizer]`):
            A list of optimizer instances
        schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
            A list of learning rate schedulers
        process_index (`int`):
            The current process index in the Accelerator state
        scaler (`torch.amp.GradScaler`, *optional*):
            An optional *GradScaler* instance to load
        map_location (`str`, *optional*):
            What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
        load_kwargs (`dict`, *optional*):
            Additional arguments that can be passed to the `load` function.
        load_model_func_kwargs (`dict`, *optional*):
            Additional arguments that can be passed to the model's `load_state_dict` method.

    Returns:
        `dict`: Contains the `Accelerator` attributes to override while loading the state.
    """
    # stores the `Accelerator` attributes to override
    override_attributes = dict()
    if map_location not in [None, "cpu", "on_device"]:
        raise TypeError(
            "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
        )
    if map_location is None:
        map_location = "cpu"
    elif map_location == "on_device":
        map_location = PartialState().device

    if load_kwargs is None:
        load_kwargs = {}

    input_dir = Path(input_dir)
    # Model states: prefer the safetensors file if present, otherwise fall back
    # to the pickled `.bin` checkpoint. Mirrors the naming in `save_accelerator_state`.
    for i, model in enumerate(models):
        ending = f"_{i}" if i > 0 else ""
        input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
        if input_model_file.exists():
            load_model(model, input_model_file, device=str(map_location), **load_model_func_kwargs)
        else:
            # Load with torch
            input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
            state_dict = load(input_model_file, map_location=map_location)
            model.load_state_dict(state_dict, **load_model_func_kwargs)
    logger.info("All model weights loaded successfully")

    # Optimizer states
    for i, opt in enumerate(optimizers):
        optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
        input_optimizer_file = input_dir.joinpath(optimizer_name)
        optimizer_state = load(input_optimizer_file, map_location=map_location, **load_kwargs)
        optimizers[i].load_state_dict(optimizer_state)
    logger.info("All optimizer states loaded successfully")

    # Scheduler states
    for i, scheduler in enumerate(schedulers):
        scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
        input_scheduler_file = input_dir.joinpath(scheduler_name)
        scheduler_state = load(input_scheduler_file, **load_kwargs)
        scheduler.load_state_dict(scheduler_state)
    logger.info("All scheduler states loaded successfully")

    for i, dataloader in enumerate(dataloaders):
        sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
        input_sampler_file = input_dir.joinpath(sampler_name)
        # Only load if we have our custom sampler
        # (imported lazily here to avoid a circular import with data_loader)
        from .data_loader import IterableDatasetShard, SeedableRandomSampler

        if isinstance(dataloader.dataset, IterableDatasetShard):
            sampler = dataloader.get_sampler()
            if isinstance(sampler, SeedableRandomSampler):
                sampler = dataloader.set_sampler(load(input_sampler_file))
        # Stateful dataloaders restore their full state dict when one was saved.
        if getattr(dataloader, "use_stateful_dataloader", False):
            dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin"
            input_dataloader_state_dict_file = input_dir.joinpath(dataloader_state_dict_name)
            if input_dataloader_state_dict_file.exists():
                state_dict = load(input_dataloader_state_dict_file, **load_kwargs)
                dataloader.load_state_dict(state_dict)
    logger.info("All dataloader sampler states loaded successfully")

    # GradScaler state
    if scaler is not None:
        input_scaler_file = input_dir.joinpath(SCALER_NAME)
        scaler_state = load(input_scaler_file)
        scaler.load_state_dict(scaler_state)
        logger.info("GradScaler state loaded successfully")

    # Random states: best-effort restore of this process's RNG streams; any
    # failure (e.g. missing file) is logged and ignored rather than raised.
    try:
        states = load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
        if "step" in states:
            override_attributes["step"] = states["step"]
        random.setstate(states["random_state"])
        np.random.set_state(states["numpy_random_seed"])
        torch.set_rng_state(states["torch_manual_seed"])
        if is_xpu_available():
            torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
        if is_mlu_available():
            torch.mlu.set_rng_state_all(states["torch_mlu_manual_seed"])
        elif is_sdaa_available():
            torch.sdaa.set_rng_state_all(states["torch_sdaa_manual_seed"])
        elif is_musa_available():
            torch.musa.set_rng_state_all(states["torch_musa_manual_seed"])
        else:
            torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
        if is_torch_xla_available():
            xm.set_rng_state(states["xm_seed"])
        logger.info("All random states loaded successfully")
    except Exception:
        logger.info("Could not load random states")

    return override_attributes
311
+
312
+
313
def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
    """
    Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
    """
    # `obj` is any object exposing a `state_dict()` method; `index` disambiguates
    # multiple registered custom objects within the same checkpoint folder.
    target_file = Path(path).joinpath(f"custom_checkpoint_{index}.pkl")
    logger.info(f"Saving the state of {get_pretty_name(obj)} to {target_file}")
    save(obj.state_dict(), target_file, save_on_each_node=save_on_each_node)
321
+
322
+
323
def load_custom_state(obj, path, index: int = 0):
    """
    Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`. Will always set `weights_only=False` when
    loading the state.
    """
    source_file = f"{path}/custom_checkpoint_{index}.pkl"
    logger.info(f"Loading the state of {get_pretty_name(obj)} from {source_file}")
    # weights_only=False: custom objects may pickle arbitrary (non-tensor) state.
    state_dict = load(source_file, map_location="cpu", weights_only=False)
    obj.load_state_dict(state_dict)
venv/lib/python3.10/site-packages/accelerate/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (255 Bytes). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc ADDED
Binary file (1.47 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc ADDED
Binary file (33.1 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/merge.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc ADDED
Binary file (1.72 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/to_fsdp2.cpython-310.pyc ADDED
Binary file (4.16 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (3.92 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from accelerate.commands.config import get_config_parser
18
+ from accelerate.commands.env import env_command_parser
19
+ from accelerate.commands.estimate import estimate_command_parser
20
+ from accelerate.commands.launch import launch_command_parser
21
+ from accelerate.commands.merge import merge_command_parser
22
+ from accelerate.commands.test import test_command_parser
23
+ from accelerate.commands.to_fsdp2 import to_fsdp2_command_parser
24
+ from accelerate.commands.tpu import tpu_command_parser
25
+ from accelerate.commands.utils import CustomArgumentParser
26
+
27
+
28
+ def main():
29
+ parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
30
+ subparsers = parser.add_subparsers(help="accelerate command helpers")
31
+
32
+ # Register commands
33
+ get_config_parser(subparsers=subparsers)
34
+ estimate_command_parser(subparsers=subparsers)
35
+ env_command_parser(subparsers=subparsers)
36
+ launch_command_parser(subparsers=subparsers)
37
+ merge_command_parser(subparsers=subparsers)
38
+ tpu_command_parser(subparsers=subparsers)
39
+ test_command_parser(subparsers=subparsers)
40
+ to_fsdp2_command_parser(subparsers=subparsers)
41
+
42
+ # Let's go
43
+ args = parser.parse_args()
44
+
45
+ if not hasattr(args, "func"):
46
+ parser.print_help()
47
+ exit(1)
48
+
49
+ # Run
50
+ args.func(args)
51
+
52
+
53
+ if __name__ == "__main__":
54
+ main()
venv/lib/python3.10/site-packages/accelerate/commands/config/__init__.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from .config import config_command_parser
20
+ from .config_args import default_config_file, load_config_from_file # noqa: F401
21
+ from .default import default_command_parser
22
+ from .update import update_command_parser
23
+
24
+
25
+ def get_config_parser(subparsers=None):
26
+ parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
27
+ # The main config parser
28
+ config_parser = config_command_parser(subparsers)
29
+ # The subparser to add commands to
30
+ subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
31
+
32
+ # Then add other parsers with the parent parser
33
+ default_command_parser(subcommands, parents=[parent_parser])
34
+ update_command_parser(subcommands, parents=[parent_parser])
35
+
36
+ return config_parser
37
+
38
+
39
+ def main():
40
+ config_parser = get_config_parser()
41
+ args = config_parser.parse_args()
42
+
43
+ if not hasattr(args, "func"):
44
+ config_parser.print_help()
45
+ exit(1)
46
+
47
+ # Run
48
+ args.func(args)
49
+
50
+
51
+ if __name__ == "__main__":
52
+ main()
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.51 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc ADDED
Binary file (7.49 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc ADDED
Binary file (3.07 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc ADDED
Binary file (4.29 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc ADDED
Binary file (7.06 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc ADDED
Binary file (1.93 kB). View file
 
venv/lib/python3.10/site-packages/accelerate/commands/config/cluster.py ADDED
@@ -0,0 +1,917 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+
19
+ from ...utils import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ is_deepspeed_available,
23
+ is_fp8_available,
24
+ is_hpu_available,
25
+ is_mlu_available,
26
+ is_mps_available,
27
+ is_msamp_available,
28
+ is_musa_available,
29
+ is_npu_available,
30
+ is_sdaa_available,
31
+ is_transformer_engine_available,
32
+ is_transformers_available,
33
+ is_xpu_available,
34
+ )
35
+ from ...utils.constants import (
36
+ DEEPSPEED_MULTINODE_LAUNCHERS,
37
+ FSDP2_STATE_DICT_TYPE,
38
+ FSDP_AUTO_WRAP_POLICY,
39
+ FSDP_BACKWARD_PREFETCH,
40
+ FSDP_SHARDING_STRATEGY,
41
+ FSDP_STATE_DICT_TYPE,
42
+ TORCH_DYNAMO_MODES,
43
+ )
44
+ from .config_args import ClusterConfig
45
+ from .config_utils import (
46
+ DYNAMO_BACKENDS,
47
+ _ask_field,
48
+ _ask_options,
49
+ _convert_distributed_mode,
50
+ _convert_dynamo_backend,
51
+ _convert_fp8_backend,
52
+ _convert_mixed_precision,
53
+ _convert_yes_no_to_bool,
54
+ )
55
+
56
+
57
+ def get_cluster_input():
58
+ distributed_type = _ask_options(
59
+ "Which type of machine are you using?",
60
+ [
61
+ "No distributed training",
62
+ "multi-CPU",
63
+ "multi-XPU",
64
+ "multi-HPU",
65
+ "multi-GPU",
66
+ "multi-NPU",
67
+ "multi-MLU",
68
+ "multi-SDAA",
69
+ "multi-MUSA",
70
+ "TPU",
71
+ ],
72
+ _convert_distributed_mode,
73
+ )
74
+
75
+ machine_rank = 0
76
+ num_machines = 1
77
+ num_processes = 1
78
+ gpu_ids = None
79
+ main_process_ip = None
80
+ main_process_port = None
81
+ rdzv_backend = "static"
82
+ same_network = True
83
+ debug = False
84
+
85
+ if distributed_type in [
86
+ DistributedType.MULTI_GPU,
87
+ DistributedType.MULTI_MLU,
88
+ DistributedType.MULTI_SDAA,
89
+ DistributedType.MULTI_MUSA,
90
+ DistributedType.MULTI_NPU,
91
+ DistributedType.MULTI_XPU,
92
+ DistributedType.MULTI_CPU,
93
+ DistributedType.MULTI_HPU,
94
+ ]:
95
+ num_machines = _ask_field(
96
+ "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
97
+ int,
98
+ default=1,
99
+ )
100
+ if num_machines > 1:
101
+ machine_rank = _ask_options(
102
+ "What is the rank of this machine?",
103
+ list(range(num_machines)),
104
+ int,
105
+ )
106
+ main_process_ip = _ask_field(
107
+ "What is the IP address of the machine that will host the main process? ",
108
+ )
109
+ main_process_port = _ask_field(
110
+ "What is the port you will use to communicate with the main process? ",
111
+ int,
112
+ )
113
+ same_network = _ask_field(
114
+ "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
115
+ _convert_yes_no_to_bool,
116
+ default=True,
117
+ error_message="Please enter yes or no.",
118
+ )
119
+ if not same_network:
120
+ rdzv_backend = _ask_field(
121
+ "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static"
122
+ )
123
+ debug = _ask_field(
124
+ "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
125
+ _convert_yes_no_to_bool,
126
+ default=False,
127
+ error_message="Please enter yes or no.",
128
+ )
129
+
130
+ if distributed_type == DistributedType.NO:
131
+ use_cpu = _ask_field(
132
+ "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
133
+ _convert_yes_no_to_bool,
134
+ default=False,
135
+ error_message="Please enter yes or no.",
136
+ )
137
+ elif distributed_type == DistributedType.MULTI_CPU:
138
+ use_cpu = True
139
+ else:
140
+ use_cpu = False
141
+
142
+ ipex_config = {}
143
+ mpirun_config = {}
144
+ if use_cpu or is_xpu_available():
145
+ ipex_config["ipex"] = _ask_field(
146
+ "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU/XPU? [yes/NO]:",
147
+ _convert_yes_no_to_bool,
148
+ default=False,
149
+ error_message="Please enter yes or no.",
150
+ )
151
+
152
+ if use_cpu:
153
+ if distributed_type == DistributedType.MULTI_CPU:
154
+ use_mpirun = _ask_field(
155
+ "Do you want accelerate to launch mpirun? [yes/NO]: ",
156
+ _convert_yes_no_to_bool,
157
+ default=False,
158
+ error_message="Please enter yes or no.",
159
+ )
160
+ if use_mpirun:
161
+ mpirun_hostfile = _ask_field(
162
+ "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
163
+ str,
164
+ default="~/hostfile",
165
+ )
166
+ mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
167
+ mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
168
+
169
+ dynamo_config = {}
170
+ use_dynamo = _ask_field(
171
+ "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
172
+ _convert_yes_no_to_bool,
173
+ default=False,
174
+ error_message="Please enter yes or no.",
175
+ )
176
+ if use_dynamo:
177
+ prefix = "dynamo_"
178
+ dynamo_config[prefix + "backend"] = _ask_options(
179
+ "Which dynamo backend would you like to use?",
180
+ [x.lower() for x in DYNAMO_BACKENDS],
181
+ _convert_dynamo_backend,
182
+ default=2,
183
+ )
184
+ use_custom_options = _ask_field(
185
+ "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
186
+ _convert_yes_no_to_bool,
187
+ default=False,
188
+ error_message="Please enter yes or no.",
189
+ )
190
+
191
+ if use_custom_options:
192
+ dynamo_config[prefix + "mode"] = _ask_options(
193
+ "Which mode do you want to use?",
194
+ TORCH_DYNAMO_MODES,
195
+ lambda x: TORCH_DYNAMO_MODES[int(x)],
196
+ default=0,
197
+ )
198
+ dynamo_config[prefix + "use_fullgraph"] = _ask_field(
199
+ "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
200
+ _convert_yes_no_to_bool,
201
+ default=False,
202
+ error_message="Please enter yes or no.",
203
+ )
204
+ dynamo_config[prefix + "use_dynamic"] = _ask_field(
205
+ "Do you want to enable dynamic shape tracing? [yes/NO]: ",
206
+ _convert_yes_no_to_bool,
207
+ default=False,
208
+ error_message="Please enter yes or no.",
209
+ )
210
+ dynamo_config[prefix + "use_regional_compilation"] = _ask_field(
211
+ "Do you want to enable regional compilation? [yes/NO]: ",
212
+ _convert_yes_no_to_bool,
213
+ default=False,
214
+ error_message="Please enter yes or no.",
215
+ )
216
+
217
+ use_mps = not use_cpu and is_mps_available()
218
+ deepspeed_config = {}
219
+ if (
220
+ distributed_type
221
+ in [
222
+ DistributedType.MULTI_GPU,
223
+ DistributedType.MULTI_XPU,
224
+ DistributedType.MULTI_HPU,
225
+ DistributedType.MULTI_NPU,
226
+ DistributedType.MULTI_MLU,
227
+ DistributedType.MULTI_SDAA,
228
+ DistributedType.MULTI_MUSA,
229
+ DistributedType.NO,
230
+ ]
231
+ and not use_mps
232
+ ):
233
+ use_deepspeed = _ask_field(
234
+ "Do you want to use DeepSpeed? [yes/NO]: ",
235
+ _convert_yes_no_to_bool,
236
+ default=False,
237
+ error_message="Please enter yes or no.",
238
+ )
239
+ if use_deepspeed:
240
+ distributed_type = DistributedType.DEEPSPEED
241
+ assert is_deepspeed_available(), (
242
+ "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
243
+ )
244
+
245
+ if distributed_type == DistributedType.DEEPSPEED:
246
+ use_deepspeed_config = _ask_field(
247
+ "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
248
+ _convert_yes_no_to_bool,
249
+ default=False,
250
+ error_message="Please enter yes or no.",
251
+ )
252
+ if use_deepspeed_config:
253
+ deepspeed_config["deepspeed_config_file"] = _ask_field(
254
+ "Please enter the path to the json DeepSpeed config file: ",
255
+ str,
256
+ default="none",
257
+ )
258
+ else:
259
+ deepspeed_config["zero_stage"] = _ask_options(
260
+ "What should be your DeepSpeed's ZeRO optimization stage?",
261
+ [0, 1, 2, 3],
262
+ int,
263
+ default=2,
264
+ )
265
+
266
+ deepspeed_devices = ["none", "cpu", "nvme"]
267
+ if deepspeed_config["zero_stage"] >= 2:
268
+ deepspeed_config["offload_optimizer_device"] = _ask_options(
269
+ "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
270
+ )
271
+ deepspeed_config["offload_param_device"] = _ask_options(
272
+ "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
273
+ )
274
+ if deepspeed_config["offload_param_device"] == "nvme":
275
+ deepspeed_config["offload_param_nvme_path"] = _ask_field(
276
+ "Nvme Path to offload parameters?",
277
+ str,
278
+ default="/nvme",
279
+ )
280
+ if deepspeed_config["offload_optimizer_device"] == "nvme":
281
+ deepspeed_config["offload_optimizer_nvme_path"] = _ask_field(
282
+ "Nvme Path to offload optimizer states?",
283
+ str,
284
+ default="/nvme",
285
+ )
286
+ deepspeed_config["gradient_accumulation_steps"] = _ask_field(
287
+ "How many gradient accumulation steps you're passing in your script? [1]: ",
288
+ int,
289
+ default=1,
290
+ )
291
+ use_gradient_clipping = _ask_field(
292
+ "Do you want to use gradient clipping? [yes/NO]: ",
293
+ _convert_yes_no_to_bool,
294
+ default=False,
295
+ error_message="Please enter yes or no.",
296
+ )
297
+ if use_gradient_clipping:
298
+ deepspeed_config["gradient_clipping"] = _ask_field(
299
+ "What is the gradient clipping value? [1.0]: ",
300
+ float,
301
+ default=1.0,
302
+ )
303
+ if deepspeed_config["zero_stage"] == 3:
304
+ deepspeed_config["zero3_save_16bit_model"] = _ask_field(
305
+ "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ",
306
+ _convert_yes_no_to_bool,
307
+ default=False,
308
+ error_message="Please enter yes or no.",
309
+ )
310
+ deepspeed_config["zero3_init_flag"] = _ask_field(
311
+ "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ",
312
+ _convert_yes_no_to_bool,
313
+ default=False,
314
+ error_message="Please enter yes or no.",
315
+ )
316
+ if deepspeed_config["zero3_init_flag"]:
317
+ if not is_transformers_available():
318
+ raise Exception(
319
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
320
+ "Please run `pip3 install transformers`."
321
+ )
322
+ use_moe = _ask_field(
323
+ "Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ",
324
+ _convert_yes_no_to_bool,
325
+ default=False,
326
+ error_message="Please enter yes or no.",
327
+ )
328
+ if use_moe:
329
+ deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field(
330
+ "Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : "
331
+ " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ",
332
+ str,
333
+ )
334
+
335
+ if num_machines > 1:
336
+ launcher_query = "Which Type of launcher do you want to use?"
337
+ deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
338
+ launcher_query,
339
+ DEEPSPEED_MULTINODE_LAUNCHERS,
340
+ lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
341
+ )
342
+
343
+ if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
344
+ deepspeed_config["deepspeed_hostfile"] = _ask_field(
345
+ "DeepSpeed configures multi-node compute resources with hostfile. "
346
+ "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; "
347
+ "for more information please refer official [documentation]"
348
+ "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
349
+ "Please specify the location of hostfile: ",
350
+ str,
351
+ )
352
+
353
+ is_exclusion_filter = _ask_field(
354
+ "Do you want to specify exclusion filter string? [yes/NO]: ",
355
+ _convert_yes_no_to_bool,
356
+ default=False,
357
+ error_message="Please enter yes or no.",
358
+ )
359
+ if is_exclusion_filter:
360
+ deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
361
+ "DeepSpeed exclusion filter string: ",
362
+ str,
363
+ )
364
+
365
+ is_inclusion_filter = _ask_field(
366
+ "Do you want to specify inclusion filter string? [yes/NO]: ",
367
+ _convert_yes_no_to_bool,
368
+ default=False,
369
+ error_message="Please enter yes or no.",
370
+ )
371
+ if is_inclusion_filter:
372
+ deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
373
+ "DeepSpeed inclusion filter string: ",
374
+ str,
375
+ )
376
+
377
+ fsdp_config = {}
378
+
379
+ if distributed_type in [
380
+ DistributedType.MULTI_GPU,
381
+ DistributedType.MULTI_NPU,
382
+ DistributedType.MULTI_MLU,
383
+ DistributedType.MULTI_SDAA,
384
+ DistributedType.MULTI_MUSA,
385
+ DistributedType.MULTI_XPU,
386
+ DistributedType.MULTI_HPU,
387
+ ]:
388
+ use_fsdp = _ask_field(
389
+ "Do you want to use FullyShardedDataParallel? [yes/NO]: ",
390
+ _convert_yes_no_to_bool,
391
+ default=False,
392
+ error_message="Please enter yes or no.",
393
+ )
394
+ if use_fsdp:
395
+ distributed_type = DistributedType.FSDP
396
+ if distributed_type == DistributedType.FSDP:
397
+ fsdp_config["fsdp_version"] = _ask_options(
398
+ "What should be your FSDP version? [2]: ",
399
+ [1, 2],
400
+ lambda x: int(x) + 1,
401
+ default=1,
402
+ )
403
+ fsdp_version = fsdp_config["fsdp_version"] # extract to a variable to simplify usage later
404
+
405
+ if fsdp_version == 1:
406
+ sharding_strategy_query = "What should be your sharding strategy?"
407
+ fsdp_config["fsdp_reshard_after_forward"] = _ask_options(
408
+ sharding_strategy_query,
409
+ FSDP_SHARDING_STRATEGY,
410
+ lambda x: FSDP_SHARDING_STRATEGY[int(x)],
411
+ )
412
+ else:
413
+ fsdp_config["fsdp_reshard_after_forward"] = _ask_field(
414
+ "Do you want to enable resharding after forward? [YES/no]: ",
415
+ _convert_yes_no_to_bool,
416
+ default=True,
417
+ error_message="Please enter yes or no.",
418
+ )
419
+
420
+ fsdp_config["fsdp_offload_params"] = _ask_field(
421
+ "Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
422
+ _convert_yes_no_to_bool,
423
+ default=False,
424
+ error_message="Please enter yes or no.",
425
+ )
426
+
427
+ fsdp_wrap_query = "What should be your auto wrap policy?"
428
+ fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
429
+ fsdp_wrap_query,
430
+ FSDP_AUTO_WRAP_POLICY,
431
+ lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
432
+ )
433
+ if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
434
+ use_no_split_modules = _ask_field(
435
+ "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ",
436
+ _convert_yes_no_to_bool,
437
+ default=False,
438
+ error_message="Please enter yes or no.",
439
+ )
440
+ if not use_no_split_modules:
441
+ fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
442
+ "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :"
443
+ "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
444
+ str,
445
+ )
446
+ elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
447
+ fsdp_config["fsdp_min_num_params"] = _ask_field(
448
+ "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
449
+ int,
450
+ default=100000000,
451
+ )
452
+ # Removed in FSDP2, ask for user input for FSDP1
453
+ if fsdp_version == 1:
454
+ fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
455
+ fsdp_config["fsdp_backward_prefetch"] = _ask_options(
456
+ fsdp_backward_prefetch_query,
457
+ FSDP_BACKWARD_PREFETCH,
458
+ lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
459
+ )
460
+
461
+ fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
462
+ fsdp_config["fsdp_state_dict_type"] = _ask_options(
463
+ fsdp_state_dict_type_query,
464
+ FSDP_STATE_DICT_TYPE if fsdp_version == 1 else FSDP2_STATE_DICT_TYPE,
465
+ lambda x: FSDP_STATE_DICT_TYPE[int(x)] if fsdp_version == 1 else FSDP2_STATE_DICT_TYPE[int(x)],
466
+ default=0,
467
+ )
468
+ # Not implemented in FSDP2, ask for user input for FSDP1
469
+ if fsdp_version == 1:
470
+ fsdp_config["fsdp_forward_prefetch"] = _ask_field(
471
+ "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
472
+ _convert_yes_no_to_bool,
473
+ default=False,
474
+ error_message="Please enter yes or no.",
475
+ )
476
+ # Obsolete in FSDP2, ask for user input for FSDP1
477
+ if fsdp_version == 1:
478
+ fsdp_config["fsdp_use_orig_params"] = _ask_field(
479
+ "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
480
+ _convert_yes_no_to_bool,
481
+ default=True,
482
+ error_message="Please enter yes or no.",
483
+ )
484
+ fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
485
+ "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
486
+ _convert_yes_no_to_bool,
487
+ default=True,
488
+ error_message="Please enter yes or no.",
489
+ )
490
+ # Obsolete in FSDP2, ask for user input for FSDP1
491
+ if fsdp_version == 1:
492
+ if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
493
+ fsdp_config["fsdp_sync_module_states"] = True
494
+ else:
495
+ fsdp_config["fsdp_sync_module_states"] = _ask_field(
496
+ "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
497
+ _convert_yes_no_to_bool,
498
+ default=True,
499
+ error_message="Please enter yes or no.",
500
+ )
501
+ fsdp_config["fsdp_activation_checkpointing"] = _ask_field(
502
+ "Do you want to enable FSDP activation checkpointing? [yes/NO]: ",
503
+ _convert_yes_no_to_bool,
504
+ default=False,
505
+ error_message="Please enter yes or no.",
506
+ )
507
+
508
+ parallelism_config = {}
509
+
510
+ if fsdp_config.get("fsdp_version", 1) == 2:
511
+ use_parallelism_config = _ask_field(
512
+ "Do you want to use the parallelism config? [yes/NO]: ",
513
+ _convert_yes_no_to_bool,
514
+ default=False,
515
+ error_message="Please enter yes or no.",
516
+ )
517
+
518
+ if use_parallelism_config:
519
+ prefix = "parallelism_config_"
520
+ parallelism_config[prefix + "dp_replicate_size"] = _ask_field(
521
+ "What is the data parallelism replicate size? [1]: ",
522
+ int,
523
+ default=1,
524
+ error_message="Please enter an integer.",
525
+ )
526
+
527
+ parallelism_config[prefix + "dp_shard_size"] = _ask_field(
528
+ "What is the FSDP shard size? [1]: ",
529
+ int,
530
+ default=1,
531
+ error_message="Please enter an integer.",
532
+ )
533
+
534
+ parallelism_config[prefix + "tp_size"] = _ask_field(
535
+ "What is the tensor parallelism size? [1]: ",
536
+ int,
537
+ default=1,
538
+ error_message="Please enter an integer.",
539
+ )
540
+
541
+ parallelism_config[prefix + "cp_size"] = _ask_field(
542
+ "What is the context parallelism size? [1]: ",
543
+ int,
544
+ default=1,
545
+ error_message="Please enter an integer.",
546
+ )
547
+ if parallelism_config[prefix + "cp_size"] > 1:
548
+ parallelism_config[prefix + "cp_comm_strategy"] = _ask_options(
549
+ "What is the compute parallelism communication strategy?",
550
+ ["allgather", "alltoall"],
551
+ lambda x: ["allgather", "alltoall"][int(x)],
552
+ default=0,
553
+ )
554
+
555
+ megatron_lm_config = {}
556
+ if distributed_type in [DistributedType.MULTI_GPU]:
557
+ use_megatron_lm = _ask_field(
558
+ "Do you want to use Megatron-LM ? [yes/NO]: ",
559
+ _convert_yes_no_to_bool,
560
+ default=False,
561
+ error_message="Please enter yes or no.",
562
+ )
563
+ if use_megatron_lm:
564
+ distributed_type = DistributedType.MEGATRON_LM
565
+ if distributed_type == DistributedType.MEGATRON_LM:
566
+ prefix = "megatron_lm_"
567
+ megatron_lm_config[prefix + "tp_degree"] = _ask_field(
568
+ "What is the Tensor Parallelism degree/size? [1]:",
569
+ int,
570
+ default=1,
571
+ error_message="Please enter an integer.",
572
+ )
573
+ if megatron_lm_config[prefix + "tp_degree"] > 1:
574
+ megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field(
575
+ "Do you want to enable Sequence Parallelism? [YES/no]: ",
576
+ _convert_yes_no_to_bool,
577
+ default=True,
578
+ error_message="Please enter yes or no.",
579
+ )
580
+
581
+ megatron_lm_config[prefix + "pp_degree"] = _ask_field(
582
+ "What is the Pipeline Parallelism degree/size? [1]:",
583
+ int,
584
+ default=1,
585
+ error_message="Please enter an integer.",
586
+ )
587
+ if megatron_lm_config[prefix + "pp_degree"] > 1:
588
+ megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
589
+ "What is the number of micro-batches? [1]:",
590
+ int,
591
+ default=1,
592
+ error_message="Please enter an integer.",
593
+ )
594
+
595
+ megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
596
+ "Do you want to enable selective activation recomputation? [YES/no]: ",
597
+ _convert_yes_no_to_bool,
598
+ default=True,
599
+ error_message="Please enter yes or no.",
600
+ )
601
+
602
+ megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
603
+ "Do you want to use distributed optimizer "
604
+ "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
605
+ _convert_yes_no_to_bool,
606
+ default=True,
607
+ error_message="Please enter yes or no.",
608
+ )
609
+
610
+ megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
611
+ "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
612
+ float,
613
+ default=1.0,
614
+ )
615
+ # TPU specific defaults
616
+ tpu_commands = None
617
+ tpu_command_file = None
618
+ tpu_downcast_bf16 = "no"
619
+ tpu_env = []
620
+ tpu_name = None
621
+ tpu_vm = None
622
+ tpu_zone = None
623
+ tpu_use_sudo = False
624
+ tpu_use_cluster = False
625
+
626
+ if distributed_type in [
627
+ DistributedType.MULTI_CPU,
628
+ DistributedType.MULTI_XPU,
629
+ DistributedType.MULTI_HPU,
630
+ DistributedType.MULTI_GPU,
631
+ DistributedType.MULTI_MLU,
632
+ DistributedType.MULTI_SDAA,
633
+ DistributedType.MULTI_MUSA,
634
+ DistributedType.MULTI_NPU,
635
+ DistributedType.XLA,
636
+ ]:
637
+ machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
638
+ if machine_type == "TPU":
639
+ machine_type += " cores"
640
+ elif machine_type == "CPU":
641
+ machine_type = "processes"
642
+ else:
643
+ machine_type += "(s)"
644
+ num_processes = _ask_field(
645
+ f"How many {machine_type} should be used for distributed training? [1]:",
646
+ int,
647
+ default=1,
648
+ error_message="Please enter an integer.",
649
+ )
650
+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
651
+ num_processes = _ask_field(
652
+ "How many GPU(s) should be used for distributed training? [1]:",
653
+ int,
654
+ default=1,
655
+ error_message="Please enter an integer.",
656
+ )
657
+ else:
658
+ num_processes = 1
659
+
660
+ if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
661
+ raise ValueError(
662
+ f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
663
+ )
664
+
665
+ if (
666
+ distributed_type
667
+ in [
668
+ DistributedType.MULTI_GPU,
669
+ DistributedType.MULTI_MLU,
670
+ DistributedType.MULTI_SDAA,
671
+ DistributedType.MULTI_MUSA,
672
+ DistributedType.MULTI_NPU,
673
+ DistributedType.MULTI_XPU,
674
+ DistributedType.MULTI_HPU,
675
+ DistributedType.NO,
676
+ ]
677
+ and not use_cpu
678
+ and not use_mps
679
+ ):
680
+ if is_npu_available():
681
+ machine_type = "NPU(s)"
682
+ elif is_mlu_available():
683
+ machine_type = "MLU(s)"
684
+ elif is_sdaa_available():
685
+ machine_type = "SDAA(s)"
686
+ elif is_musa_available():
687
+ machine_type = "MUSA(s)"
688
+ elif is_xpu_available():
689
+ machine_type = "XPU(s)"
690
+ elif is_hpu_available():
691
+ machine_type = "HPU(s)"
692
+ else:
693
+ machine_type = "GPU(s)"
694
+ gpu_ids = _ask_field(
695
+ f"What {machine_type} (by id) should be used for training on this machine as a comma-separated list? [all]:",
696
+ default="all",
697
+ )
698
+
699
+ # CPU affinity is only supported on NVIDIA hardware for now
700
+ enable_cpu_affinity = False
701
+ if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
702
+ enable_cpu_affinity = _ask_field(
703
+ "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
704
+ _convert_yes_no_to_bool,
705
+ default=False,
706
+ error_message="Please enter yes or no.",
707
+ )
708
+
709
+ fp8_config = None
710
+ if distributed_type == DistributedType.XLA:
711
+ mixed_precision = "no"
712
+ main_training_function = _ask_field(
713
+ "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
714
+ default="main",
715
+ )
716
+ tpu_use_cluster = _ask_field(
717
+ "Are you using a TPU cluster? [yes/NO]: ",
718
+ _convert_yes_no_to_bool,
719
+ default=False,
720
+ error_message="Please enter yes or no.",
721
+ )
722
+ if tpu_use_cluster:
723
+ tpu_name = _ask_field(
724
+ "What is the name of your TPU cluster? ",
725
+ default=None,
726
+ error_message="Please enter the name of your TPU cluster.",
727
+ )
728
+ tpu_zone = _ask_field(
729
+ "What is the zone of your TPU cluster? ",
730
+ default=None,
731
+ error_message="Please enter the zone of your TPU cluster.",
732
+ )
733
+ tpu_use_sudo = _ask_field(
734
+ "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
735
+ default=False,
736
+ error_message="Please enter yes or no.",
737
+ )
738
+ run_commands = _ask_field(
739
+ "Do you have code you wish to run on startup in each pod? [yes/NO]: ",
740
+ _convert_yes_no_to_bool,
741
+ default=False,
742
+ error_message="Please enter yes or no.",
743
+ )
744
+ if run_commands:
745
+ use_command_file = _ask_field(
746
+ "Is this code located in a bash script? [yes/NO]: ",
747
+ _convert_yes_no_to_bool,
748
+ default=False,
749
+ error_message="Please enter yes or no.",
750
+ )
751
+ if use_command_file:
752
+ tpu_command_file = _ask_field(
753
+ "What is the path to your bash script? ",
754
+ default=None,
755
+ error_message="Please enter the path to your bash script.",
756
+ )
757
+ tpu_command_file = os.path.abspath(tpu_command_file)
758
+ else:
759
+ print("Please enter each command separately you wish to run on startup in each pod.")
760
+ tpu_commands = []
761
+ another_command = True
762
+ while another_command:
763
+ tpu_commands.append(
764
+ _ask_field(
765
+ "Please enter a single command to be ran ",
766
+ default=None,
767
+ error_message="Please enter the commands you wish to run on startup in each pod as a single string.",
768
+ )
769
+ )
770
+ another_command = _ask_field(
771
+ "Do you wish to add another command? [yes/NO]: ",
772
+ _convert_yes_no_to_bool,
773
+ default=False,
774
+ error_message="Please enter yes or no.",
775
+ )
776
+ tpu_vm = _ask_field(
777
+ "If not using an instance group, what are the names of the Compute VM instances to be used, separated by a comma: ",
778
+ default="",
779
+ ).split(",")
780
+ tpu_env = _ask_field(
781
+ "What environment variables do you wish to set in each pod, separated by a comma: ",
782
+ default="",
783
+ ).split(",")
784
+
785
+ else:
786
+ main_training_function = "main"
787
+ if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
788
+ mixed_precision = None
789
+ else:
790
+ mixed_precision = _ask_options(
791
+ "Do you wish to use mixed precision?",
792
+ ["no", "fp16", "bf16", "fp8"],
793
+ _convert_mixed_precision,
794
+ )
795
+ if mixed_precision == "fp8":
796
+ if not is_fp8_available():
797
+ raise ValueError("FP8 (either Transformer Engine or MSAMP) is not installed on this machine.")
798
+ fp8_config = {}
799
+ fp8_config["backend"] = _ask_options(
800
+ "Which FP8 backend do you want to use?",
801
+ ["te", "msamp"],
802
+ _convert_fp8_backend,
803
+ )
804
+ if fp8_config["backend"] == "TE":
805
+ if not is_transformer_engine_available():
806
+ raise ValueError("TransformersEngine was selected, but it is not installed on this machine.")
807
+ fp8_config["use_autocast_during_eval"] = _ask_field(
808
+ "Do you want to use FP8 autocast during eval mode? Generally better metrics are found when this is disabled [yes/NO]: ",
809
+ _convert_yes_no_to_bool,
810
+ default=False,
811
+ )
812
+ fp8_config["margin"] = _ask_field(
813
+ "What margin should be used for gradient scaling? [0]: ",
814
+ int,
815
+ default=0,
816
+ )
817
+ fp8_config["interval"] = _ask_field(
818
+ "What interval should be used for for how often the scaling factor is recomputed? [1]: ",
819
+ int,
820
+ default=1,
821
+ )
822
+ fp8_config["fp8_format"] = _ask_options(
823
+ "Which weight format should be used?",
824
+ ["HYBRID", "E4M3", "E5M2"],
825
+ lambda i: ["HYBRID", "E4M3", "E5M2"][i],
826
+ default=0,
827
+ )
828
+ fp8_config["amax_history_length"] = _ask_field(
829
+ "What length of history should be used for the amax scaling factor computation? [1024]: ",
830
+ int,
831
+ default=1024,
832
+ )
833
+ fp8_config["amax_compute_algorithm"] = _ask_options(
834
+ "Which algorithm should be used for the amax scaling factor computation?",
835
+ ["max", "most_recent"],
836
+ lambda x: "max" if x == 0 else "most_recent",
837
+ default=0,
838
+ )
839
+ fp8_config["override_linear_precision"] = _ask_field(
840
+ "Do you want to to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision? [yes/NO]: ",
841
+ _convert_yes_no_to_bool,
842
+ default=False,
843
+ )
844
+ if fp8_config["override_linear_precision"]:
845
+ fprop = _ask_field(
846
+ "Should `fprop` be executed in higher precision? [yes/NO]: ",
847
+ _convert_yes_no_to_bool,
848
+ default=False,
849
+ )
850
+ dgrad = _ask_field(
851
+ "Should `dgrad` be executed in higher precision? [yes/NO]: ",
852
+ _convert_yes_no_to_bool,
853
+ default=False,
854
+ )
855
+ wgrad = _ask_field(
856
+ "Should `wgrad` be executed in higher precision? [yes/NO]: ",
857
+ _convert_yes_no_to_bool,
858
+ default=False,
859
+ )
860
+ fp8_config["override_linear_precision"] = (fprop, dgrad, wgrad)
861
+ else:
862
+ fp8_config["override_linear_precision"] = (False, False, False)
863
+
864
+ elif fp8_config["backend"] == "MSAMP":
865
+ if not is_msamp_available():
866
+ raise ValueError("MSAMP was selected, but it is not installed on this machine.")
867
+ fp8_config["optimization_level"] = _ask_options(
868
+ "Which optimization level should be used?",
869
+ ["O1", "O2"],
870
+ lambda x: "O1" if x == 0 else "O2",
871
+ default=1,
872
+ )
873
+
874
+ if use_dynamo and mixed_precision == "no" and not use_cpu:
875
+ print(
876
+ "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
877
+ )
878
+
879
+ if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
880
+ tpu_downcast_bf16 = _ask_field(
881
+ "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
882
+ )
883
+
884
+ return ClusterConfig(
885
+ compute_environment=ComputeEnvironment.LOCAL_MACHINE,
886
+ distributed_type=distributed_type,
887
+ num_processes=num_processes,
888
+ gpu_ids=gpu_ids,
889
+ mixed_precision=mixed_precision,
890
+ downcast_bf16=tpu_downcast_bf16,
891
+ machine_rank=machine_rank,
892
+ num_machines=num_machines,
893
+ main_process_ip=main_process_ip,
894
+ main_process_port=main_process_port,
895
+ main_training_function=main_training_function,
896
+ fp8_config=fp8_config,
897
+ deepspeed_config=deepspeed_config,
898
+ fsdp_config=fsdp_config,
899
+ parallelism_config=parallelism_config,
900
+ megatron_lm_config=megatron_lm_config,
901
+ ipex_config=ipex_config,
902
+ mpirun_config=mpirun_config,
903
+ use_cpu=use_cpu,
904
+ rdzv_backend=rdzv_backend,
905
+ same_network=same_network,
906
+ commands=tpu_commands,
907
+ command_file=tpu_command_file,
908
+ tpu_env=tpu_env,
909
+ tpu_name=tpu_name,
910
+ tpu_vm=tpu_vm,
911
+ tpu_zone=tpu_zone,
912
+ tpu_use_sudo=tpu_use_sudo,
913
+ tpu_use_cluster=tpu_use_cluster,
914
+ dynamo_config=dynamo_config,
915
+ debug=debug,
916
+ enable_cpu_affinity=enable_cpu_affinity,
917
+ )
venv/lib/python3.10/site-packages/accelerate/commands/config/config.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+
20
+ from accelerate.utils import ComputeEnvironment
21
+
22
+ from .cluster import get_cluster_input
23
+ from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
24
+ from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
25
+ from .sagemaker import get_sagemaker_input
26
+
27
+
28
+ description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
29
+
30
+
31
def get_user_input():
    """Ask which compute environment is in use and run the matching wizard.

    Returns:
        A populated config object from either the SageMaker or the cluster
        interactive prompt flow.
    """
    environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        return get_sagemaker_input()
    return get_cluster_input()
42
+
43
+
44
def config_command_parser(subparsers=None):
    """Build the argument parser for the `accelerate config` command.

    Args:
        subparsers: An existing argparse subparsers object to register the
            `config` subcommand on. When `None`, a standalone parser is built
            (used by the module-level `main`).

    Returns:
        The configured `argparse.ArgumentParser`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    # Only wire the dispatch function when registered as a subcommand; the
    # standalone path calls `config_command` directly from `main`.
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
64
+
65
+
66
def config_command(args):
    """Run the interactive prompts and persist the resulting config.

    Writes to `args.config_file` when provided; otherwise ensures the cache
    directory exists and writes the default YAML file. The on-disk format
    (JSON vs YAML) is chosen purely from the target file's extension.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Only the default location is guaranteed to live under the cache
        # directory, so it is created just for that case.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
80
+
81
+
82
def main():
    """Standalone entry point: parse CLI arguments and run `accelerate config`."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
venv/lib/python3.10/site-packages/accelerate/commands/config/config_args.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ import os
19
+ from dataclasses import dataclass
20
+ from enum import Enum
21
+ from typing import Optional, Union
22
+
23
+ import yaml
24
+
25
+ from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
26
+ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
27
+
28
+
29
+ hf_cache_home = os.path.expanduser(
30
+ os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
31
+ )
32
+ cache_dir = os.path.join(hf_cache_home, "accelerate")
33
+ default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
34
+ default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
35
+
36
+ # For backward compatibility: the default config is the json one if it's the only existing file.
37
+ if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
38
+ default_config_file = default_yaml_config_file
39
+ else:
40
+ default_config_file = default_json_config_file
41
+
42
+
43
def load_config_from_file(config_file):
    """Load a saved accelerate config from disk.

    The file's `compute_environment` field decides whether a `ClusterConfig`
    or a `SageMakerConfig` is instantiated; the extension decides whether the
    file is parsed as JSON or YAML.

    Raises:
        FileNotFoundError: If an explicit `config_file` path does not exist.
    """
    if config_file is None:
        config_file = default_config_file
    elif not os.path.isfile(config_file):
        raise FileNotFoundError(
            f"The passed configuration file `{config_file}` does not exist. "
            "Please pass an existing file to `accelerate launch`, or use the default one "
            "created through `accelerate config` and run `accelerate launch` "
            "without the `--config_file` argument."
        )

    is_json = config_file.endswith(".json")
    with open(config_file, encoding="utf-8") as f:
        raw = json.load(f) if is_json else yaml.safe_load(f)

    # Default to a local-machine config when the field is absent.
    environment = raw.get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
    config_class = ClusterConfig if environment == ComputeEnvironment.LOCAL_MACHINE else SageMakerConfig
    if is_json:
        return config_class.from_json_file(json_file=config_file)
    return config_class.from_yaml_file(yaml_file=config_file)
+
74
+
75
@dataclass
class BaseConfig:
    """Fields and (de)serialization helpers shared by all accelerate configs."""

    compute_environment: ComputeEnvironment
    distributed_type: Union[DistributedType, SageMakerDistributedType]
    mixed_precision: str
    use_cpu: bool
    debug: bool

    def to_dict(self):
        """Return a JSON/YAML-serializable dict of this config.

        Enums are replaced by their underlying values; empty dicts are
        normalized to `None`, and top-level `None` entries are dropped.

        Fix: the previous implementation aliased `self.__dict__` and rewrote
        values (and nested dicts) in place, so merely serializing a config
        permanently converted its enum attributes to plain strings. This
        version builds fresh dicts and leaves the instance untouched.
        """

        def _convert_enums(value):
            if isinstance(value, Enum):
                return value.value
            if isinstance(value, dict):
                if not value:
                    # Empty dicts are normalized to None so they get dropped below.
                    return None
                return {key: _convert_enums(val) for key, val in value.items()}
            return value

        converted = {key: _convert_enums(value) for key, value in self.__dict__.items()}
        return {k: v for k, v in converted.items() if v is not None}

    @staticmethod
    def process_config(config_dict):
        """
        Processes `config_dict` and sets default values for any missing keys
        """
        if "compute_environment" not in config_dict:
            config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
        if "distributed_type" not in config_dict:
            raise ValueError("A `distributed_type` must be specified in the config file.")
        if "num_processes" not in config_dict and config_dict["distributed_type"] == DistributedType.NO:
            config_dict["num_processes"] = 1
        if "mixed_precision" not in config_dict:
            # Legacy configs stored a boolean `fp16` flag instead of a mode string.
            config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
        if "fp16" in config_dict:  # Convert the config to the new format.
            del config_dict["fp16"]
        if "dynamo_backend" in config_dict:  # Convert the config to the new format.
            dynamo_backend = config_dict.pop("dynamo_backend")
            config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
        if "use_cpu" not in config_dict:
            config_dict["use_cpu"] = False
        if "debug" not in config_dict:
            config_dict["debug"] = False
        if "enable_cpu_affinity" not in config_dict:
            config_dict["enable_cpu_affinity"] = False
        return config_dict

    @classmethod
    def from_json_file(cls, json_file=None):
        """Load a config from a JSON file, rejecting unknown keys."""
        json_file = default_json_config_file if json_file is None else json_file
        with open(json_file, encoding="utf-8") as f:
            config_dict = json.load(f)
        config_dict = cls.process_config(config_dict)
        extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
        if len(extra_keys) > 0:
            raise ValueError(
                f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
                " version or fix (and potentially remove) these keys from your config file."
            )

        return cls(**config_dict)

    def to_json_file(self, json_file):
        """Serialize this config to `json_file` as pretty-printed JSON."""
        with open(json_file, "w", encoding="utf-8") as f:
            content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
            f.write(content)

    @classmethod
    def from_yaml_file(cls, yaml_file=None):
        """Load a config from a YAML file, rejecting unknown keys."""
        yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
        with open(yaml_file, encoding="utf-8") as f:
            config_dict = yaml.safe_load(f)
        config_dict = cls.process_config(config_dict)
        extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
        if len(extra_keys) > 0:
            raise ValueError(
                f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
                " version or fix (and potentially remove) these keys from your config file."
            )
        return cls(**config_dict)

    def to_yaml_file(self, yaml_file):
        """Serialize this config to `yaml_file` as YAML."""
        with open(yaml_file, "w", encoding="utf-8") as f:
            yaml.safe_dump(self.to_dict(), f)

    def __post_init__(self):
        # Coerce raw strings (as loaded from disk) back into their enum types.
        if isinstance(self.compute_environment, str):
            self.compute_environment = ComputeEnvironment(self.compute_environment)
        if isinstance(self.distributed_type, str):
            if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
                self.distributed_type = SageMakerDistributedType(self.distributed_type)
            else:
                self.distributed_type = DistributedType(self.distributed_type)
        if getattr(self, "dynamo_config", None) is None:
            self.dynamo_config = {}
+
177
+
178
@dataclass
class ClusterConfig(BaseConfig):
    """Configuration for launches on a local machine or self-managed cluster."""

    num_processes: int = -1  # For instance if we use SLURM and the user manually passes it in
    machine_rank: int = 0
    num_machines: int = 1
    gpu_ids: Optional[str] = None
    main_process_ip: Optional[str] = None
    main_process_port: Optional[int] = None
    rdzv_backend: Optional[str] = "static"
    same_network: Optional[bool] = False
    main_training_function: str = "main"
    enable_cpu_affinity: bool = False

    # args for FP8 training
    fp8_config: Optional[dict] = None
    # args for deepspeed_plugin
    deepspeed_config: Optional[dict] = None
    # args for fsdp
    fsdp_config: Optional[dict] = None
    # args for parallelism config
    parallelism_config: Optional[dict] = None
    # args for megatron_lm
    megatron_lm_config: Optional[dict] = None
    # args for ipex
    ipex_config: Optional[dict] = None
    # args for mpirun
    mpirun_config: Optional[dict] = None
    # args for TPU
    downcast_bf16: bool = False

    # args for TPU pods
    tpu_name: Optional[str] = None
    tpu_zone: Optional[str] = None
    tpu_use_cluster: bool = False
    tpu_use_sudo: bool = False
    command_file: Optional[str] = None
    commands: Optional[list[str]] = None
    tpu_vm: Optional[list[str]] = None
    tpu_env: Optional[list[str]] = None

    # args for dynamo
    dynamo_config: Optional[dict] = None

    def __post_init__(self):
        # Normalize all optional sub-config dicts to empty dicts so downstream
        # code can index into them without None checks.
        if self.deepspeed_config is None:
            self.deepspeed_config = {}
        if self.fsdp_config is None:
            self.fsdp_config = {}
        if self.megatron_lm_config is None:
            self.megatron_lm_config = {}
        if self.ipex_config is None:
            self.ipex_config = {}
        if self.mpirun_config is None:
            self.mpirun_config = {}
        if self.fp8_config is None:
            self.fp8_config = {}
        if self.parallelism_config is None:
            self.parallelism_config = {}
        return super().__post_init__()
+
238
+
239
@dataclass
class SageMakerConfig(BaseConfig):
    """Configuration for launches on AWS SageMaker."""

    ec2_instance_type: str
    iam_role_name: str
    image_uri: Optional[str] = None
    profile: Optional[str] = None
    region: str = "us-east-1"
    num_machines: int = 1
    gpu_ids: str = "all"
    # NOTE(review): this f-string is evaluated once at class-definition time,
    # so the default is always "accelerate-sagemaker-1" regardless of the
    # instance's actual `num_machines` — confirm this is intended.
    base_job_name: str = f"accelerate-sagemaker-{num_machines}"
    pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
    transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
    py_version: str = SAGEMAKER_PYTHON_VERSION
    sagemaker_inputs_file: Optional[str] = None
    sagemaker_metrics_file: Optional[str] = None
    additional_args: Optional[dict] = None
    dynamo_config: Optional[dict] = None
    enable_cpu_affinity: bool = False
venv/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from ...utils.dataclasses import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ DynamoBackend,
23
+ FP8BackendType,
24
+ PrecisionType,
25
+ SageMakerDistributedType,
26
+ )
27
+ from ..menu import BulletMenu
28
+
29
+
30
# Ordered list of torch.dynamo backend names shown by the config prompt; the
# selected index is mapped through `DynamoBackend` in `_convert_dynamo_backend`.
# NOTE(review): "TORHCHXLA_TRACE_ONCE" looks like a typo of
# "TORCHXLA_TRACE_ONCE" — confirm against the `DynamoBackend` enum members.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "AOT_TS_NVFUSER",
    "NVPRIMS_NVFUSER",
    "CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "TENSORRT",
    "AOT_TORCHXLA_TRACE_ONCE",
    "TORHCHXLA_TRACE_ONCE",
    "IPEX",
    "TVM",
]
46
+
47
+
48
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt on stdin until the answer converts cleanly.

    An empty answer returns `default` when one is set. Any exception raised by
    `convert_value` re-prompts, optionally printing `error_message` first.
    """
    while True:
        raw = input(input_text)
        try:
            if default is not None and len(raw) == 0:
                return default
            if convert_value is None:
                return raw
            return convert_value(raw)
        except Exception:
            # Invalid input: print the hint (if any) and ask again.
            if error_message is not None:
                print(error_message)
60
+
61
def _ask_options(input_text, options=None, convert_value=None, default=0):
    """Render a bullet menu and return the (optionally converted) selection.

    Args:
        input_text: Prompt displayed above the menu.
        options: Menu entries. Defaults to an empty list — previously this was
            a mutable `[]` default argument, which is shared across calls and
            a classic Python pitfall; `None` + a fresh list fixes that while
            keeping the call signature backward compatible.
        convert_value: Optional callable applied to the selected index.
        default: Index pre-selected when the menu opens.

    Returns:
        The selected index, or `convert_value(index)` when a converter is given.
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result
65
+
66
+
67
def _convert_compute_environment(value):
    """Map a menu index (0/1) to the matching `ComputeEnvironment` member."""
    environments = ("LOCAL_MACHINE", "AMAZON_SAGEMAKER")
    return ComputeEnvironment(environments[int(value)])
70
+
71
+
72
def _convert_distributed_mode(value):
    """Map a menu index to the matching `DistributedType` member."""
    modes = (
        "NO",
        "MULTI_CPU",
        "MULTI_XPU",
        "MULTI_HPU",
        "MULTI_GPU",
        "MULTI_NPU",
        "MULTI_MLU",
        "MULTI_SDAA",
        "MULTI_MUSA",
        "XLA",
    )
    return DistributedType(modes[int(value)])
88
+
89
+
90
def _convert_dynamo_backend(value):
    """Map a menu index into `DYNAMO_BACKENDS` and return the backend's raw value."""
    backend_name = DYNAMO_BACKENDS[int(value)]
    return DynamoBackend(backend_name).value
93
+
94
+
95
def _convert_mixed_precision(value):
    """Map a menu index to the matching `PrecisionType` member."""
    precisions = ("no", "fp16", "bf16", "fp8")
    return PrecisionType(precisions[int(value)])
98
+
99
+
100
def _convert_sagemaker_distributed_mode(value):
    """Map a menu index to the matching `SageMakerDistributedType` member."""
    modes = ("NO", "DATA_PARALLEL", "MODEL_PARALLEL")
    return SageMakerDistributedType(modes[int(value)])
103
+
104
+
105
def _convert_fp8_backend(value):
    """Map a menu index to the matching `FP8BackendType` member."""
    backends = ("TE", "MSAMP")
    return FP8BackendType(backends[int(value)])
108
+
109
+
110
+ def _convert_yes_no_to_bool(value):
111
+ return {"yes": True, "no": False}[value.lower()]
112
+
113
+
114
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A `RawDescriptionHelpFormatter` that strips the `<command> [<args>]`
    placeholder from the usage line shown for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        rendered = super()._format_usage(usage, actions, groups, prefix)
        return rendered.replace("<command> [<args>] ", "")
venv/lib/python3.10/site-packages/accelerate/commands/config/default.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ import torch
20
+
21
+ from ...utils import (
22
+ is_hpu_available,
23
+ is_mlu_available,
24
+ is_musa_available,
25
+ is_npu_available,
26
+ is_sdaa_available,
27
+ is_xpu_available,
28
+ )
29
+ from .config_args import ClusterConfig, default_json_config_file
30
+ from .config_utils import SubcommandHelpFormatter
31
+
32
+
33
+ description = "Create a default config file for Accelerate with only a few flags set."
34
+
35
+
36
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
    set CPU if it is a CPU-only machine.

    Args:
        mixed_precision (`str`, *optional*, defaults to "no"):
            Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
        save_location (`str`, *optional*, defaults to `default_json_config_file`):
            Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
            location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting
            the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }

    # Probe accelerators in priority order. Fix: the previous code started a
    # fresh `if` at the SDAA check instead of continuing the `elif` chain, so
    # when only an MLU was present the final CPU fallback clobbered the
    # already-detected MLU settings. A single ordered scan avoids that and the
    # repetition of seven near-identical branches.
    backends = (
        (is_mlu_available, "mlu", "MULTI_MLU"),
        (is_sdaa_available, "sdaa", "MULTI_SDAA"),
        (is_musa_available, "musa", "MULTI_MUSA"),
        (is_hpu_available, "hpu", "MULTI_HPU"),
        (torch.cuda.is_available, "cuda", "MULTI_GPU"),
        (is_xpu_available, "xpu", "MULTI_XPU"),
        (is_npu_available, "npu", "MULTI_NPU"),
    )
    for is_available, torch_module, multi_type in backends:
        if is_available():
            num_devices = getattr(torch, torch_module).device_count()
            config["num_processes"] = num_devices
            config["use_cpu"] = False
            # Single-device machines still launch without distribution.
            config["distributed_type"] = multi_type if num_devices > 1 else "NO"
            break
    else:
        # No accelerator found: fall back to a single CPU process.
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config["debug"] = False
    config["enable_cpu_affinity"] = False
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
131
+
132
+
133
def default_command_parser(parser, parents):
    """Register the `accelerate config default` subcommand.

    Args:
        parser: The subparsers object to add the `default` command to.
        parents: Parent parsers whose arguments are inherited.

    Returns:
        The newly created subcommand parser.
    """
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        # `write_basic_config` receives this value as `save_location`.
        dest="save_location",
    )

    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
158
+
159
+
160
def default_config_command(args):
    """Write a basic config via `write_basic_config` and report where it landed.

    `write_basic_config` returns `False` when a config already exists, in
    which case nothing is printed here (it prints its own message).
    """
    saved_path = write_basic_config(args.mixed_precision, args.save_location)
    if saved_path:
        print(f"accelerate configuration saved at {saved_path}")
venv/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import json
17
+ import os
18
+
19
+ from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
20
+ from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
21
+ from ...utils.imports import is_boto3_available
22
+ from .config_args import SageMakerConfig
23
+ from .config_utils import (
24
+ DYNAMO_BACKENDS,
25
+ _ask_field,
26
+ _ask_options,
27
+ _convert_dynamo_backend,
28
+ _convert_mixed_precision,
29
+ _convert_sagemaker_distributed_mode,
30
+ _convert_yes_no_to_bool,
31
+ )
32
+
33
+
34
+ if is_boto3_available():
35
+ import boto3 # noqa: F401
36
+
37
+
38
def _create_iam_role_for_sagemaker(role_name):
    """
    Create an IAM role named `role_name` that SageMaker training jobs can assume.

    The role gets a trust policy allowing the SageMaker service to assume it, plus an inline
    permission policy covering SageMaker itself, ECR image pulls, CloudWatch metrics/logs, and
    S3 artifact access. If a role with this name already exists, it is reused as-is — its
    existing policies are NOT updated.
    """
    iam_client = boto3.client("iam")

    # Trust policy: lets the SageMaker service assume this role on behalf of training jobs.
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        # Permissions a training job needs: run SageMaker jobs, pull images from ECR,
        # publish metrics/logs to CloudWatch, and read/write artifacts in S3.
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
90
+
91
+
92
def _get_iam_role_arn(role_name):
    """Look up and return the ARN of an existing IAM role by its name."""
    role = boto3.client("iam").get_role(RoleName=role_name)
    return role["Role"]["Arn"]
95
+
96
+
97
def get_sagemaker_input():
    """
    Interactively prompt the user for a SageMaker launch configuration and return it as a
    `SageMakerConfig`.

    Covers, in order: AWS authentication (profile or raw credentials, exported via
    environment variables), region, IAM role (existing or created on the fly), optional
    custom Docker image, optional SageMaker input/metrics TSV files, distributed mode,
    optional torch dynamo settings, EC2 instance type, debug mode, machine count, and
    mixed precision.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        # Profile-based auth: boto3 reads AWS_PROFILE from the environment.
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        # Raw credentials: exported for this process only; the launch command still needs them.
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    # IAM role: reuse an existing one or create a default-named one with broad permissions.
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    # Optional TSV file mapping channel names to data locations for SageMaker input channels.
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    # Optional TSV file mapping metric names to regexes for SageMaker metric scraping.
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    # Optional torch dynamo configuration, keyed with a "dynamo_" prefix.
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_regional_compilation"] = _ask_field(
                "Do you want to enable regional compilation? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    # Distributed jobs must run on instances that support SageMaker data parallelism;
    # single-node jobs can use any instance type.
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    debug = False
    if distributed_type != SageMakerDistributedType.NO:
        debug = _ask_field(
            "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
        debug=debug,
    )
venv/lib/python3.10/site-packages/accelerate/commands/config/update.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ from .config_args import default_config_file, load_config_from_file
20
+ from .config_utils import SubcommandHelpFormatter
21
+
22
+
23
+ description = "Update an existing config file with the latest defaults while maintaining the old configuration."
24
+
25
+
26
def update_config(args):
    """
    Update an existing config file with the latest defaults while maintaining the old configuration.

    Args:
        args: Parsed CLI arguments; `args.config_file` may be `None`, in which case the
            default config file location is used.

    Returns:
        `str`: The path of the config file that was rewritten.

    Raises:
        ValueError: If no config file could be located (neither the passed path nor the
            default location exists).
    """
    config_file = args.config_file
    if config_file is None:
        # Fail with a clear message instead of the `TypeError` that `Path(None)` would
        # raise when no config file was passed and the default one does not exist.
        if not Path(default_config_file).exists():
            raise ValueError(
                f"No config file was passed and the default config file located at {default_config_file} doesn't exist."
            )
        config_file = default_config_file
    elif not Path(config_file).exists():
        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
    config = load_config_from_file(config_file)

    # Re-serialize in place, in the same format the file already uses, so any newly added
    # defaults are written out.
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    return config_file
42
+
43
+
44
def update_command_parser(parser, parents):
    """Register the `update` subcommand and its arguments, returning the new subparser."""
    subparser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    config_file_help = (
        "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
        "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
        "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
        "with 'huggingface'."
    )
    subparser.add_argument("--config_file", default=None, help=config_file_help)
    subparser.set_defaults(func=update_config_command)
    return subparser
59
+
60
+
61
def update_config_command(args):
    """Run the config `update` subcommand and report the updated file path."""
    config_file = update_config(args)
    # Fixed user-facing typo: "Sucessfully" -> "Successfully".
    print(f"Successfully updated the configuration file at {config_file}.")
venv/lib/python3.10/site-packages/accelerate/commands/env.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+ import platform
20
+ import subprocess
21
+
22
+ import numpy as np
23
+ import psutil
24
+ import torch
25
+
26
+ from accelerate import __version__ as version
27
+ from accelerate.commands.config import default_config_file, load_config_from_file
28
+
29
+ from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_sdaa_available, is_xpu_available
30
+
31
+
32
def env_command_parser(subparsers=None):
    """Build the argument parser for `accelerate env`.

    When `subparsers` is given, registers an `env` subcommand on it (with `func`
    defaulted to `env_command`); otherwise returns a standalone parser.
    """
    standalone = subparsers is None
    if standalone:
        parser = argparse.ArgumentParser("Accelerate env command")
    else:
        parser = subparsers.add_parser("env")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if not standalone:
        parser.set_defaults(func=env_command)
    return parser
45
+
46
+
47
def env_command(args):
    """
    Collect environment information (library versions, platform, detected accelerator
    hardware, and the current Accelerate config) and print it in a copy-pasteable format
    for GitHub issues.

    Args:
        args: Parsed CLI arguments; only `args.config_file` is used (optional path to an
            Accelerate config file).

    Returns:
        `dict`: The collected environment information, including the loaded config (a
        dict, or the string "Not found").
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_mlu_available = is_mlu_available()
    pt_sdaa_available = is_sdaa_available()
    pt_musa_available = is_musa_available()
    pt_npu_available = is_npu_available()

    # Report a single accelerator backend: the first one detected, in this fixed
    # priority order (CUDA > XPU > MLU > SDAA > MUSA > NPU).
    accelerator = "N/A"
    if pt_cuda_available:
        accelerator = "CUDA"
    elif pt_xpu_available:
        accelerator = "XPU"
    elif pt_mlu_available:
        accelerator = "MLU"
    elif pt_sdaa_available:
        accelerator = "SDAA"
    elif pt_musa_available:
        accelerator = "MUSA"
    elif pt_npu_available:
        accelerator = "NPU"

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    # if we can run which, get it
    command = None
    bash_location = "Not found"
    if os.name == "nt":
        command = ["where", "accelerate"]
    elif os.name == "posix":
        command = ["which", "accelerate"]
    if command is not None:
        # NOTE(review): raises CalledProcessError if `accelerate` is not on PATH — confirm
        # that is the intended behavior rather than keeping "Not found".
        bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "`accelerate` bash location": bash_location,
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version": f"{pt_version}",
        "PyTorch accelerator": accelerator,
        "System RAM": f"{psutil.virtual_memory().total / 1024**3:.2f} GB",
    }
    # Device-specific detail for whichever backend was detected above.
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    elif pt_xpu_available:
        info["XPU type"] = torch.xpu.get_device_name()
    elif pt_mlu_available:
        info["MLU type"] = torch.mlu.get_device_name()
    elif pt_sdaa_available:
        info["SDAA type"] = torch.sdaa.get_device_name()
    elif pt_musa_available:
        info["MUSA type"] = torch.musa.get_device_name()
    elif pt_npu_available:
        info["CANN version"] = torch.version.cann

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    # Also expose the config in the returned payload (added after printing on purpose,
    # so it is not duplicated in the bullet list above).
    info["`Accelerate` configs"] = accelerate_config

    return info
121
+
122
+
123
def main() -> int:
    """CLI entry point: parse arguments, print the environment report, return 0."""
    args = env_command_parser().parse_args()
    env_command(args)
    return 0
128
+
129
+
130
+ if __name__ == "__main__":
131
+ raise SystemExit(main())
venv/lib/python3.10/site-packages/accelerate/commands/estimate.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import torch
17
+ from huggingface_hub import model_info
18
+ from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
19
+
20
+ from accelerate import init_empty_weights
21
+ from accelerate.commands.utils import CustomArgumentParser
22
+ from accelerate.utils import (
23
+ calculate_maximum_sizes,
24
+ convert_bytes,
25
+ is_timm_available,
26
+ is_transformers_available,
27
+ )
28
+
29
+
30
+ if is_transformers_available():
31
+ import transformers
32
+ from transformers import AutoConfig, AutoModel
33
+
34
+ if is_timm_available():
35
+ import timm
36
+
37
+
38
def verify_on_hub(repo: str, token: str = None):
    """Verify `repo` exists on the Hub; return its model info, or the sentinel strings
    "gated" (inaccessible) / "repo" (not found) on failure."""
    try:
        info = model_info(repo, token=token)
    except (OSError, GatedRepoError):
        return "gated"
    except RepositoryNotFoundError:
        return "repo"
    return info
46
+
47
+
48
def check_has_model(error):
    """
    Identify which library raised `error` when a model could not be found.

    Returns "timm", "transformers", or "unknown".
    """
    if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
        return "timm"
    transformers_signature = "does not appear to have a file named"
    if (
        is_transformers_available()
        and isinstance(error, OSError)
        and transformers_signature in error.args[0]
    ):
        return "transformers"
    return "unknown"
62
+
63
+
64
def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
    """
    Creates an empty model in full precision from its parent library on the `Hub` to calculate the overall memory
    consumption.

    Args:
        model_name (`str`):
            The model name on the Hub
        library_name (`str`):
            The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
            metadata on the Hub to determine the library.
        trust_remote_code (`bool`, `optional`, defaults to `False`):
            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
        access_token (`str`, `optional`, defaults to `None`):
            The access token to use to access private or gated models on the Hub. (for use on the Gradio app)

    Returns:
        `torch.nn.Module`: The torch model that has been initialized on the `meta` device.

    """
    # Sentinel strings "gated"/"repo" signal access failures from `verify_on_hub`.
    model_info = verify_on_hub(model_name, access_token)
    # Simplified errors
    if model_info == "gated":
        raise GatedRepoError(
            f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
        )
    elif model_info == "repo":
        raise RepositoryNotFoundError(
            f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
            " make sure you are authenticated via `huggingface-cli login` and have access."
        )
    # Fall back to Hub metadata to determine the integration library when not passed.
    if library_name is None:
        library_name = getattr(model_info, "library_name", False)
        if not library_name:
            raise ValueError(
                f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
            )
    if library_name == "transformers":
        if not is_transformers_available():
            raise ImportError(
                f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
            )
        print(f"Loading pretrained config for `{model_name}` from `transformers`...")
        if model_info.config is None:
            raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")

        auto_map = model_info.config.get("auto_map", False)
        config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
        # `init_empty_weights` puts all parameters on the `meta` device, so instantiating
        # the model allocates no real memory.
        with init_empty_weights():
            # remote code could specify a specific `AutoModel` class in the `auto_map`
            constructor = AutoModel
            if isinstance(auto_map, dict):
                value = None
                for key in auto_map.keys():
                    if key.startswith("AutoModelFor"):
                        value = key
                        break
                if value is not None:
                    constructor = getattr(transformers, value)
            # we need to pass the dtype, otherwise it is going to use the torch_dtype that is saved in the config
            model = constructor.from_config(config, torch_dtype=torch.float32, trust_remote_code=trust_remote_code)
    elif library_name == "timm":
        if not is_timm_available():
            raise ImportError(
                f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
            )
        print(f"Loading pretrained config for `{model_name}` from `timm`...")
        with init_empty_weights():
            model = timm.create_model(model_name, pretrained=False)
    else:
        raise ValueError(
            f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
        )
    return model
140
+
141
+
142
def create_ascii_table(headers: list, rows: list, title: str):
    "Creates a pretty table from a list of rows, minimal version of `tabulate`."
    sep_char, in_between = "│", "─"
    # Each column is as wide as its widest cell, header included.
    column_widths = []
    for i in range(len(headers)):
        column_values = [row[i] for row in rows] + [headers[i]]
        max_column_width = max(len(value) for value in column_values)
        column_widths.append(max_column_width)

    # printf-style cell formats, fixed to the widths computed BEFORE any title widening.
    formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]

    pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
    diff = 0

    # Builds one horizontal border line; `diff` pads the right edge when the title is
    # wider than the table.
    def make_row(left_char, middle_char, right_char):
        return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"

    separator = make_row("├", "┼", "┤")
    # If the title is wider than the combined columns, widen the last column to fit it.
    if len(title) > sum(column_widths):
        diff = abs(len(title) - len(separator))
        column_widths[-1] += diff

    # Update with diff
    separator = make_row("├", "┼", "┤")
    initial_rows = [
        make_row("┌", in_between, "┐"),
        f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
        make_row("├", "┬", "┤"),
    ]
    table = "\n".join(initial_rows) + "\n"
    # NOTE(review): the last column is widened by `diff` a second time here (first time
    # was above) — presumably intentional padding for long titles, but verify the
    # rendering when the title is much wider than the columns.
    column_widths[-1] += diff
    centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
    table += f"{pattern % tuple(centered_line)}\n{separator}\n"
    for i, line in enumerate(rows):
        centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
        table += f"{pattern % tuple(centered_line)}\n"
    table += f"└{'┴'.join([in_between * n for n in column_widths])}┘"

    return table
181
+
182
+
183
def estimate_command_parser(subparsers=None):
    """Build the argument parser for `accelerate estimate-memory`.

    When `subparsers` is given, registers an `estimate-memory` subcommand on it (with
    `func` defaulted to `estimate_command`); otherwise returns a standalone parser.
    """
    if subparsers is None:
        parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
    else:
        parser = subparsers.add_parser("estimate-memory")

    parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
    parser.add_argument(
        "--library_name",
        type=str,
        choices=["timm", "transformers"],
        help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
    )
    parser.add_argument(
        "--dtypes",
        type=str,
        nargs="+",
        default=["float32", "float16", "int8", "int4"],
        choices=["float32", "float16", "int8", "int4"],
        help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        default=False,
        help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
        should only be used for repositories you trust and in which you have read the code, as it will execute
        code present on the Hub on your local machine.""",
    )

    if subparsers is not None:
        parser.set_defaults(func=estimate_command)
    return parser
216
+
217
+
218
def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
    """
    Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
    1.

    Args:
        bytes (`int`):
            The size of the model being trained.
        mixed_precision (`str`):
            The mixed precision that would be ran.
        msamp_config (`str`):
            The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.

    Returns:
        `dict`: Estimated bytes for "model", "optimizer", "gradients", and "step"
        (-1 when no estimate applies for the given precision).
    """
    usage = dict.fromkeys(("model", "optimizer", "gradients", "step"), -1)
    full_size = bytes
    half_size = bytes // 2

    if mixed_precision == "float32":
        usage.update(model=full_size, gradients=full_size, optimizer=full_size * 2, step=full_size * 4)
    elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
        # With native `TransformersEngine`, there is no memory savings with FP8
        # With mixed precision training, the model has weights stored
        # in FP16 and FP32; gradients cost 1.5x (weight gradient + GEMM computation)
        # and optimizer states cost 2x.
        usage.update(model=full_size, gradients=full_size + half_size, optimizer=full_size * 2)
        usage["step"] = usage["optimizer"]
    return usage
251
+
252
+
253
def gather_data(args):
    """Instantiate an empty (meta-device) model and compute its per-dtype size rows."""
    try:
        model = create_empty_model(
            args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
        )
    except (RuntimeError, OSError) as e:
        library = check_has_model(e)
        if library != "unknown":
            raise RuntimeError(
                f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
            )
        raise e

    total_size, largest_layer = calculate_maximum_sizes(model)

    # Sizes shrink relative to float32 by these factors; float32 stays unscaled.
    size_divisors = {"float16": 2, "int8": 4, "int4": 8}
    data = []
    for dtype in args.dtypes:
        # Training estimates are always based on the full-precision size.
        training_size = estimate_training_usage(total_size, dtype)
        divisor = size_divisors.get(dtype)
        if divisor is None:
            dtype_total_size = total_size
            dtype_largest_layer = largest_layer[0]
        else:
            dtype_total_size = total_size / divisor
            dtype_largest_layer = largest_layer[0] / divisor
        data.append([dtype, dtype_largest_layer, dtype_total_size, training_size])
    return data
286
+
287
+
288
def estimate_command(args):
    """Render the per-dtype memory-usage table for the requested model and print it."""
    rows = gather_data(args)
    # Convert raw byte counts to human-readable strings; training dicts collapse to
    # their peak value (or "N/A" when no estimate applies).
    for row in rows:
        for idx, cell in enumerate(row):
            if isinstance(cell, (int, float)):
                row[idx] = convert_bytes(cell)
            elif isinstance(cell, dict):
                peak_usage = max(cell.values())
                row[idx] = "N/A" if peak_usage == -1 else convert_bytes(peak_usage)

    headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
    title = f"Memory Usage for loading `{args.model_name}`"
    print(create_ascii_table(headers, rows, title))
303
+
304
+
305
def main():
    """CLI entry point for the standalone memory estimator."""
    args = estimate_command_parser().parse_args()
    estimate_command(args)
309
+
310
+
311
+ if __name__ == "__main__":
312
+ main()
venv/lib/python3.10/site-packages/accelerate/commands/launch.py ADDED
@@ -0,0 +1,1245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import importlib
19
+ import logging
20
+ import os
21
+ import subprocess
22
+ import sys
23
+ from pathlib import Path
24
+
25
+ import psutil
26
+ import torch
27
+
28
+ from accelerate.commands.config import default_config_file, load_config_from_file
29
+ from accelerate.commands.config.config_args import SageMakerConfig
30
+ from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
31
+ from accelerate.commands.utils import CustomArgumentParser
32
+ from accelerate.state import get_int_from_env
33
+ from accelerate.utils import (
34
+ ComputeEnvironment,
35
+ DistributedType,
36
+ PrepareForLaunch,
37
+ _filter_args,
38
+ check_cuda_p2p_ib_support,
39
+ convert_dict_to_env_variables,
40
+ is_bf16_available,
41
+ is_deepspeed_available,
42
+ is_hpu_available,
43
+ is_mlu_available,
44
+ is_musa_available,
45
+ is_npu_available,
46
+ is_rich_available,
47
+ is_sagemaker_available,
48
+ is_sdaa_available,
49
+ is_torch_xla_available,
50
+ is_xpu_available,
51
+ patch_environment,
52
+ prepare_deepspeed_cmd_env,
53
+ prepare_multi_gpu_env,
54
+ prepare_sagemager_args_inputs,
55
+ prepare_simple_launcher_cmd_env,
56
+ prepare_tpu,
57
+ str_to_bool,
58
+ )
59
+ from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
60
+
61
+
62
# When `rich` is installed, route all logging through its handler so console
# output is colorized and nicely formatted.
if is_rich_available():
    from rich import get_console
    from rich.logging import RichHandler

    FORMAT = "%(message)s"
    logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])


logger = logging.getLogger(__name__)


# Maps a hardware/paradigm CLI flag name (after `clean_option` normalization)
# to the argparse argument-group title it enables; `CustomHelpFormatter` uses
# this to hide help sections that are irrelevant to the flags actually passed.
options_to_group = {
    "multi_gpu": "Distributed GPUs",
    "tpu": "TPU",
    "use_deepspeed": "DeepSpeed Arguments",
    "use_fsdp": "FSDP Arguments",
    "use_megatron_lm": "Megatron-LM Arguments",
    "fp8_backend": "FP8 Arguments",
}
81
+
82
+
83
+ def clean_option(option):
84
+ "Finds all cases of - after the first two characters and changes them to _"
85
+ if "fp8_backend" in option:
86
+ option = "--fp8_backend"
87
+ if option.startswith("--"):
88
+ return option[2:].replace("-", "_")
89
+
90
+
91
class CustomHelpFormatter(argparse.HelpFormatter):
    """
    This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
    called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
    for that platform.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Argument-group titles that are always shown in `--help`, regardless
        # of which platform/paradigm flags were passed on the command line.
        self.titles = [
            "Hardware Selection Arguments",
            "Resource Selection Arguments",
            "Training Paradigm Arguments",
            "positional arguments",
            "optional arguments",
        ]

    def add_argument(self, action: argparse.Action):
        # Determine the user-supplied tokens, skipping the `accelerate launch`
        # prefix when invoked through the main `accelerate` CLI entry point.
        if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
            args = sys.argv[2:]
        else:
            args = sys.argv[1:]

        if len(args) > 1:
            # Normalize flags (`--multi-gpu` -> `multi_gpu`) so they can be
            # matched against the keys of `options_to_group`; non-flag tokens
            # become None and simply never match.
            args = list(map(clean_option, args))
            used_platforms = [arg for arg in args if arg in options_to_group.keys()]
            used_titles = [options_to_group[o] for o in used_platforms]
            # Hide arguments from any group that is neither always-visible nor
            # explicitly enabled by a platform/paradigm flag.
            if action.container.title not in self.titles + used_titles:
                action.help = argparse.SUPPRESS
            # For the selection groups themselves, only show flags the user
            # actually passed, annotating them as currently selected.
            elif action.container.title == "Hardware Selection Arguments":
                if set(action.option_strings).isdisjoint(set(args)):
                    action.help = argparse.SUPPRESS
                else:
                    action.help = action.help + " (currently selected)"
            elif action.container.title == "Training Paradigm Arguments":
                if set(action.option_strings).isdisjoint(set(args)):
                    action.help = argparse.SUPPRESS
                else:
                    action.help = action.help + " (currently selected)"

        # Drop hyphenated aliases (e.g. `--num-processes`) from the rendered
        # help, keeping only the underscore spellings.
        action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
        super().add_argument(action)

    def end_section(self):
        # If a section ended up with fewer than two entries (i.e. everything in
        # it was suppressed), blank it out entirely so no orphan header prints.
        if len(self._current_section.items) < 2:
            self._current_section.items = []
            self._current_section.heading = ""
        super().end_section()
139
+
140
+
141
def launch_command_parser(subparsers=None):
    """Build (or register) the argument parser for `accelerate launch`.

    Args:
        subparsers: When given (the main `accelerate` CLI's subparser registry),
            the parser is added as the `launch` subcommand and its `func`
            default is set to `launch_command`; otherwise a standalone
            `CustomArgumentParser` is returned.

    Returns:
        The fully configured parser.
    """
    description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
    if subparsers is not None:
        parser = subparsers.add_parser(
            "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
        )
    else:
        parser = CustomArgumentParser(
            "Accelerate launch command",
            description=description,
            add_help=False,
            allow_abbrev=False,
            formatter_class=CustomHelpFormatter,
        )

    parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")

    parser.add_argument(
        "--config_file",
        default=None,
        help="The config file to use for the default values in the launching script.",
    )
    parser.add_argument(
        "--quiet",
        "-q",
        action="store_true",
        help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
    )
    # Hardware selection arguments
    hardware_args = parser.add_argument_group(
        "Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
    )
    hardware_args.add_argument(
        "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
    )
    hardware_args.add_argument(
        "--multi_gpu",
        default=False,
        action="store_true",
        help="Whether or not this should launch a distributed GPU training.",
    )
    hardware_args.add_argument(
        "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
    )
    # Resource selection arguments
    resource_args = parser.add_argument_group(
        "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
    )
    resource_args.add_argument(
        "--mixed_precision",
        type=str,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
    )
    resource_args.add_argument(
        "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
    )
    resource_args.add_argument(
        "--num_machines", type=int, default=None, help="The total number of machines used in this training."
    )
    resource_args.add_argument(
        "--num_cpu_threads_per_process",
        type=int,
        default=None,
        help="The number of CPU threads per process. Can be tuned for optimal performance.",
    )
    resource_args.add_argument(
        "--enable_cpu_affinity",
        default=False,
        action="store_true",
        help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
    )
    # Dynamo arguments
    resource_args.add_argument(
        "--dynamo_backend",
        type=str,
        choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
        help="Choose a backend to optimize your training with dynamo, see more at "
        "https://github.com/pytorch/torchdynamo.",
    )
    resource_args.add_argument(
        "--dynamo_mode",
        type=str,
        default="default",
        choices=TORCH_DYNAMO_MODES,
        help="Choose a mode to optimize your training with dynamo.",
    )
    resource_args.add_argument(
        "--dynamo_use_fullgraph",
        default=False,
        action="store_true",
        help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
    )
    resource_args.add_argument(
        "--dynamo_use_dynamic",
        default=False,
        action="store_true",
        help="Whether to enable dynamic shape tracing.",
    )
    resource_args.add_argument(
        "--dynamo_use_regional_compilation",
        default=False,
        action="store_true",
        help="Whether to enable regional compilation.",
    )

    # Training Paradigm arguments
    paradigm_args = parser.add_argument_group(
        "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
    )
    paradigm_args.add_argument(
        "--use_deepspeed",
        default=False,
        action="store_true",
        help="Whether to use deepspeed.",
    )
    paradigm_args.add_argument(
        "--use_fsdp",
        default=False,
        action="store_true",
        help="Whether to use fsdp.",
    )
    paradigm_args.add_argument(
        "--use_parallelism_config",
        default=False,
        action="store_true",
        help="Whether to use the parallelism config to configure the N-d distributed training.",
    )
    paradigm_args.add_argument(
        "--use_megatron_lm",
        default=False,
        action="store_true",
        help="Whether to use Megatron-LM.",
    )

    paradigm_args.add_argument(
        "--use_xpu",
        default=None,
        action="store_true",
        help="Whether to use IPEX plugin to speed up training on XPU specifically. This argument is deprecated and ignored, will be removed in Accelerate v1.20.",
    )

    # distributed GPU training arguments
    distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
    distributed_args.add_argument(
        "--gpu_ids",
        default=None,
        help="What GPUs (by id) should be used for training on this machine as a comma-separated list",
    )
    distributed_args.add_argument(
        "--same_network",
        default=False,
        action="store_true",
        help="Whether all machines used for multinode training exist on the same local network.",
    )
    distributed_args.add_argument(
        "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
    )
    distributed_args.add_argument(
        "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
    )
    distributed_args.add_argument(
        "--main_process_port",
        type=int,
        default=None,
        help="The port to use to communicate with the machine of rank 0.",
    )
    distributed_args.add_argument(
        "-t",
        "--tee",
        default="0",
        type=str,
        help="Tee std streams into a log file and also to console.",
    )
    distributed_args.add_argument(
        "--log_dir",
        type=str,
        default=None,
        help=(
            "Base directory to use for log files when using torchrun/torch.distributed.run as launcher. "
            "Use with --tee to redirect std streams info log files."
        ),
    )
    distributed_args.add_argument(
        "--role",
        type=str,
        default="default",
        help="User-defined role for the workers.",
    )
    # Rendezvous related arguments
    distributed_args.add_argument(
        "--rdzv_backend",
        type=str,
        default="static",
        help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
    )
    distributed_args.add_argument(
        "--rdzv_conf",
        type=str,
        default="",
        help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
    )
    distributed_args.add_argument(
        "--max_restarts",
        type=int,
        default=0,
        help="Maximum number of worker group restarts before failing.",
    )
    distributed_args.add_argument(
        "--monitor_interval",
        type=float,
        default=0.1,
        help="Interval, in seconds, to monitor the state of workers.",
    )
    parser.add_argument(
        "-m",
        "--module",
        action="store_true",
        help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
    )
    parser.add_argument(
        "--no_python",
        action="store_true",
        help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
    )

    # TPU arguments
    tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
    tpu_args.add_argument(
        "--tpu_cluster",
        action="store_true",
        dest="tpu_use_cluster",
        help="Whether to use a GCP TPU pod for training.",
    )
    tpu_args.add_argument(
        "--no_tpu_cluster",
        action="store_false",
        dest="tpu_use_cluster",
        help="Should not be passed explicitly, this is for internal use only.",
    )
    tpu_args.add_argument(
        "--tpu_use_sudo",
        action="store_true",
        help="Whether to use `sudo` when running the TPU training script in each pod.",
    )
    tpu_args.add_argument(
        "--vm",
        type=str,
        action="append",
        help=(
            "List of single Compute VM instance names. "
            "If not provided we assume usage of instance groups. For TPU pods."
        ),
    )
    tpu_args.add_argument(
        "--env",
        type=str,
        action="append",
        help="List of environment variables to set on the Compute VM instances. For TPU pods.",
    )
    tpu_args.add_argument(
        "--main_training_function",
        type=str,
        default=None,
        help="The name of the main function to be executed in your script (only for TPU training).",
    )
    tpu_args.add_argument(
        "--downcast_bf16",
        action="store_true",
        help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
    )

    # DeepSpeed arguments
    deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
    deepspeed_args.add_argument(
        "--deepspeed_config_file",
        default=None,
        type=str,
        help="DeepSpeed config file.",
    )
    deepspeed_args.add_argument(
        "--zero_stage",
        default=None,
        type=int,
        help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to `2`.",
    )
    deepspeed_args.add_argument(
        "--offload_optimizer_device",
        default=None,
        type=str,
        help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to 'none'.",
    )
    deepspeed_args.add_argument(
        "--offload_param_device",
        default=None,
        type=str,
        help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to 'none'.",
    )
    deepspeed_args.add_argument(
        "--offload_optimizer_nvme_path",
        default=None,
        type=str,
        help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to 'none'.",
    )
    deepspeed_args.add_argument(
        "--offload_param_nvme_path",
        default=None,
        type=str,
        help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to 'none'.",
    )
    deepspeed_args.add_argument(
        "--gradient_accumulation_steps",
        default=None,
        type=int,
        help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to `1`.",
    )
    deepspeed_args.add_argument(
        "--gradient_clipping",
        default=None,
        type=float,
        help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
        "If unspecified, will default to `1.0`.",
    )
    deepspeed_args.add_argument(
        "--zero3_init_flag",
        default=None,
        type=str,
        help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
        "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
    )
    deepspeed_args.add_argument(
        "--zero3_save_16bit_model",
        default=None,
        type=str,
        help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
        "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
    )
    deepspeed_args.add_argument(
        "--deepspeed_hostfile",
        default=None,
        type=str,
        help="DeepSpeed hostfile for configuring multi-node compute resources.",
    )
    # Typo fix: "mutli-node" -> "multi-node" in the two filter help strings below.
    deepspeed_args.add_argument(
        "--deepspeed_exclusion_filter",
        default=None,
        type=str,
        help="DeepSpeed exclusion filter string when using multi-node setup.",
    )
    deepspeed_args.add_argument(
        "--deepspeed_inclusion_filter",
        default=None,
        type=str,
        help="DeepSpeed inclusion filter string when using multi-node setup.",
    )
    deepspeed_args.add_argument(
        "--deepspeed_multinode_launcher",
        default=None,
        type=str,
        help="DeepSpeed multi-node launcher to use, e.g. `pdsh`, `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). If unspecified, will default to `pdsh`.",
    )
    deepspeed_args.add_argument(
        "--deepspeed_moe_layer_cls_names",
        default=None,
        type=str,
        help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g. `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
        " (useful only when `use_deepspeed` flag is passed).",
    )

    # fsdp arguments
    fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
    fsdp_args.add_argument(
        "--fsdp_version",
        type=str,
        default="1",
        choices=["1", "2"],
        help="FSDP version to use. (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_offload_params",
        default="false",
        type=str,
        help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_min_num_params",
        type=int,
        default=1e8,
        help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
    )
    # We enable this for backwards compatibility, throw a warning if this is set in `FullyShardedDataParallelPlugin`
    fsdp_args.add_argument(
        "--fsdp_sharding_strategy",
        type=str,
        default="FULL_SHARD",
        help="FSDP's sharding strategy. (useful only when `use_fsdp` flag is passed and `fsdp_version=1`).",
    )
    fsdp_args.add_argument(
        "--fsdp_reshard_after_forward",
        type=str,
        default="true",
        help="FSDP's Reshard After Forward Strategy. (useful only when `use_fsdp` flag is passed). Supports either boolean (FSDP2) or `FULL_SHARD | SHARD_GRAD_OP | NO_RESHARD` (FSDP1).",
    )
    fsdp_args.add_argument(
        "--fsdp_auto_wrap_policy",
        type=str,
        default=None,
        help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_transformer_layer_cls_to_wrap",
        default=None,
        type=str,
        help="Transformer layer class name (case-sensitive) to wrap, e.g. `BertLayer`, `GPTJBlock`, `T5Block` .... "
        "(useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_backward_prefetch",
        default=None,
        type=str,
        help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_state_dict_type",
        default=None,
        type=str,
        help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_forward_prefetch",
        default="false",
        type=str,
        help="If True, then FSDP explicitly prefetches the next upcoming "
        "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_use_orig_params",
        default="true",
        type=str,
        help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters."
        " (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_cpu_ram_efficient_loading",
        default="true",
        type=str,
        help="If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. "
        "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to be True. "
        "(useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_sync_module_states",
        default="true",
        type=str,
        help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
        " (useful only when `use_fsdp` flag is passed).",
    )
    fsdp_args.add_argument(
        "--fsdp_activation_checkpointing",
        default="false",
        type=str,
        help="Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).",
    )

    # megatron_lm args
    megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
    megatron_lm_args.add_argument(
        "--megatron_lm_tp_degree",
        type=int,
        default=1,
        help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
    )
    megatron_lm_args.add_argument(
        "--megatron_lm_pp_degree",
        type=int,
        default=1,
        help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
    )
    megatron_lm_args.add_argument(
        "--megatron_lm_num_micro_batches",
        type=int,
        default=None,
        help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
    )
    megatron_lm_args.add_argument(
        "--megatron_lm_sequence_parallelism",
        default=None,
        type=str,
        help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
        "(useful only when `use_megatron_lm` flag is passed).",
    )
    megatron_lm_args.add_argument(
        "--megatron_lm_recompute_activations",
        default=None,
        type=str,
        help="Decides Whether (true|false) to enable Selective Activation Recomputation. "
        "(useful only when `use_megatron_lm` flag is passed).",
    )
    megatron_lm_args.add_argument(
        "--megatron_lm_use_distributed_optimizer",
        default=None,
        type=str,
        help="Decides Whether (true|false) to use distributed optimizer "
        "which shards optimizer state and gradients across Data Parallel (DP) ranks. "
        "(useful only when `use_megatron_lm` flag is passed).",
    )
    megatron_lm_args.add_argument(
        "--megatron_lm_gradient_clipping",
        default=1.0,
        type=float,
        help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
        "(useful only when `use_megatron_lm` flag is passed).",
    )

    # FP8 arguments
    fp8_args = parser.add_argument_group(
        "FP8 Arguments", "Arguments related to FP8 training (requires `--mixed_precision=fp8`)"
    )
    fp8_args.add_argument(
        "--fp8_backend",
        type=str,
        choices=["te", "msamp"],
        help="Choose a backend to train with FP8 (te: TransformerEngine, msamp: MS-AMP)",
    )
    fp8_args.add_argument(
        "--fp8_use_autocast_during_eval",
        default=False,
        action="store_true",
        help="Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.",
    )
    fp8_args.add_argument(
        "--fp8_margin",
        type=int,
        default=0,
        help="The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).",
    )
    fp8_args.add_argument(
        "--fp8_interval",
        type=int,
        default=1,
        help="The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).",
    )
    fp8_args.add_argument(
        "--fp8_format",
        type=str,
        default="HYBRID",
        choices=["HYBRID", "E4M3", "E5M2"],
        help="The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).",
    )
    fp8_args.add_argument(
        "--fp8_amax_history_len",
        type=int,
        default=1024,
        help="The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).",
    )
    fp8_args.add_argument(
        "--fp8_amax_compute_algo",
        type=str,
        default="most_recent",
        choices=["max", "most_recent"],
        help="The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).",
    )
    fp8_args.add_argument(
        "--fp8_override_linear_precision",
        type=lambda x: tuple(map(str_to_bool, x.split(","))),
        default=(False, False, False),
        help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-separated string of booleans (useful only when `--fp8_backend=te` is passed).",
    )
    fp8_args.add_argument(
        "--fp8_opt_level",
        type=str,
        default="O2",
        choices=["O1", "O2"],
        help="What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed).",
    )

    # AWS arguments
    aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
    aws_args.add_argument(
        "--aws_access_key_id",
        type=str,
        default=None,
        help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job",
    )
    aws_args.add_argument(
        "--aws_secret_access_key",
        type=str,
        default=None,
        help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Whether to print out the torch.distributed stack trace when something fails.",
    )
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the script to be launched in parallel, followed by all the arguments for the training "
            "script."
        ),
    )

    # MPI arguments
    mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
    mpirun_args.add_argument(
        "--mpirun_hostfile",
        type=str,
        default=None,
        help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
        "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
    )
    mpirun_args.add_argument(
        "--mpirun_ccl",
        type=int,
        default=1,
        help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
    )

    # ParallelismConfig arguments
    parallelism_config_args = parser.add_argument_group(
        "ParallelismConfig Arguments",
        "Arguments related to the ParallelismConfig used for distributed training.",
    )
    parallelism_config_args.add_argument(
        "--parallelism_config_dp_replicate_size",
        type=int,
        default=1,
        help="The number of processes for data parallel training. Defaults to 1 (no data parallelism).",
    )

    parallelism_config_args.add_argument(
        "--parallelism_config_dp_shard_size",
        type=int,
        default=1,
        help="The number of processes for FSDP sharding. Defaults to 1 (No FSDP sharding).",
    )

    parallelism_config_args.add_argument(
        "--parallelism_config_tp_size",
        type=int,
        default=1,
        help="The number of processes for tensor parallel training. Defaults to 1 (no tensor parallelism).",
    )

    parallelism_config_args.add_argument(
        "--parallelism_config_cp_size",
        type=int,
        default=1,
        help="The number of processes for context parallel training. Defaults to 1 (no context parallelism).",
    )
    parallelism_config_args.add_argument(
        "--parallelism_config_cp_comm_strategy",
        type=str,
        default="allgather",
        help="The communication strategy for context parallel training. Defaults to 'allgather'. Other option is alltoall",
    )

    # Other arguments of the training scripts
    parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")

    if subparsers is not None:
        parser.set_defaults(func=launch_command)
    return parser
814
+
815
+
816
def simple_launcher(args):
    """Run the training script as a single local subprocess and propagate failures.

    On a non-zero exit status, raises `subprocess.CalledProcessError` unless
    ``--quiet`` was passed, in which case the launcher exits with status 1.
    """
    command, env = prepare_simple_launcher_cmd_env(args)

    proc = subprocess.Popen(command, env=env)
    proc.wait()
    if proc.returncode == 0:
        return
    if args.quiet:
        sys.exit(1)
    raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=command)
826
+
827
+
828
def multi_gpu_launcher(args):
    """Launch multi-GPU / multi-node training through `torch.distributed.run`."""
    import torch.distributed.run as distrib_run

    current_env = prepare_multi_gpu_env(args)
    if not check_cuda_p2p_ib_support():
        # RTX 4000 series cards lack P2P/IB support; disable both unless the
        # user configured them explicitly, and warn only if we changed anything.
        changed = False
        for env_var in ("NCCL_P2P_DISABLE", "NCCL_IB_DISABLE"):
            if env_var not in current_env:
                current_env[env_var] = "1"
                changed = True
        if changed:
            logger.warning(
                "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
            )

    debug = getattr(args, "debug", False)
    # Keep only the arguments torch.distributed.run's own parser understands.
    args = _filter_args(
        args,
        distrib_run.get_args_parser(),
        ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
    )

    with patch_environment(**current_env):
        try:
            distrib_run.run(args)
        except Exception:
            if debug and is_rich_available():
                # With --debug, render a rich traceback instead of propagating.
                console = get_console()
                console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
                console.print_exception(suppress=[__file__], show_locals=False)
            else:
                raise
861
+
862
+
863
def deepspeed_launcher(args):
    """Launch training through DeepSpeed's runner, or `torch.distributed.run` for single node.

    For multi-node runs using a launcher other than the "standard" one, the prepared
    environment is persisted to DeepSpeed's environment file and the DeepSpeed command
    is executed as a subprocess. Otherwise the job is dispatched through
    `torch.distributed.run` with the prepared environment patched in.

    Raises:
        ImportError: if DeepSpeed is not installed.
        subprocess.CalledProcessError: if the DeepSpeed subprocess fails and
            `--quiet` was not passed (with `--quiet`, exits with status 1 instead).
    """
    import torch.distributed.run as distrib_run

    if not is_deepspeed_available():
        raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
    else:
        from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME

    cmd, current_env = prepare_deepspeed_cmd_env(args)
    if not check_cuda_p2p_ib_support():
        # RTX 4000 series cards lack P2P/IB support; disable both unless the user
        # configured them explicitly, warning only if anything was changed.
        message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
        warn = False
        if "NCCL_P2P_DISABLE" not in current_env:
            current_env["NCCL_P2P_DISABLE"] = "1"
            warn = True
        if "NCCL_IB_DISABLE" not in current_env:
            current_env["NCCL_IB_DISABLE"] = "1"
            warn = True
        if warn:
            logger.warning(message)

    if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
        with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
            valid_env_items = convert_dict_to_env_variables(current_env)
            # Fix: write the env file whenever there is at least one valid entry.
            # The previous `> 1` check silently dropped a lone environment variable.
            if len(valid_env_items) > 0:
                f.writelines(valid_env_items)

        process = subprocess.Popen(cmd, env=current_env)
        process.wait()
        if process.returncode != 0:
            if not args.quiet:
                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
            else:
                sys.exit(1)
    else:
        debug = getattr(args, "debug", False)
        # Keep only the arguments torch.distributed.run's own parser understands.
        args = _filter_args(
            args,
            distrib_run.get_args_parser(),
            ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
        )
        with patch_environment(**current_env):
            try:
                distrib_run.run(args)
            except Exception:
                if is_rich_available() and debug:
                    # With --debug, render a rich traceback instead of propagating.
                    console = get_console()
                    console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
                    console.print_exception(suppress=[__file__], show_locals=False)
                else:
                    raise
914
+
915
+
916
def tpu_launcher(args):
    """Import the training script as a module and spawn it on TPU cores via XLA."""
    import torch_xla.distributed.xla_multiprocessing as xmp

    if args.no_python:
        raise ValueError("--no_python cannot be used with TPU launcher")

    args, current_env = prepare_tpu(args, {})

    if args.module:
        module_name = args.training_script
    else:
        # Treat the script file as an importable module: put its directory on
        # sys.path and import it by its stem.
        script_path = Path(args.training_script)
        sys.path.append(str(script_path.parent.resolve()))
        module_name = script_path.stem

    module = importlib.import_module(module_name)
    if not hasattr(module, args.main_training_function):
        raise ValueError(
            f"Your training script should have a function named {args.main_training_function}, or you should pass a "
            "different value to `--main_training_function`."
        )

    # The spawned processes re-parse sys.argv, so it must look like the
    # script's own invocation.
    sys.argv = [module.__file__] + args.training_script_args

    entry_point = getattr(module, args.main_training_function)
    with patch_environment(**current_env):
        xmp.spawn(PrepareForLaunch(entry_point), args=())
945
+
946
+
947
def tpu_pod_launcher(args):
    """Launch training across a TPU pod by re-invoking `accelerate-launch` on every TPU VM.

    Builds a per-host single-machine `accelerate-launch` command (optionally under
    `sudo`), forwards the prepared environment, and dispatches it with
    `torch_xla.distributed.xla_dist`. Docker-related flags are rejected because
    containerized pod launches are not supported.
    """
    from torch_xla.distributed import xla_dist

    current_env = {}
    args, current_env = prepare_tpu(args, current_env, True)
    debug = getattr(args, "debug", False)

    training_script = args.training_script
    training_script_args = args.training_script_args
    # Keep only the arguments xla_dist's own parser understands; `--positional`
    # is a placeholder here and is replaced with the per-host command below.
    new_args = _filter_args(
        args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
    )

    if args.tpu_use_sudo:
        new_cmd = ["sudo"]
    else:
        new_cmd = []

    # Each TPU VM runs the user script as its own single-machine launch.
    new_cmd += [
        "accelerate-launch",
        "--tpu",
        "--no_tpu_cluster",
        "--num_machines",
        "1",
        "--mixed_precision",
        "no",
        "--dynamo_backend",
        "no",
        "--num_processes",
        str(args.num_processes),
        "--main_training_function",
        str(args.main_training_function),
        training_script,
    ] + training_script_args

    new_args.positional = new_cmd
    # Collect any docker_* flags that were set: they are unsupported and reported together.
    bad_flags = ""
    for arg in vars(new_args):
        if arg.startswith("docker_"):
            value = getattr(new_args, arg)
            if value != "" and value is not None:
                bad_flags += f'{arg}="{value}"\n'
    if bad_flags != "":
        raise ValueError(
            f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
        )
    new_args.env = [f"{k}={v}" for k, v in current_env.items()]
    # Marker so the spawned processes know they run inside a TPU pod launch.
    new_args.env.append("ACCELERATE_IN_TPU_POD=1")
    try:
        xla_dist.resolve_and_execute(new_args)
    except Exception:
        if is_rich_available() and debug:
            # With --debug, render a rich traceback instead of propagating.
            console = get_console()
            console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
            console.print_exception(suppress=[__file__], show_locals=False)
        else:
            raise
1004
+
1005
+
1006
def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
    """Submit the training job to Amazon SageMaker via a HuggingFace estimator.

    Requires the `sagemaker` package and a plain Python training script
    (neither `--module` nor `--no_python` is supported).
    """
    if not is_sagemaker_available():
        raise ImportError(
            "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
        )
    if args.module or args.no_python:
        raise ValueError(
            "SageMaker requires a python training script file and cannot be used with --module or --no_python"
        )

    from sagemaker.huggingface import HuggingFace

    # NOTE: `prepare_sagemager_args_inputs` is the helper's actual (misspelled) name.
    estimator_kwargs, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)

    estimator = HuggingFace(**estimator_kwargs)
    estimator.fit(inputs=sagemaker_inputs)
    print(f"You can find your model data at: {estimator.model_data}")
1024
+
1025
+
1026
def _validate_launch_command(args):
    """Validate and normalize `accelerate launch` arguments.

    Cross-checks mutually exclusive launcher flags, fills unset arguments from the
    accelerate config file when one is available, and otherwise derives sensible
    defaults from the detected hardware, warning about each defaulted value.

    Returns:
        tuple: `(args, defaults, mp_from_config_flag)` where `defaults` is the loaded
        config (or `None` if no config file was used) and `mp_from_config_flag` is
        `True` when `mixed_precision` was taken from the config file rather than the CLI.
    """
    # Sanity checks
    if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
        raise ValueError(
            "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
        )
    if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
        raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")

    if (not args.use_fsdp or args.fsdp_version == 1) and args.use_parallelism_config:
        raise ValueError("You cannot use `--use_parallelism_config` without `--use_fsdp` and `--fsdp_version=2`. ")

    defaults = None
    warned = []
    mp_from_config_flag = False
    # Get the default from the config file.
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # "config_file given, OR (default config exists AND not --cpu)" — presumably intended.
    if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
        defaults = load_config_from_file(args.config_file)
        if (
            not args.multi_gpu
            and not args.tpu
            and not args.tpu_use_cluster
            and not args.use_deepspeed
            and not args.use_fsdp
            and not args.use_megatron_lm
        ):
            # No launcher flag was passed on the CLI: derive them all from the
            # config file's distributed_type.
            args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
            args.multi_gpu = (
                True
                if defaults.distributed_type
                in (
                    DistributedType.MULTI_GPU,
                    DistributedType.MULTI_NPU,
                    DistributedType.MULTI_MLU,
                    DistributedType.MULTI_SDAA,
                    DistributedType.MULTI_MUSA,
                    DistributedType.MULTI_XPU,
                    DistributedType.MULTI_HPU,
                )
                else False
            )
            args.tpu = defaults.distributed_type == DistributedType.XLA
            args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
            args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
            args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
            args.use_parallelism_config = defaults.parallelism_config != {}
        if args.gpu_ids is None:
            if defaults.gpu_ids is not None:
                args.gpu_ids = defaults.gpu_ids
            else:
                args.gpu_ids = "all"

        if args.multi_gpu and args.num_machines is None:
            args.num_machines = defaults.num_machines

        if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
            raise ValueError(
                "Less than two GPU ids were configured and tried to run on on multiple GPUs. "
                "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
            )
        if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
            # Update args with the defaults
            for name, attr in defaults.__dict__.items():
                if isinstance(attr, dict):
                    # Copy defaults.somedict.somearg to args.somearg and
                    # defaults.fsdp_config.x to args.fsdp_x
                    for key, value in attr.items():
                        if name == "fsdp_config" and not key.startswith("fsdp"):
                            key = "fsdp_" + key
                        elif name == "fp8_config" and not key.startswith("fp8"):
                            key = "fp8_" + key
                        # Only apply a config value if the user did not override it on the CLI.
                        if hasattr(args, "nondefault") and key not in args.nondefault:
                            setattr(args, key, value)
                elif (
                    name not in ["compute_environment", "mixed_precision", "distributed_type"]
                    and getattr(args, name, None) is None
                ):
                    # Those args are handled separately
                    setattr(args, name, attr)
        if not args.debug:
            args.debug = defaults.debug

        if not args.mixed_precision:
            if defaults.mixed_precision is None:
                args.mixed_precision = "no"
            else:
                args.mixed_precision = defaults.mixed_precision
                # Remember that mixed_precision came from the config file, not the CLI.
                mp_from_config_flag = True
        else:
            native_amp = is_bf16_available(True)
            if (
                args.mixed_precision == "bf16"
                and not native_amp
                and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
            ):
                raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")

        # Silently set the default here
        if args.dynamo_backend is None:
            args.dynamo_backend = "no"
        if args.num_processes == -1:
            raise ValueError("You need to manually pass in `--num_processes` using this config yaml.")
    else:
        # No config file: infer defaults from the available hardware and record
        # each inferred value so it can be surfaced in a single warning.
        if args.num_processes is None:
            if is_xpu_available():
                args.num_processes = torch.xpu.device_count()
            elif is_mlu_available():
                args.num_processes = torch.mlu.device_count()
            elif is_sdaa_available():
                args.num_processes = torch.sdaa.device_count()
            elif is_musa_available():
                args.num_processes = torch.musa.device_count()
            elif is_npu_available():
                args.num_processes = torch.npu.device_count()
            elif is_hpu_available():
                args.num_processes = torch.hpu.device_count()
            else:
                args.num_processes = torch.cuda.device_count()
            warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
        if args.debug is None:
            args.debug = False
        if (
            not args.multi_gpu
            and args.num_processes > 1
            and (
                (is_xpu_available() and torch.xpu.device_count() > 1)
                or (is_npu_available() and torch.npu.device_count() > 1)
                or (is_hpu_available() and torch.hpu.device_count() > 1)
                or (is_mlu_available() and torch.mlu.device_count() > 1)
                or (is_sdaa_available() and torch.sdaa.device_count() > 1)
                or (is_musa_available() and torch.musa.device_count() > 1)
                or (torch.cuda.is_available() and torch.cuda.device_count() > 1)
            )
        ):
            warned.append(
                "\t\tMore than one GPU was found, enabling multi-GPU training.\n"
                "\t\tIf this was unintended please pass in `--num_processes=1`."
            )
            args.multi_gpu = True
        if args.num_machines is None:
            warned.append("\t`--num_machines` was set to a value of `1`")
            args.num_machines = 1
        if args.mixed_precision is None:
            warned.append("\t`--mixed_precision` was set to a value of `'no'`")
            args.mixed_precision = "no"
        if not hasattr(args, "use_cpu"):
            args.use_cpu = args.cpu
        if args.dynamo_backend is None:
            warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
            args.dynamo_backend = "no"
    if args.debug:
        logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")

    is_aws_env_disabled = defaults is None or (
        defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
    )
    if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
        args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1)
        if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0:
            # Divide physical cores evenly among the local MPI ranks (or the
            # processes on this machine when no MPI env var is set).
            local_size = get_int_from_env(
                ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
                max(int(args.num_processes / args.num_machines), 1),
            )
            threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
            if threads_per_process > 1:
                args.num_cpu_threads_per_process = threads_per_process
                warned.append(
                    f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
                )

    if args.use_xpu is not None:
        logger.warning(
            "use_xpu is deprecated and ignored, will be removed in Accelerate v1.20. "
            "XPU is a PyTorch native citizen now, we don't need extra argument to enable it any more."
        )

    if any(warned):
        message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
        message += "\n".join(warned)
        message += (
            "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
        )
        logger.warning(message)
    return args, defaults, mp_from_config_flag
1210
+
1211
+
1212
def launch_command(args):
    """Entry point for `accelerate launch`: validate arguments and hand off to a launcher."""
    args, defaults, mp_from_config_flag = _validate_launch_command(args)

    if args.use_deepspeed and not args.cpu:
        # Record which DeepSpeed fields came from the accelerate config file so the
        # launcher can tell config-supplied values apart from CLI-supplied ones.
        config_fields = list(defaults.deepspeed_config.keys()) if defaults else []
        if mp_from_config_flag:
            config_fields.append("mixed_precision")
        args.deepspeed_fields_from_accelerate_config = ",".join(config_fields)
        deepspeed_launcher(args)
    elif (args.use_fsdp or args.use_megatron_lm or args.multi_gpu) and not args.cpu:
        # FSDP, Megatron-LM and plain multi-GPU all route through torch.distributed.
        multi_gpu_launcher(args)
    elif args.tpu and not args.cpu:
        if args.tpu_use_cluster:
            tpu_pod_launcher(args)
        else:
            tpu_launcher(args)
    elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        sagemaker_launcher(defaults, args)
    else:
        simple_launcher(args)
1236
+
1237
+
1238
def main():
    """Console-script entry point: parse the CLI arguments and run the launch command."""
    launch_command(launch_command_parser().parse_args())
1242
+
1243
+
1244
if __name__ == "__main__":
    # Allow running the module directly, in addition to the console-script entry point.
    main()