ZTWHHH committed on
Commit
860c385
·
verified ·
1 Parent(s): a8fbe0a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. parrot/lib/python3.10/site-packages/deepspeed/__init__.py +343 -0
  3. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py +20 -0
  4. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py +82 -0
  10. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py +111 -0
  11. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py +222 -0
  12. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py +96 -0
  13. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py +98 -0
  14. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py +62 -0
  15. parrot/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py +140 -0
  16. parrot/lib/python3.10/site-packages/deepspeed/constants.py +21 -0
  17. parrot/lib/python3.10/site-packages/deepspeed/env_report.py +194 -0
  18. parrot/lib/python3.10/site-packages/deepspeed/git_version_info.py +22 -0
  19. parrot/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py +6 -0
  20. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py +7 -0
  21. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py +5 -0
  22. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py +77 -0
  32. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py +15 -0
  33. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py +20 -0
  34. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py +20 -0
  35. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py +20 -0
  36. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py +69 -0
  37. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py +20 -0
  38. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py +20 -0
  39. parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py +199 -0
  40. parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/__init__.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/config.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/csv_monitor.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/monitor.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/tensorboard.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/wandb.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/deepspeed/monitor/csv_monitor.py +67 -0
  47. parrot/lib/python3.10/site-packages/deepspeed/monitor/monitor.py +53 -0
  48. parrot/lib/python3.10/site-packages/deepspeed/monitor/utils.py +24 -0
  49. parrot/lib/python3.10/site-packages/deepspeed/ops/__init__.py +17 -0
  50. parrot/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -176,3 +176,6 @@ parrot/lib/python3.10/site-packages/pillow.libs/libopenjp2-05423b53.so filter=lf
176
  parrot/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
177
  parrot/lib/libtk8.6.so filter=lfs diff=lfs merge=lfs -text
178
  parrot/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
176
  parrot/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
177
  parrot/lib/libtk8.6.so filter=lfs diff=lfs merge=lfs -text
178
  parrot/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
179
+ parrot/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
180
+ parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
181
+ parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/deepspeed/__init__.py ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import sys
7
+ import types
8
+ import json
9
+ from typing import Optional, Union
10
+ import torch
11
+ from torch.optim import Optimizer
12
+ from torch.optim.lr_scheduler import _LRScheduler
13
+ from packaging import version as pkg_version
14
+
15
# Probe for Triton availability (used for optional fused kernels).
# Skip Triton import for AMD due to pytorch-triton-rocm module breaking device API in DeepSpeed;
# on ROCm builds (torch.version.hip is set) HAS_TRITON is forced to False.
if not (hasattr(torch.version, 'hip') and torch.version.hip is not None):
    try:
        import triton  # noqa: F401 # type: ignore
        HAS_TRITON = True
    except ImportError:
        # Triton is optional; its absence only disables Triton-backed paths.
        HAS_TRITON = False
else:
    HAS_TRITON = False
24
+
25
+ from . import ops
26
+ from . import module_inject
27
+
28
+ from .accelerator import get_accelerator
29
+ from .runtime.engine import DeepSpeedEngine, DeepSpeedOptimizerCallable, DeepSpeedSchedulerCallable
30
+ from .runtime.engine import ADAM_OPTIMIZER, LAMB_OPTIMIZER
31
+ from .runtime.hybrid_engine import DeepSpeedHybridEngine
32
+ from .runtime.pipe.engine import PipelineEngine
33
+ from .inference.engine import InferenceEngine
34
+ from .inference.config import DeepSpeedInferenceConfig
35
+ from .runtime.lr_schedules import add_tuning_arguments
36
+ from .runtime.config import DeepSpeedConfig, DeepSpeedConfigError
37
+ from .runtime.activation_checkpointing import checkpointing
38
+ from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
39
+ from .module_inject import replace_transformer_layer, revert_transformer_layer
40
+
41
+ from .utils import log_dist, OnDevice, logger
42
+ from .comm.comm import init_distributed
43
+
44
+ from .runtime import zero
45
+ from .runtime import DeepSpeedOptimizer, ZeROOptimizer
46
+ from .runtime.compiler import is_compile_supported
47
+
48
+ from .pipe import PipelineModule
49
+
50
+ from .git_version_info import version, git_hash, git_branch
51
+
52
+
53
def _parse_version(version_str):
    """Split a version string into its (major, minor, patch) integer components."""
    parsed = pkg_version.parse(version_str)
    return parsed.major, parsed.minor, parsed.micro
57
+
58
+
59
# Export version information (sourced from git_version_info) so callers can
# introspect the installed DeepSpeed release programmatically.
__version__ = version
__version_major__, __version_minor__, __version_patch__ = _parse_version(__version__)
__git_hash__ = git_hash
__git_branch__ = git_branch

# Set to torch's distributed package or deepspeed.comm based inside DeepSpeedEngine init;
# remains None until deepspeed.initialize() rebinds it.
dist = None
67
+
68
+
69
def initialize(args=None,
               model: torch.nn.Module = None,
               optimizer: Optional[Union[Optimizer, DeepSpeedOptimizerCallable]] = None,
               model_parameters: Optional[torch.nn.Module] = None,
               training_data: Optional[torch.utils.data.Dataset] = None,
               lr_scheduler: Optional[Union[_LRScheduler, DeepSpeedSchedulerCallable]] = None,
               mpu=None,
               dist_init_required: Optional[bool] = None,
               collate_fn=None,
               config=None,
               config_params=None):
    """Initialize the DeepSpeed Engine.

    Arguments:
        args: an object containing local_rank and deepspeed_config fields.
            This is optional if `config` is passed.

        model: Required: nn.module class before apply any wrappers

        optimizer: Optional: a user defined Optimizer or Callable that returns an Optimizer object.
            This overrides any optimizer definition in the DeepSpeed json config.

        model_parameters: Optional: An iterable of torch.Tensors or dicts.
            Specifies what Tensors should be optimized.

        training_data: Optional: Dataset of type torch.utils.data.Dataset

        lr_scheduler: Optional: Learning Rate Scheduler Object or a Callable that takes an Optimizer and returns a Scheduler object.
            The scheduler object should define a get_lr(), step(), state_dict(), and load_state_dict() methods

        mpu: Optional: A model parallelism unit object that implements
            get_{model,data}_parallel_{rank,group,world_size}()

        dist_init_required: Optional: None will auto-initialize torch distributed if needed,
            otherwise the user can force it to be initialized or not via boolean.

        collate_fn: Optional: Merges a list of samples to form a
            mini-batch of Tensor(s).  Used when using batched loading from a
            map-style dataset.

        config: Optional: Instead of requiring args.deepspeed_config you can pass your deepspeed config
            as an argument instead, as a path or a dictionary.

        config_params: Optional: Same as `config`, kept for backwards compatibility.

    Returns:
        A tuple of ``engine``, ``optimizer``, ``training_dataloader``, ``lr_scheduler``

        * ``engine``: DeepSpeed runtime engine which wraps the client model for distributed training.

        * ``optimizer``: Wrapped optimizer if a user defined ``optimizer`` is supplied, or if
          optimizer is specified in json config else ``None``.

        * ``training_dataloader``: DeepSpeed dataloader if ``training_data`` was supplied,
          otherwise ``None``.

        * ``lr_scheduler``: Wrapped lr scheduler if user ``lr_scheduler`` is passed, or
          if ``lr_scheduler`` specified in JSON configuration. Otherwise ``None``.
    """
    log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
                                                                             __git_branch__),
             ranks=[0])

    # Disable zero.Init context if it's currently enabled
    zero.partition_parameters.shutdown_init_context()

    assert model is not None, "deepspeed.initialize requires a model"

    # Rebind the module-level `dist` alias to deepspeed.comm and initialize the
    # communication backend reported by the active accelerator.
    global dist
    from deepspeed import comm as dist
    dist_backend = get_accelerator().communication_backend_name()
    dist.init_distributed(dist_backend=dist_backend, dist_init_required=dist_init_required)

    # Set config using config_params for backwards compat
    if config is None and config_params is not None:
        config = config_params

    # Check for deepscale_config for backwards compat
    if hasattr(args, "deepscale_config") and args.deepscale_config is not None:
        logger.warning("************ --deepscale_config is deprecated, please use --deepspeed_config ************")
        if hasattr(args, "deepspeed_config"):
            assert (args.deepspeed_config is
                    None), "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
        args.deepspeed_config = args.deepscale_config
        args.deepscale_config = None

    # Check that we have only one config passed
    if hasattr(args, "deepspeed_config") and args.deepspeed_config is not None:
        assert config is None, "Not sure how to proceed, we were given deepspeed configs in the deepspeed arguments and deepspeed.initialize() function call"
        config = args.deepspeed_config
    assert config is not None, "DeepSpeed requires --deepspeed_config to specify configuration file"

    # Select the engine implementation: ordinary models run on DeepSpeedEngine
    # (or DeepSpeedHybridEngine when enabled in the config); a PipelineModule
    # runs on PipelineEngine and supplies its own mpu.
    if not isinstance(model, PipelineModule):
        config_class = DeepSpeedConfig(config, mpu)
        if config_class.hybrid_engine.enabled:
            engine = DeepSpeedHybridEngine(args=args,
                                           model=model,
                                           optimizer=optimizer,
                                           model_parameters=model_parameters,
                                           training_data=training_data,
                                           lr_scheduler=lr_scheduler,
                                           mpu=mpu,
                                           dist_init_required=dist_init_required,
                                           collate_fn=collate_fn,
                                           config=config,
                                           config_class=config_class)
        else:
            engine = DeepSpeedEngine(args=args,
                                     model=model,
                                     optimizer=optimizer,
                                     model_parameters=model_parameters,
                                     training_data=training_data,
                                     lr_scheduler=lr_scheduler,
                                     mpu=mpu,
                                     dist_init_required=dist_init_required,
                                     collate_fn=collate_fn,
                                     config=config,
                                     config_class=config_class)
    else:
        assert mpu is None, "mpu must be None with pipeline parallelism"
        mpu = model.mpu()
        config_class = DeepSpeedConfig(config, mpu)
        engine = PipelineEngine(args=args,
                                model=model,
                                optimizer=optimizer,
                                model_parameters=model_parameters,
                                training_data=training_data,
                                lr_scheduler=lr_scheduler,
                                mpu=mpu,
                                dist_init_required=dist_init_required,
                                collate_fn=collate_fn,
                                config=config,
                                config_class=config_class)

    # Restore zero.Init context if necessary
    zero.partition_parameters.restore_init_context()

    return_items = [engine, engine.optimizer, engine.training_dataloader, engine.lr_scheduler]
    return tuple(return_items)
208
+
209
+
210
def _add_core_arguments(parser):
    r"""Register the core DeepSpeed command line arguments on *parser* (internal helper).

    The core set of DeepSpeed arguments include the following:
    1) --deepspeed: boolean flag to enable DeepSpeed
    2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.

    This is a helper function to the public add_config_arguments()

    Arguments:
        parser: argument parser
    Return:
        parser: Updated Parser
    """
    ds_group = parser.add_argument_group('DeepSpeed', 'DeepSpeed configurations')

    ds_group.add_argument('--deepspeed',
                          action='store_true',
                          default=False,
                          help='Enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')

    ds_group.add_argument('--deepspeed_config', type=str, default=None, help='DeepSpeed json configuration file.')

    # Deprecated "deepscale" spellings kept for backwards compatibility.
    ds_group.add_argument('--deepscale',
                          action='store_true',
                          default=False,
                          help='Deprecated enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')

    ds_group.add_argument('--deepscale_config',
                          type=str,
                          default=None,
                          help='Deprecated DeepSpeed json configuration file.')

    return parser
243
+
244
+
245
def add_config_arguments(parser):
    r"""Update the argument parser to enabling parsing of DeepSpeed command line arguments.

    The set of DeepSpeed arguments include the following:
    1) --deepspeed: boolean flag to enable DeepSpeed
    2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.

    Arguments:
        parser: argument parser
    Return:
        parser: Updated Parser
    """
    # All of the work happens in the internal helper; the parser is returned
    # so callers can chain the call.
    return _add_core_arguments(parser)
259
+
260
+
261
def default_inference_config():
    """Return a dictionary holding the default DeepSpeed inference configuration."""
    default_config = DeepSpeedInferenceConfig()
    return default_config.dict()
266
+
267
+
268
def init_inference(model, config=None, **kwargs):
    """Initialize the DeepSpeed InferenceEngine.

    Description: all four cases are valid and supported in DS init_inference() API.

    # Case 1: user provides no config and no kwargs. Default config will be used.

    .. code-block:: python

        generator.model = deepspeed.init_inference(generator.model)
        string = generator("DeepSpeed is")
        print(string)

    # Case 2: user provides a config and no kwargs. User supplied config will be used.

    .. code-block:: python

        generator.model = deepspeed.init_inference(generator.model, config=config)
        string = generator("DeepSpeed is")
        print(string)

    # Case 3: user provides no config and uses keyword arguments (kwargs) only.

    .. code-block:: python

        generator.model = deepspeed.init_inference(generator.model,
                                                   tensor_parallel={"tp_size": world_size},
                                                   dtype=torch.half,
                                                   replace_with_kernel_inject=True)
        string = generator("DeepSpeed is")
        print(string)

    # Case 4: user provides config and keyword arguments (kwargs). Both config and kwargs are merged and kwargs take precedence.

    .. code-block:: python

        generator.model = deepspeed.init_inference(generator.model, config={"dtype": torch.half}, replace_with_kernel_inject=True)
        string = generator("DeepSpeed is")
        print(string)

    Arguments:
        model: Required: original nn.module object without any wrappers

        config: Optional: instead of arguments, you can pass in a DS inference config dict or path to JSON file

    Returns:
        A deepspeed.InferenceEngine wrapped model.
    """
    log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
                                                                             __git_branch__),
             ranks=[0])

    # Load config_dict from config first: accept a JSON file path, a dict,
    # or None (treated as an empty config).
    if config is None:
        config = {}
    if isinstance(config, str):
        with open(config, "r") as f:
            config_dict = json.load(f)
    elif isinstance(config, dict):
        config_dict = config
    else:
        raise ValueError(f"'config' argument expected string or dictionary, got {type(config)}")

    # Update with values from kwargs, ensuring no conflicting overlap between config and kwargs
    overlap_keys = set(config_dict.keys()).intersection(kwargs.keys())
    # If there is overlap, error out if values are different
    # (a key present in both with an identical value is allowed).
    for key in overlap_keys:
        if config_dict[key] != kwargs[key]:
            raise ValueError(f"Conflicting argument '{key}' in 'config':{config_dict[key]} and kwargs:{kwargs[key]}")
    config_dict.update(kwargs)

    # Validate the merged settings through the pydantic-style config object.
    ds_inference_config = DeepSpeedInferenceConfig(**config_dict)

    engine = InferenceEngine(model, config=ds_inference_config)

    return engine
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .reshape_meg_2d import reshape_meg_2d_parallel
7
+
8
+ from .deepspeed_checkpoint import DeepSpeedCheckpoint
9
+
10
+ from .utils import (get_layer_ckpt_name_for_rank, get_model_ckpt_name_for_rank, get_zero_ckpt_name_for_rank)
11
+
12
+ from .reshape_utils import (merge_state)
13
+
14
+ from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
15
+
16
+ from .zero_checkpoint import ZeROCheckpoint
17
+
18
+ from .universal_checkpoint import enable_universal_checkpoint
19
+
20
+ from .constants import *
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc ADDED
Binary file (3.97 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc ADDED
Binary file (6.31 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc ADDED
Binary file (3.28 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
Various symbolic constants used for model checkpointing
"""

#########################################
# Optimizer checkpoint keys
#########################################
OPTIMIZER_STATE_DICT = "optimizer_state_dict"
FP32_GROUPS = "fp32_groups"
FP32_FLAT_GROUPS = 'fp32_flat_groups'

BASE_OPTIMIZER_STATE = 'base_optimizer_state'
BASE_OPTIMIZER_STATE_STEP = 'base_optimizer_state_step'
SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups"
GROUP_PADDINGS = 'group_paddings'
PARTITION_COUNT = 'partition_count'
ZERO_STAGE = 'zero_stage'
CLIP_GRAD = 'clip_grad'
FP32_WEIGHT_KEY = "fp32"
LOSS_SCALER = 'loss_scaler'

#########################################
# Module checkpoint keys
#########################################
PARAM = 'param'
PARAM_SHAPES = 'param_shapes'
BUFFER_NAMES = 'buffer_names'
FROZEN_PARAM_SHAPES = 'frozen_param_shapes'
FROZEN_PARAM_FRAGMENTS = 'frozen_param_fragments'

#########################################
# Checkpoint naming constants
# (file-name prefixes/suffixes used to locate checkpoint shards on disk)
#########################################
MODEL_FILE_PREFIX = 'mp_rank_'
ZERO_FILE_PREFIX = 'zero_pp_rank_'
OPTIM_FILE_SUFFIX = '_optim_states.pt'
MODEL_FILE_SUFFIX = '_model_states.pt'
LAYER_FILE_PREFIX = 'layer_'
BF16_ZERO_FILE_PREFIX = 'bf16_' + ZERO_FILE_PREFIX
FP16_ZERO_FILE_PREFIX = 'fp16_' + ZERO_FILE_PREFIX

#########################################
# Checkpoint utility keys
#########################################
DS_VERSION = 'ds_version'

#########################################
# Universal Checkpoint keys
#########################################
UNIVERSAL_CHECKPOINT_INFO = 'universal_checkpoint_info'
UNIVERSAL_CHECKPOINT_VERSION_KEY = 'universal_checkpoint_version'
# Reserve version 0.1 for the hardcoded logic used in BLOOM-176B training
UNIVERSAL_CHECKPOINT_VERSION_VALUE = 0.2

# Vocabulary padding
VOCAB_TENSOR = 'vocab_tensor'
PADDED_VOCAB_SIZE = 'padded_vocab_size'
ORIGINAL_VOCAB_SIZE = 'original_vocab_size'

# Parameter splitting/merging
PARAM_SLICE_MAPPINGS = 'param_slice_mappings'
CAT_DIM = "cat_dim"
# Following is a special case where a parameter effectively contains sub parameters.
# As an example, consider Megatron-DeepSpeed GPT SWIGLU implementation (mlp.h_to_4h).
# In this case, a single parameter is allocated contiguously, but used as separate parameters.
# When using universal checkpoint, we have to normalize the representation of the full parameter.
# We normalize it by concatenating all slices of the sub params and then concatenating the sub params.
# All concat operations are done on CAT_DIM (currently, no support for different concat dims sub params and TP slicing).
# Similarly, load_hp_checkpoint_state has to take the needed actions when loading from universal.
PARAM_N_SUB_PARAMS = "param_n_sub_params"

# Regex list of parameters that require special handling
VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns'
PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns'
PARAMETER_TO_AVERAGE_PATTERNS = 'parameter_to_average_patterns'
PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns'
TP_REPLICATED_PARAMETER_PATTERNS = 'tp_replicated_parameter_patterns'
PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 = 'parameter_with_2_sub_params_cat_dim_0'
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .reshape_utils import (get_files, get_files_with_prefix, partition_data, get_zero_files)
7
+
8
+ from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
9
+
10
+ from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map)
11
+
12
# Axis labels for the three parallelism dimensions of a checkpoint layout.
PP_DIM = 'PP'
TP_DIM = 'TP'
DP_DIM = 'DP'


class model_3d_desc(object):
    """Descriptor of a 3D (pipeline x tensor x data) parallel topology."""

    def __init__(self, pp_degree=1, tp_degree=1, dp_degree=1):
        self.pp_degree = pp_degree
        self.tp_degree = tp_degree
        self.dp_degree = dp_degree

    def reshape(self, target_3d_desc, verbose=False):
        """Remap this topology onto *target_3d_desc*; returns one 2D map per target DP replica."""
        ok, errors = self.can_reshape(target_3d_desc)
        assert ok, ','.join(errors)
        new_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.pp_degree,
                                             old_tp_degree=self.tp_degree,
                                             new_pp_degree=target_3d_desc.pp_degree,
                                             new_tp_degree=target_3d_desc.tp_degree,
                                             verbose=verbose)

        flattened = flatten_dp_dimension(meg_2d_map=new_2d_map,
                                         src_2d_size=self.pp_degree * self.tp_degree,
                                         dp_degree=self.dp_degree)

        return unflatten_dp_dimension(meg_2d_map=flattened, dp_degree=target_3d_desc.dp_degree)

    def get_desc(self):
        """Human-readable summary of the topology degrees."""
        return f'{PP_DIM},{TP_DIM},{DP_DIM} = ({self.pp_degree}, {self.tp_degree}, {self.dp_degree})'

    def world_size(self):
        # Total number of ranks covered by this topology.
        return self.pp_degree * self.tp_degree * self.dp_degree

    def is_valid(self, pp_index, tp_index, dp_index):
        """Check each index against its degree; returns (valid, error_messages)."""
        problems = []
        checks = ((pp_index, self.pp_degree, PP_DIM),
                  (tp_index, self.tp_degree, TP_DIM),
                  (dp_index, self.dp_degree, DP_DIM))
        for index, degree, dim_name in checks:
            if index >= degree:
                problems.append(f'{dim_name} indexing error: index {index} >= degree {degree}')

        return len(problems) == 0, problems

    def can_reshape(self, target_3d_desc):
        """A reshape may only shrink (or keep) each dimension; returns (ok, error_messages)."""
        problems = []
        shrink_checks = ((PP_DIM, self.pp_degree, target_3d_desc.pp_degree),
                         (TP_DIM, self.tp_degree, target_3d_desc.tp_degree),
                         (DP_DIM, self.dp_degree, target_3d_desc.dp_degree))
        for dim_name, src_degree, tgt_degree in shrink_checks:
            if tgt_degree > src_degree:
                problems.append(f'Expansion reshape not supported - {dim_name}: {src_degree} ---> {tgt_degree}')

        return len(problems) == 0, problems
71
+
72
+
73
def get_model_3d_descriptor(dir):
    """Infer the (pp, tp, dp) topology of a checkpoint directory from its file names."""
    all_files = get_files(dir)
    zero_files = get_zero_files(dir)
    # Layer files for the first pipeline stage reveal the tensor-parallel width.
    first_stage_layer_files = get_files_with_prefix(all_files, f'{LAYER_FILE_PREFIX}01')
    model_file_count = len(get_files_with_prefix(all_files, MODEL_FILE_PREFIX))
    if len(first_stage_layer_files) > 0:
        tp_degree = len(first_stage_layer_files)
        pp_degree = model_file_count // tp_degree
        dp_degree = max(1, len(zero_files) // (pp_degree * tp_degree))
    else:
        # No layer files means no pipeline parallelism.
        pp_degree = 1
        tp_degree = model_file_count
        dp_degree = max(1, len(zero_files) // tp_degree)

    return model_3d_desc(pp_degree, tp_degree, dp_degree)
87
+
88
+
89
def flatten_dp_dimension(meg_2d_map, src_2d_size, dp_degree):
    """Expand each 2D cell so it lists the matching rank from every DP replica."""
    flat_map = meg_2d_parallel_map(meg_2d_map.pp_degree, meg_2d_map.tp_degree)
    for pp in range(meg_2d_map.pp_degree):
        for tp in range(meg_2d_map.tp_degree):
            # Each base rank repeats once per replica, offset by the 2D grid size.
            for base_rank in meg_2d_map.get_data(pp, tp):
                replica_ranks = [base_rank + (replica * src_2d_size) for replica in range(dp_degree)]
                flat_map.add_data(pp, tp, replica_ranks)
    return flat_map
98
+
99
+
100
def unflatten_dp_dimension(meg_2d_map, dp_degree):
    """Split a flattened map back into *dp_degree* per-replica 2D maps."""
    pp = meg_2d_map.pp_degree
    tp = meg_2d_map.tp_degree
    replica_maps = [meg_2d_parallel_map(pp_degree=pp, tp_degree=tp) for _ in range(dp_degree)]
    for pp_index in range(pp):
        for tp_index in range(tp):
            # Partition each cell's flat rank list evenly across the replicas.
            chunks = partition_data(meg_2d_map.get_data(pp_index, tp_index), dp_degree)
            for chunk, replica_map in zip(chunks, replica_maps):
                replica_map.add_data(pp_index, tp_index, chunk)

    return replica_maps
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .reshape_utils import partition_data
7
+
8
+
9
class meg_2d_parallel_map(object):
    """Maps (pp_index, tp_index) coordinates to lists of data (e.g. global ranks)."""

    def __init__(self, pp_degree, tp_degree):
        self.pp_degree = pp_degree
        self.tp_degree = tp_degree
        # Maps "pp,tp" string keys to lists of data.
        self.map = {}

    def simple_init(self):
        """Assign ranks 0..pp*tp-1 in row-major order, one rank per cell."""
        self.map = {
            self._make_key(i // self.tp_degree, i % self.tp_degree): [i]
            for i in range(self.pp_degree * self.tp_degree)
        }

    def add_data(self, pp_index, tp_index, data):
        """Append *data* (a list) to the cell at (pp_index, tp_index)."""
        self._validate_indices(pp_index, tp_index)
        # Accept list subclasses as well; the original `type(data) is list` was
        # needlessly strict and un-idiomatic.
        assert isinstance(data, list)

        key = self._make_key(pp_index, tp_index)
        if key not in self.map:  # idiomatic membership test (was `not key in self.map.keys()`)
            self.map[key] = []
        self.map[key] += data

    def get_data(self, pp_index=None, tp_index=None):
        """Return the concatenated data of a cell, row, column, or the whole map.

        A ``None`` index means "all indices along that dimension".
        """
        self._validate_indices(pp_index, tp_index)
        pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index]
        tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index]

        result = []
        for i in pp_indices:
            for j in tp_indices:
                result += self.map[self._make_key(i, j)]

        return result

    def print_data(self, tag):
        """Print all cells, prefixed by *tag* (debugging aid)."""
        print(f'{tag}')
        for key, value in self.map.items():
            print(f'{key} = {value}')

    def _validate_indices(self, pp_index, tp_index):
        # None is allowed and means "all"; otherwise the index must be in range.
        assert pp_index is None or pp_index < self.pp_degree
        assert tp_index is None or tp_index < self.tp_degree

    def _make_key(self, i, j):
        return f'{i},{j}'
54
+
55
+
56
def _reshape_tp_dimension(old_2d_map, new_tp_degree):
    """Repartition each pipeline row of *old_2d_map* across *new_tp_degree* TP cells."""
    pp_degree = old_2d_map.pp_degree
    reshaped = meg_2d_parallel_map(pp_degree, new_tp_degree)
    for pp in range(pp_degree):
        row_ranks = old_2d_map.get_data(pp_index=pp, tp_index=None)
        for tp, chunk in enumerate(partition_data(row_ranks, new_tp_degree)):
            reshaped.add_data(pp, tp, chunk)

    return reshaped
66
+
67
+
68
def _reshape_pp_dimension(old_2d_map, new_pp_degree):
    """Repartition each TP column of *old_2d_map* across *new_pp_degree* PP cells."""
    tp_degree = old_2d_map.tp_degree
    reshaped = meg_2d_parallel_map(new_pp_degree, tp_degree)
    for tp in range(tp_degree):
        col_ranks = old_2d_map.get_data(pp_index=None, tp_index=tp)
        for pp, chunk in enumerate(partition_data(col_ranks, new_pp_degree)):
            reshaped.add_data(pp, tp, chunk)

    return reshaped
78
+
79
+
80
def reshape_meg_2d_parallel(old_pp_degree, old_tp_degree, new_pp_degree, new_tp_degree, verbose=False):
    """Map an (old_pp, old_tp) 2D rank layout onto a smaller (new_pp, new_tp) layout.

    Only contraction is supported: the new degrees must not exceed the old
    ones. Returns a meg_2d_parallel_map whose cells hold the old ranks merged
    into each new coordinate.
    """
    assert new_pp_degree <= old_pp_degree
    assert new_tp_degree <= old_tp_degree

    current_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree)
    current_map.simple_init()
    if verbose:
        current_map.print_data('original_2d_map:')

    # Contract the TP dimension first, then PP.
    if old_tp_degree != new_tp_degree:
        current_map = _reshape_tp_dimension(current_map, new_tp_degree)
    if verbose:
        current_map.print_data('after_tp_reshape:')

    if old_pp_degree != new_pp_degree:
        current_map = _reshape_pp_dimension(current_map, new_pp_degree)

    if verbose:
        current_map.print_data('final_2d_map:')

    return current_map
105
+
106
+
107
def get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None):
    """Compute the rank groupings of a 3D (tensor/pipeline/data) parallel layout.

    Arguments:
        tp_size: number of GPUs used to parallelize the model tensor.
        pp_size: number of GPUs used to parallelize the model pipeline.
        dp_size: number of GPUs used to parallelize the model data.

    Example for 16 GPUs with tp_size=2 and pp_size=4: ranks are grouped into
    8 data-parallel groups ([g0, g2], [g1, g3], ...), 8 tensor model-parallel
    groups ([g0, g1], [g2, g3], ...) and 4 pipeline model-parallel groups
    ([g0, g4, g8, g12], ...). Adjacent ranks should live on the same node for
    efficiency.

    Returns:
        (tp_groups, pp_groups, dp_groups) — each a list of rank lists.
    """
    world_size = tp_size * pp_size * dp_size

    print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}")

    tp_degree = min(tp_size, world_size)
    pp_degree = min(pp_size, world_size)
    dp_degree = world_size // (tp_degree * pp_degree)

    num_pp_groups = world_size // pp_degree

    # Data-parallel groups: ranks holding identical model shards.
    all_dp_group_ranks = []
    for stage in range(pp_degree):
        first, last = stage * num_pp_groups, (stage + 1) * num_pp_groups
        all_dp_group_ranks.extend(list(range(first + shard, last, tp_degree)) for shard in range(tp_degree))

    print("DP", all_dp_group_ranks)

    # Pipeline model-parallel groups: one rank from each DP group per stage.
    all_pp_group_ranks = [[dp_group[i] for dp_group in all_dp_group_ranks] for i in range(dp_degree)]

    print(f"PP", all_pp_group_ranks)

    # Tensor model-parallel groups: contiguous runs of tp_degree ranks.
    num_tp_groups = world_size // tp_degree
    all_tp_group_ranks = [list(range(g * tp_degree, (g + 1) * tp_degree)) for g in range(num_tp_groups)]

    print(f"TP", all_tp_group_ranks)

    return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks
173
+
174
+ # # Build the pipeline model-parallel groups and embedding groups
175
+ # # (first and last rank in each pipeline model-parallel group).
176
+ # for i in range(num_pipeline_model_parallel_groups):
177
+ # ranks = range(i, world_size,
178
+ # num_pipeline_model_parallel_groups)
179
+ # print(f"EMB{i}", list(ranks))
180
+
181
+
182
def reshape(src, tgt):
    """Print how rank groups migrate when contracting a (tp, pp, dp) topology.

    reshape([tp_size_src, pp_size_src, dp_size_src],
            [tp_size_tgt, pp_size_tgt, dp_size_tgt])
    """
    print(f"\n\n*** Reshaping: {src} => {tgt}")

    tp_src, pp_src, dp_src = src
    tp_tgt, pp_tgt, dp_tgt = tgt

    # Layouts after each contraction step: original, TP contracted, PP contracted.
    tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_src, pp_size=pp_src, dp_size=dp_src)
    tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_tgt, pp_size=pp_src, dp_size=dp_src)
    tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_tgt, pp_size=pp_tgt, dp_size=dp_src)

    # Handle TP contraction first.
    print("\n*** TP contraction:")
    for idx in range(len(tp_ranks1)):
        print(f'{tp_ranks1[idx]} => {tp_ranks2[idx]}')

    # Then PP contraction.
    print("\n*** PP contraction:")
    for idx in range(len(pp_ranks1)):
        print(f'{pp_ranks2[idx]} => {pp_ranks3[idx]}')
209
+
210
+
211
+ # easy
212
+ #reshape([2,2,1],[1,1,1])
213
+
214
+ # probably need more logic to suggest how to pack
215
+ #reshape([4,4,1],[2,2,1])
216
+
217
+ #reshape([2,4,2], [8,32,1])
218
+
219
+ # get_mpu_ranks(2,2,2)
220
+ # get_mpu_ranks(4,2,1)
221
+ # get_mpu_ranks(2,4,1)
222
+ # get_mpu_ranks(1,1,8)
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import torch
8
+ from collections import OrderedDict
9
+ from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)
10
+
11
+
12
def basic_folder_validation(dir):
    """Assert that *dir* exists and is a directory."""
    path_present = os.path.exists(dir)
    assert path_present, f'{dir} path does not exist'
    assert os.path.isdir(dir), f'{dir} is not a folder'
15
+
16
+
17
def get_files_with_prefix(all_files, prefix):
    """Return the sorted subset of *all_files* whose basename starts with *prefix*."""
    matches = [path for path in all_files if os.path.basename(path).startswith(prefix)]
    return sorted(matches)
25
+
26
+
27
def validate_files(file_list):
    """Report (via print) any entries of *file_list* that are not existing files."""
    for candidate in file_list:
        if os.path.isfile(candidate):
            continue
        print(f'Error: {candidate} is not existent')
31
+
32
+
33
def get_files(dir):
    """Recursively collect all file paths under *dir*."""
    collected = []
    for root, _, filenames in os.walk(dir):
        collected.extend(os.path.join(root, name) for name in filenames)
    return collected
39
+
40
+
41
def get_zero_files(dir):
    """Return the ZeRO shard files under *dir*, trying each known filename prefix.

    Prefixes are tried in order; the first prefix with any matches wins.
    Returns [] when no ZeRO files are found.
    """
    all_files = get_files(dir)
    for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:
        matches = get_files_with_prefix(all_files, prefix)
        if matches:
            return matches

    return []
49
+
50
+
51
def partition_data(data_list, num_partitions):
    """Split *data_list* into *num_partitions* equal contiguous chunks.

    The length of *data_list* must be divisible by *num_partitions*.
    """
    total = len(data_list)
    assert total % num_partitions == 0
    chunk = total // num_partitions
    return [data_list[start:start + chunk] for start in range(0, total, chunk)]
57
+
58
+
59
+ def _key_list_to_string(key_list):
60
+ return '.'.join(key_list)
61
+
62
+
63
def merge_state_dict(dict_a, dict_b, key_list):
    """Merge two state dicts key-wise via merge_state.

    Keys present in both dicts are merged recursively; keys only in *dict_b*
    are copied as-is (keys exclusive to *dict_a* are dropped, since only
    *dict_b*'s keys are iterated). *key_list* tracks the nested key path for
    error reporting.
    """
    merged_dict = type(dict_a)({})

    for key, value in dict_b.items():
        if key in dict_a:
            # Extend (not replace) the accumulated key path so that error
            # messages from deeper merge failures show the full nested
            # location; the original passed only [str(key)], losing the path.
            merged_dict[key] = merge_state(dict_a[key], dict_b[key], key_list + [str(key)])
        else:
            merged_dict[key] = value

    return merged_dict
73
+
74
+
75
def merge_state_list(list_a, list_b, key_list):
    """Element-wise merge of two equal-length sequences via merge_state.

    Raises ValueError (after printing the key path) on a length mismatch.
    """
    len_a, len_b = len(list_a), len(list_b)
    if len_a != len_b:
        print(f'{_key_list_to_string(key_list)}')
        raise ValueError(f'Cannot merge lists of different lengths, a = {len_a} b = {len_b}')

    merged = []
    for item_a, item_b in zip(list_a, list_b):
        merged.append(merge_state(item_a, item_b, key_list))
    return merged
81
+
82
+
83
def merge_state(state_a, state_b, key_list=None):
    """Recursively merge two checkpoint states of identical structure.

    Dicts are merged key-wise, lists/tuples element-wise, tensors are
    concatenated along dim 0, and any other type keeps the value from
    *state_a*. *key_list* tracks the nested key path for error reporting.

    Raises:
        ValueError: if the two states have different types.
    """
    # Fixed: the original used a mutable default argument (key_list=[]),
    # which is shared across calls; use a None sentinel instead.
    if key_list is None:
        key_list = []

    if type(state_a) != type(state_b):
        key_list_string = _key_list_to_string(key_list)
        print(f'key_list = {key_list_string}')
        raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}')

    if type(state_a) in (dict, OrderedDict):
        return merge_state_dict(state_a, state_b, key_list)
    elif type(state_a) in (list, tuple):
        return type(state_a)(merge_state_list(state_a, state_b, key_list))
    elif torch.is_tensor(state_a):
        # ZeRO partitions are flat 1-D shards, so concatenation restores the
        # merged flat buffer.
        return torch.cat([state_a, state_b], 0)
    else:
        # Scalars/strings are assumed replicated; keep the first copy.
        return state_a
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import torch
8
+ import types
9
+ from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS)
10
+
11
+
12
def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
    """Load this parameter's high-precision (fp32 weight + optimizer) state
    from a universal-checkpoint *folder* and copy the fragment owned by this
    rank into the tensors registered in ``self._hp_mapping``.

    Args:
        folder: per-parameter checkpoint folder holding one ``<key>.pt`` file
            per state key.
        tp_rank: this rank's tensor-parallel index.
        tp_world_size: target tensor-parallel degree.
    """
    hp_mapping = self._hp_mapping
    optim_state_keys = hp_mapping.get_optim_state_keys()
    # fp32 weight plus every optimizer state key reported by the mapping.
    hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys
    checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys}
    for file in checkpoint_files.values():
        assert os.path.isfile(file), f'{file} is not a valid file'

    for key in hp_keys:
        ckpt_file = checkpoint_files[key]
        ckpt_dict = torch.load(ckpt_file)
        full_hp_param = ckpt_dict[PARAM]

        # need to deal with slices that were averaged.
        # the opposite of averaging here becomes an exact copy of the first slice.
        # if the shapes already match it must be slices that were averaged —
        # so treat this rank as owning the whole (replicated) parameter.
        if full_hp_param.shape == self.shape:
            tp_rank = 0
            tp_world_size = 1

        # special case for word_embeddings weights which get padded differently depending on TP degree.
        # the converter to universal currently strips the original padding completely so the saved
        # weight is padding-free and we just need to add new padding depending on the target TP
        # degree
        is_vocab_tensor = ckpt_dict.get(VOCAB_TENSOR, False)
        if is_vocab_tensor:
            # In the absence of data passed from the user wrt new padded vocab specific to tp degree
            # we can again derive that data by reverse engineering the target shapes like so:
            padded_target_vocab_size = self.shape[0] * tp_world_size
            assert padded_target_vocab_size >= full_hp_param.shape[0], \
                f'Vocab tensor padded size {padded_target_vocab_size} < loaded universal size {full_hp_param.shape[0]}'
            if padded_target_vocab_size > full_hp_param.shape[0]:
                # Zero-pad rows at the end of the vocab dimension (dim 0).
                padding_size = padded_target_vocab_size - full_hp_param.shape[0]
                full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0)

        full_param_numel = full_hp_param.numel()
        tp_slice_numel = self.numel()

        assert full_param_numel == tp_world_size * tp_slice_numel, \
            f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
        # fp32 weight goes into hp_fragment; optimizer states into their own fragments.
        dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(key)

        # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse.
        # special case is when a single parameter is effectively a container for multiple sub parameters
        # (more details at PARAM_N_SUB_PARAMS definition)
        chunk_dim = ckpt_dict.get(CAT_DIM, 0)
        n_sub_params = ckpt_dict.get(PARAM_N_SUB_PARAMS, 1)
        if n_sub_params > 1:
            # Slice each sub-parameter independently, then re-concatenate.
            sub_params = full_hp_param.chunk(n_sub_params, dim=chunk_dim)
            sub_params_tp_slice = [p.chunk(tp_world_size, dim=chunk_dim)[tp_rank] for p in sub_params]
            tp_hp_slice = torch.cat(sub_params_tp_slice, dim=chunk_dim)
        else:
            # this performs the opposite of cat when merging TP slices
            tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]

        tp_hp_slice = tp_hp_slice.flatten()

        # Narrow down to the contiguous fragment this partition actually owns.
        lp_frag_address = hp_mapping.lp_fragment_address
        tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel)
        assert dst_tensor.numel() == lp_frag_address.numel, \
            f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'

        dst_tensor.data.copy_(tp_hp_fragment.data)
94
+
95
+
96
def enable_universal_checkpoint(param_list):
    """Attach load_hp_checkpoint_state as a bound method on every parameter."""
    for parameter in param_list:
        parameter.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, parameter)
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import torch
8
+ from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)
9
+
10
+
11
def get_model_ckpt_name_for_rank(base_folder, mp_rank_str):
    """Build the model checkpoint path for a given model-parallel rank string."""
    file_name = MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX
    return os.path.join(base_folder, file_name)
17
+
18
+
19
def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):
    """Build the ZeRO optimizer checkpoint path for a (dp_rank, mp_rank) pair."""
    file_name = f'{ZERO_FILE_PREFIX}{dp_rank}_{MODEL_FILE_PREFIX}{mp_rank:02d}{OPTIM_FILE_SUFFIX}'
    return os.path.join(base_folder, file_name)
27
+
28
+
29
def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):
    """Build the per-layer checkpoint path for a given tensor-parallel rank."""
    layer_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
    return os.path.join(base_folder, layer_file)
33
+
34
+
35
+ # We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()
36
+ # saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
37
+ # This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
38
+ # Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
39
+ # It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
40
+ # See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
41
def clone_tensors_for_torch_save(item, device=torch.device('cpu')):
    """
    Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.
    Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.

    Parameters:
        - ``item``: tensor to clone or (possibly nested) container of tensors to clone.
        - ``device``: target device (defaults to 'cpu')

    Returns:
        - copy of ``item`` with cloned tensors on target device
    """
    if torch.is_tensor(item):
        # detach() drops autograd history; clone() breaks storage sharing so
        # torch.save() serializes only this tensor's data, not the whole
        # underlying flat buffer it may be a view of.
        return item.detach().clone().to(device)
    if isinstance(item, dict):
        return type(item)({key: clone_tensors_for_torch_save(val, device) for key, val in item.items()})
    if isinstance(item, tuple):
        return tuple(clone_tensors_for_torch_save(val, device) for val in item)
    if isinstance(item, list):
        return [clone_tensors_for_torch_save(val, device) for val in item]
    # Non-tensor, non-container values pass through untouched.
    return item
parrot/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from .constants import (BASE_OPTIMIZER_STATE, GROUP_PADDINGS, OPTIMIZER_STATE_DICT, PARTITION_COUNT)
9
+
10
+ from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state)
11
+
12
+ from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
13
+
14
+ GROUP_STATE_KEY = 'state'
15
+
16
+
17
class ZeROCheckpoint(object):
    """Represents the ZeRO optimizer-state shards of a checkpoint folder and
    supports merging/reshaping them to a new (pp, tp, dp) topology."""

    def __init__(self, dir):
        basic_folder_validation(dir)
        self.dir = dir
        self.file_list = get_zero_files(dir)
        self.num_files = len(self.file_list)
        assert self.num_files > 0, f'No ZeRO files found in {dir}'

        # Source topology is inferred from the files on disk; the target
        # starts out identical and can be changed via reshape().
        self.src_3d = get_model_3d_descriptor(dir)
        self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree,
                                       tp_degree=self.src_3d.tp_degree,
                                       dp_degree=self.src_3d.dp_degree)
        self._3d_file_map = self.src_3d.reshape(self.target_3d)

    def get_src_world_size(self):
        return self.src_3d.world_size()

    def get_src_tp_degree(self):
        return self.src_3d.tp_degree

    def get_src_pp_degree(self):
        return self.src_3d.pp_degree

    def get_src_dp_degree(self):
        return self.src_3d.dp_degree

    def get_file_indices_for_rank(self, pp_index, tp_index, dp_index):
        """Return indices into file_list for the given target 3D coordinate."""
        assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}'
        dp_2d_map = self._3d_file_map[dp_index]
        return dp_2d_map.get_data(pp_index, tp_index)

    def get_files_for_rank(self, pp_index, tp_index, dp_index):
        """Return the ZeRO shard file paths for the given target 3D coordinate."""
        file_idx_list = self.get_file_indices_for_rank(pp_index, tp_index, dp_index)
        return [self.file_list[idx] for idx in file_idx_list]

    def get_state_for_rank(self, pp_index, tp_index, dp_index, keys_to_ignore=None, strip_tensor_paddings=True):
        """Load and merge all source shards mapped to one target rank.

        Args:
            pp_index, tp_index, dp_index: target 3D coordinate.
            keys_to_ignore: optional top-level keys to drop from each shard
                before merging (fixed: was a mutable default argument ``[]``).
            strip_tensor_paddings: remove ZeRO alignment padding from optimizer
                state tensors and zero out the recorded group paddings.

        Returns:
            The merged state dict, or None if no files map to this rank.
        """
        if keys_to_ignore is None:
            keys_to_ignore = []

        state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index)
        merged_sd = None
        for state_file in state_file_list:
            sd = torch.load(state_file, map_location=torch.device('cpu'))
            for key in keys_to_ignore:
                sd.pop(key, None)

            if strip_tensor_paddings:
                self._strip_tensor_paddings(sd)

            merged_sd = sd if merged_sd is None else merge_state(merged_sd, sd)

        self._update_partition_count(merged_sd)
        if strip_tensor_paddings:
            self._clear_group_paddings(merged_sd)

        return merged_sd

    def print_3d_index_map(self, tag=None):
        """Print the file-index map for every DP replica (debugging aid)."""
        if tag:
            print(f'3D index map: {tag}')
        for dp_index, _2d_map in enumerate(self._3d_file_map):
            _2d_map.print_data(f'dp = {dp_index}')

    def print_3d_file_map(self, tag=None):
        """Print the file paths mapped to every 3D coordinate (debugging aid)."""
        if tag:
            print(f'3D file map: {tag}')
        for dp_index, _2d_map in enumerate(self._3d_file_map):
            # Fixed: pp_degree/tp_degree are ints; the original iterated them
            # directly, which raised TypeError. Wrap in range().
            for pp_index in range(_2d_map.pp_degree):
                for tp_index in range(_2d_map.tp_degree):
                    file_index_list = _2d_map.get_data(pp_index, tp_index)
                    file_list = [self.file_list[idx] for idx in file_index_list]
                    print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}')

    def reshape(self, target_3d_desc: model_3d_desc):
        """Retarget the file map to a new 3D topology."""
        self.target_3d = target_3d_desc
        self._3d_file_map = self.src_3d.reshape(self.target_3d)

    def _strip_tensor_paddings(self, sd):
        # Trim ZeRO alignment padding from each param group's state tensors,
        # using the padding lengths recorded in the optimizer state.
        param_group_states = self._get_param_group_states(sd)
        if param_group_states is None:
            return

        group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
        if group_paddings is None:
            return

        for key, group_state in param_group_states.items():
            if group_paddings[key] == 0:
                continue
            for state_name, state_value in group_state.items():
                if torch.is_tensor(state_value):
                    raw_length = state_value.numel() - group_paddings[key]
                    # clone() so the trimmed tensor owns its storage.
                    group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone()

    def _clear_group_paddings(self, sd):
        # After stripping, the recorded paddings must read as zero.
        group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
        if group_paddings:
            num_groups = len(group_paddings)
            sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups

    def _get_optimizer_state(self, sd, state_key):
        # Return sd[OPTIMIZER_STATE_DICT][state_key], or None if absent.
        optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
        if optimizer_state is None:
            return None

        return optimizer_state.get(state_key, None)

    def _get_param_group_states(self, sd):
        # Return the per-group 'state' dict of the wrapped base optimizer, or None.
        optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
        if optimizer_state is None:
            return None

        base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None)
        if base_optimizer_state is None:
            return None

        return base_optimizer_state.get(GROUP_STATE_KEY, None)

    def _update_partition_count(self, sd):
        # The merged state now spans target dp_degree partitions per group.
        partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT)
        if partition_counts:
            num_groups = len(partition_counts)
            sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree] * num_groups
parrot/lib/python3.10/site-packages/deepspeed/constants.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ from datetime import timedelta
8
+
9
+ #############################################
10
+ # Torch distributed constants
11
+ #############################################
12
+ TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
13
+
14
+ # Default process group wide timeout, if applicable.
15
+ # This only applies to the gloo and nccl backends
16
+ # (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
17
+ # To make an attempt at backwards compatibility with THD, we use an
18
+ # extraordinarily high default timeout, given that THD did not have timeouts.
19
+ default_pg_timeout = timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30)))
20
+ INFERENCE_GENERIC_MODE = 'generic'
21
+ INFERENCE_SPECIALIZED_MODE = 'specialized'
parrot/lib/python3.10/site-packages/deepspeed/env_report.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import torch
8
+ import deepspeed
9
+ import subprocess
10
+ import argparse
11
+ from .ops.op_builder.all_ops import ALL_OPS
12
+ from .git_version_info import installed_ops, torch_info
13
+ from deepspeed.accelerator import get_accelerator
14
+
15
# ANSI escape codes used to colorize console status output.
GREEN = '\033[92m'
RED = '\033[91m'
YELLOW = '\033[93m'
END = '\033[0m'  # reset all attributes
# Pre-colored status labels.
SUCCESS = f"{GREEN} [SUCCESS] {END}"
OKAY = f"{GREEN}[OKAY]{END}"
WARNING = f"{YELLOW}[WARNING]{END}"
FAIL = f'{RED}[FAIL]{END}'
INFO = '[INFO]'

# Number of invisible characters a color wrapper adds; used for column alignment.
color_len = len(GREEN) + len(END)
# Lowercase variants of the labels above.
okay = f"{GREEN}[OKAY]{END}"
warning = f"{YELLOW}[WARNING]{END}"
28
+
29
+
30
def op_report(verbose=True):
    """Print a table showing, for each DeepSpeed C++/CUDA extension op, whether
    it is pre-installed and whether this system can JIT-compile it.

    Args:
        verbose: passed through to each op builder's compatibility check.
    """
    # Dot-padding widths used to align the three printed columns.
    max_dots = 23
    max_dots2 = 11
    h = ["op name", "installed", "compatible"]
    print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
    print("DeepSpeed C++/CUDA extension op report")
    print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))

    print("NOTE: Ops not installed will be just-in-time (JIT) compiled at\n"
          " runtime if needed. Op compatibility means that your system\n"
          " meet the required dependencies to JIT install the op.")

    print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
    print("JIT compiled ops requires ninja")
    ninja_status = OKAY if ninja_installed() else FAIL
    print('ninja', "." * (max_dots - 5), ninja_status)
    print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
    print(h[0], "." * (max_dots - len(h[0])), h[1], "." * (max_dots2 - len(h[1])), h[2])
    print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
    installed = f"{GREEN}[YES]{END}"
    no = f"{YELLOW}[NO]{END}"
    for op_name, builder in ALL_OPS.items():
        dots = "." * (max_dots - len(op_name))
        is_compatible = OKAY if builder.is_compatible(verbose) else no
        is_installed = installed if installed_ops.get(op_name, False) else no
        # Subtract color_len to compensate for invisible ANSI characters when
        # padding the "installed" column.
        dots2 = '.' * ((len(h[1]) + (max_dots2 - len(h[1]))) - (len(is_installed) - color_len))
        print(op_name, dots, is_installed, dots2, is_compatible)
    print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
59
+
60
def ninja_installed():
    """Return True if the ninja build tool's Python package can be imported."""
    try:
        import ninja  # noqa: F401 # type: ignore
    except ImportError:
        return False
    else:
        return True
67
+
68
def nvcc_version():
    """Return the CUDA compiler (nvcc) release as 'major.minor', or a colored
    failure message when CUDA_HOME or the nvcc binary cannot be found."""
    import torch.utils.cpp_extension
    cuda_home = torch.utils.cpp_extension.CUDA_HOME
    if cuda_home is None:
        return f"{RED} [FAIL] cannot find CUDA_HOME via torch.utils.cpp_extension.CUDA_HOME={torch.utils.cpp_extension.CUDA_HOME} {END}"
    try:
        output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
    except FileNotFoundError:
        return f"{RED} [FAIL] nvcc missing {END}"
    # Parse "... release X.Y, ..." from the nvcc -V banner.
    tokens = output.split()
    version_token = tokens[tokens.index("release") + 1]
    release_parts = version_token.replace(',', '').split(".")
    return ".".join(release_parts)
82
+
83
def installed_cann_path():
    """Return the Ascend CANN toolkit home (ASCEND_HOME_PATH) if set, else None.

    Fixes the original condition, which combined the membership test and an
    environment read with ``or`` — when ASCEND_HOME_PATH was unset, the second
    clause read ``os.environ["ASCEND_HOME_PATH"]`` and raised KeyError instead
    of returning None.
    """
    if "ASCEND_HOME_PATH" in os.environ:
        return os.environ["ASCEND_HOME_PATH"]
    return None
87
+
88
+
89
def installed_cann_version():
    """Return the installed CANN toolkit version string, or an error message
    when the CANN home directory cannot be located."""
    import re
    ascend_path = installed_cann_path()
    if ascend_path is None:
        return f"CANN_HOME does not exist, unable to compile NPU op(s)"
    cann_version = ""
    # Walk the CANN home looking for an ascend_*_install.info file that
    # contains a "version=..." line; stop at the first hit.
    for dirpath, _, filenames in os.walk(os.path.realpath(ascend_path)):
        if cann_version:
            break
        install_files = [name for name in filenames if re.match(r"ascend_.*_install\.info", name)]
        if not install_files:
            continue
        info_path = os.path.join(dirpath, install_files[0])
        with open(info_path, "r") as info_file:
            for line in info_file:
                if "version" in line:
                    cann_version = line.strip().split("=")[-1]
                    break
    return cann_version
107
+
108
+
109
def get_shm_size():
    """Return (human-readable /dev/shm size, warning list or None).

    Emits warnings when shared memory looks too small for typical multi-GPU
    (NCCL) workloads; returns ("UNKNOWN", None) when /dev/shm is unavailable.
    """
    try:
        shm_stats = os.statvfs('/dev/shm')
    except (OSError, FileNotFoundError, ValueError):
        return "UNKNOWN", None

    total_bytes = shm_stats.f_frsize * shm_stats.f_blocks
    warn = []
    if total_bytes < 512 * 1024**2:
        warn.append(
            f" {YELLOW} [WARNING] /dev/shm size might be too small, if running in docker increase to at least --shm-size='1gb' {END}"
        )
        if get_accelerator().communication_backend_name() == "nccl":
            warn.append(
                f" {YELLOW} [WARNING] see more details about NCCL requirements: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/troubleshooting.html#sharing-data {END}"
            )
    return human_readable_size(total_bytes), warn
127
+
128
+
129
def human_readable_size(size):
    """Format a byte count using the largest unit (up to TB) that keeps the
    value below 1024, with two decimal places."""
    units = ['B', 'KB', 'MB', 'GB', 'TB']
    unit_idx = 0
    while size >= 1024 and unit_idx < len(units) - 1:
        size /= 1024
        unit_idx += 1
    return f'{size:.2f} {units[unit_idx]}'
136
+
137
+
138
def debug_report():
    """Print general environment info: torch/deepspeed install paths and
    versions, accelerator-specific toolkit versions, and /dev/shm size."""
    # Dot-padding width for the printed name column.
    max_dots = 33

    report = [("torch install path", torch.__path__), ("torch version", torch.__version__),
              ("deepspeed install path", deepspeed.__path__),
              ("deepspeed info", f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}")]
    if get_accelerator().device_name() == 'cuda':
        # hip_version is set on ROCm builds of torch; its presence switches
        # the report to HIP details and skips nvcc.
        hip_version = getattr(torch.version, "hip", None)
        report.extend([("torch cuda version", torch.version.cuda), ("torch hip version", hip_version),
                       ("nvcc version", (None if hip_version else nvcc_version())),
                       ("deepspeed wheel compiled w.", f"torch {torch_info['version']}, " +
                        (f"hip {torch_info['hip_version']}" if hip_version else f"cuda {torch_info['cuda_version']}"))
                       ])
    elif get_accelerator().device_name() == 'npu':
        import torch_npu
        report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']}"),
                       ("torch_npu install path", torch_npu.__path__), ("torch_npu version", torch_npu.__version__),
                       ("ascend_cann version", installed_cann_version())])
    else:
        report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']} ")])

    report.append(("shared memory (/dev/shm) size", get_shm_size()))

    print("DeepSpeed general environment info:")
    for name, value in report:
        warns = []
        # get_shm_size() returns a (value, warnings) tuple; unpack such entries.
        if isinstance(value, tuple):
            value, warns = value
        print(name, "." * (max_dots - len(name)), value)
        if warns:
            for warn in warns:
                print(warn)
170
+
171
+
172
def parse_arguments():
    """Parse the ds_report command-line flags."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--hide_operator_status',
                        action='store_true',
                        help='Suppress display of installation and compatibility statuses of DeepSpeed operators. ')
    parser.add_argument('--hide_errors_and_warnings', action='store_true', help='Suppress warning and error messages.')
    return parser.parse_args()
180
+
181
+
182
def main(hide_operator_status=False, hide_errors_and_warnings=False):
    """Entry point for ds_report: print the op status table (unless hidden)
    followed by the general environment report."""
    if not hide_operator_status:
        op_report(verbose=not hide_errors_and_warnings)
    debug_report()
186
+
187
+
188
def cli_main():
    """Console-script entry point: parse CLI flags and delegate to main()."""
    args = parse_arguments()
    main(hide_operator_status=args.hide_operator_status, hide_errors_and_warnings=args.hide_errors_and_warnings)
191
+
192
+
193
+ if __name__ == "__main__":
194
+ main()
parrot/lib/python3.10/site-packages/deepspeed/git_version_info.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
# Version/build metadata. Normally populated by setup.py via the generated
# git_version_info_installed module; for source checkouts that were never
# installed (e.g. readthedocs) fall back to version.txt or placeholders.
try:
    # This is populated by setup.py
    from .git_version_info_installed import *  # noqa: F401 # type: ignore
except ModuleNotFoundError:
    import os
    if os.path.isfile('version.txt'):
        # Will be missing from checkouts that haven't been installed (e.g., readthedocs)
        with open('version.txt', 'r') as version_file:
            # Use a context manager so the handle is closed instead of leaked.
            version = version_file.read().strip()
    else:
        version = "0.0.0"
    git_hash = '[none]'
    git_branch = '[none]'

    # No generated metadata available: mark every known op as not installed
    # and not verified compatible, with placeholder torch build info.
    from .ops.op_builder.all_ops import ALL_OPS
    installed_ops = dict.fromkeys(ALL_OPS.keys(), False)
    compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
    torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"}
parrot/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# Auto-generated by DeepSpeed's setup.py at wheel-build time; do not edit by hand.
version='0.14.0'
git_hash='unknown'
git_branch='unknown'
# Ops pre-built into this wheel (all False => every op JIT-compiles on first use).
installed_ops={'async_io': False, 'fused_adam': False, 'cpu_adam': False, 'cpu_adagrad': False, 'cpu_lion': False, 'evoformer_attn': False, 'fused_lamb': False, 'fused_lion': False, 'inference_core_ops': False, 'cutlass_ops': False, 'transformer_inference': False, 'quantizer': False, 'ragged_device_ops': False, 'ragged_ops': False, 'random_ltd': False, 'sparse_attn': False, 'spatial_inference': False, 'transformer': False, 'stochastic_transformer': False}
# Ops whose build prerequisites were satisfied on the machine that built the wheel.
compatible_ops={'async_io': False, 'fused_adam': True, 'cpu_adam': True, 'cpu_adagrad': True, 'cpu_lion': True, 'evoformer_attn': False, 'fused_lamb': True, 'fused_lion': True, 'inference_core_ops': False, 'cutlass_ops': False, 'transformer_inference': False, 'quantizer': True, 'ragged_device_ops': False, 'ragged_ops': False, 'random_ltd': True, 'sparse_attn': False, 'spatial_inference': False, 'transformer': True, 'stochastic_transformer': True, 'deepspeed_not_implemented': False}
# Torch build info captured at build time ('0.0' placeholders => presumably built
# without a pinned torch — confirm against the wheel's build environment).
torch_info={'version': '0.0', 'bf16_support': False, 'cuda_version': '0.0', 'nccl_version': '0.0', 'hip_version': '0.0'}
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .transformers.ds_transformer import DeepSpeedTransformerInference
7
+ from .transformers.clip_encoder import DSClipEncoder
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc ADDED
Binary file (2.79 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc ADDED
Binary file (873 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc ADDED
Binary file (877 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc ADDED
Binary file (869 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc ADDED
Binary file (1.98 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc ADDED
Binary file (903 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc ADDED
Binary file (869 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc ADDED
Binary file (5.46 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from deepspeed.accelerator import get_accelerator
8
+ from ..features.cuda_graph import CUDAGraph
9
+
10
+
11
class DSClipEncoder(CUDAGraph, torch.nn.Module):
    """CUDA-graph wrapper around a CLIP encoder module.

    Wraps an existing encoder ``enc`` and, when ``enable_cuda_graph`` is set,
    captures its forward pass into accelerator graphs so subsequent calls
    replay the captured kernels. Two capture slots are kept and used
    alternately: ``self.iter`` flips between 0 and 1 after every graphed call.
    """

    def __init__(self, enc, enable_cuda_graph=False):
        super().__init__(enable_cuda_graph=enable_cuda_graph)
        # Swap in the accelerator-aware causal-mask builder defined below.
        enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask
        self.enc = enc
        self.device = self.enc.device
        self.dtype = self.enc.dtype
        # Per-slot capture state; index 0/1 matches the alternating self.iter.
        self.cuda_graph_created = [False, False]
        self.static_inputs = [None, None]
        self.static_kwargs = [None, None]
        self.static_output = [None, None]
        self._cuda_graphs = [None, None]
        self.iter = 0
        self.config = self.enc.config

    def _build_causal_attention_mask(self, bsz, seq_len, dtype):
        """Build an additive causal mask of shape (bsz, 1, seq_len, seq_len).

        Positions at or before the diagonal are 0; future positions hold the
        most negative representable value for ``dtype``.
        """
        mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name())
        mask.fill_(torch.tensor(torch.finfo(dtype).min))
        mask.triu_(1)  # zero the lower triangle (allowed positions)
        mask = mask.unsqueeze(1)
        return mask

    def _graph_replay(self, *inputs, **kwargs):
        # Copy live tensor args into the captured graph's static buffers,
        # then replay the recorded kernels; output lives in the static slot.
        for i in range(len(inputs)):
            if torch.is_tensor(inputs[i]):
                self.static_inputs[self.iter][i].copy_(inputs[i])
        for k in kwargs:
            if torch.is_tensor(kwargs[k]):
                self.static_kwargs[self.iter][k].copy_(kwargs[k])
        get_accelerator().replay_graph(self._cuda_graphs[self.iter])
        return self.static_output[self.iter]

    def forward(self, *inputs, **kwargs):
        """Run the wrapped encoder, via graph replay when graphs are enabled."""
        if self.enable_cuda_graph:
            if self.cuda_graph_created[self.iter]:
                outputs = self._graph_replay(*inputs, **kwargs)
            else:
                # First call on this slot: capture the graph, then replay it
                # immediately so this call still produces an output.
                self._create_cuda_graph(*inputs, **kwargs)
                outputs = self._graph_replay(*inputs, **kwargs)
            self.iter = (self.iter + 1) % 2
            return outputs
        else:
            return self.enc(*inputs, **kwargs)

    def _create_cuda_graph(self, *inputs, **kwargs):
        # warmup to create the workspace and cublas handle
        cuda_stream = torch.cuda.Stream()
        cuda_stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(cuda_stream):
            for i in range(3):
                ret = self._forward(*inputs, **kwargs)
        torch.cuda.current_stream().wait_stream(cuda_stream)

        # create cuda_graph and assign static_inputs and static_outputs
        self._cuda_graphs[self.iter] = get_accelerator().create_graph()
        # The call-time tensors become the graph's static buffers; replay
        # copies fresh values into these same tensors.
        self.static_inputs[self.iter] = inputs
        self.static_kwargs[self.iter] = kwargs

        with get_accelerator().capture_to_graph(self._cuda_graphs[self.iter]):
            self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter],
                                                          **self.static_kwargs[self.iter])

        self.cuda_graph_created[self.iter] = True

    def _forward(self, *inputs, **kwargs):
        # Direct (non-graphed) invocation of the wrapped encoder.
        return self.enc(*inputs, **kwargs)
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch.nn as nn
7
+
8
+
9
class DeepSpeedTransformerBase(nn.Module):
    """Placeholder base class for a future clean transformer implementation.

    Intended to eventually replace ``DeepSpeedTransformerInference``; its
    final shape is not yet known, so the class is deliberately empty.
    """

    def __init__(self):
        # Fix: the original subclassed ``nn.module`` (lowercase), which raises
        # AttributeError at import time, and never ran nn.Module.__init__,
        # which is required before subclasses can register parameters/buffers.
        super().__init__()

    # this would be the new clean base class that will replace DeepSpeedTransformerInference.
    # we currently don't know how this will look like but keeping it here as a placeholder.
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
7
+
8
+
9
class DeepSpeedBERTInference(DeepSpeedTransformerInference):
    """DeepSpeed inference-optimized BERT transformer layer.

    A naming specialization only: every argument is forwarded unchanged to
    the generic ``DeepSpeedTransformerInference`` layer.
    """

    def __init__(self, config, mp_group=None, quantize_scales=None, quantize_groups=1, merge_count=1,
                 mlp_extra_grouping=False):
        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
7
+
8
+
9
class DeepSpeedBloomInference(DeepSpeedTransformerInference):
    """DeepSpeed inference-optimized Bloom transformer layer.

    A naming specialization only: every argument is forwarded unchanged to
    the generic ``DeepSpeedTransformerInference`` layer.
    """

    def __init__(self, config, mp_group=None, quantize_scales=None, quantize_groups=1, merge_count=1,
                 mlp_extra_grouping=False):
        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
7
+
8
+
9
class DeepSpeedGPTInference(DeepSpeedTransformerInference):
    """DeepSpeed inference-optimized GPT transformer layer.

    A naming specialization only: every argument is forwarded unchanged to
    the generic ``DeepSpeedTransformerInference`` layer.
    """

    def __init__(self, config, mp_group=None, quantize_scales=None, quantize_groups=1, merge_count=1,
                 mlp_extra_grouping=False):
        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from deepspeed import comm as dist
8
+ from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
9
+
10
+ inference_module = None
11
+
12
+
13
class DeepSpeedLlama2Inference(DeepSpeedTransformerInference):
    """Initialize the DeepSpeed Llama2 Transformer Layer.

    Specialized decoder forward pass: no attention mask is threaded through
    (``input_mask`` is always None) and KV-cache handling is simplified
    relative to the generic ``DeepSpeedTransformerInference.forward``.
    (Docstring fix: the original said "OPT", a copy-paste from ds_opt.py.)
    """

    def __init__(self,
                 config,
                 mp_group=None,
                 quantize_scales=None,
                 quantize_groups=1,
                 merge_count=1,
                 mlp_extra_grouping=False):
        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)

    def forward(self, *args, **kwargs):
        """Run one decoder layer; the hidden states arrive as ``args[0]``."""
        input = args[0]
        input_mask = None
        # Allocate memory only on first layer forward
        if self.config.layer_id == 0 and self._alloc_workspace:
            self.allocate_workspace(self.config.hidden_size, self.config.heads,
                                    input.size()[1],
                                    input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
                                    self.config.bigscience_bloom,
                                    dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
                                    self.config.min_out_tokens)
            self._alloc_workspace = False

        get_present = True

        # We set the prev key/value to None when there is a prompt
        if input.shape[1] > 1:
            self.layer_past = None
        layer_past = self.layer_past

        input_type = input.dtype

        if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
            and input.dtype == torch.float:
            # Fix: consult self.config.dtype as the generic layer does; the
            # original read a bare `self.dtype`, which this class never sets.
            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
            input = input.to(target_dtype)

        with torch.no_grad():
            attention_output, key, value, context_outputtn_ctx, inp_norm = \
                self.attention(input,
                               input_mask,
                               None,
                               layer_past,
                               get_present,
                               None, None, None,
                               self.norm_w,
                               self.norm_b,
                               None)
            self.layer_past = (key, value)
            output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)

        # Restore the caller's dtype so mixed-precision pipelines keep working.
        output = output.to(input_type)
        return output
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
7
+
8
+
9
class DeepSpeedMegatronGPTInference(DeepSpeedTransformerInference):
    """DeepSpeed inference-optimized Megatron GPT transformer layer.

    A naming specialization only: every argument is forwarded unchanged to
    the generic ``DeepSpeedTransformerInference`` layer.
    """

    def __init__(self, config, mp_group=None, quantize_scales=None, quantize_groups=1, merge_count=1,
                 mlp_extra_grouping=False):
        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
7
+
8
+
9
class DeepSpeedOPTInference(DeepSpeedTransformerInference):
    """DeepSpeed inference-optimized OPT transformer layer.

    A naming specialization only: every argument is forwarded unchanged to
    the generic ``DeepSpeedTransformerInference`` layer.
    """

    def __init__(self, config, mp_group=None, quantize_scales=None, quantize_groups=1, merge_count=1,
                 mlp_extra_grouping=False):
        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
parrot/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from deepspeed import comm as dist
9
+ from deepspeed.utils.logging import log_dist
10
+
11
+ from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP
12
+ from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention
13
+ from deepspeed.accelerator import get_accelerator
14
+ from deepspeed.ops.op_builder import InferenceBuilder
15
+ import deepspeed
16
+ if deepspeed.HAS_TRITON:
17
+ from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP
18
+ from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention
19
+
20
+ inference_module = None
21
+
22
+
23
class DeepSpeedTransformerInference(nn.Module):
    """Initialize the DeepSpeed Transformer Layer.
        Arguments:
            layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
                layer_id will be 0,1,2...23 when each layer object is instantiated
            config: An object of DeepSpeedInferenceConfig
            mp_group: Model parallelism group initialized on the modeling side.
            quantize_scales: This argument groups all the layers' scales used for quantization
            quantize_groups: Number of groups used for quantizing the model
            merge_count: Shows the number of model-parallel checkpoints merged before running inference.
                We use this argument to control the quantization scale for the model parameters if a bigger
                quantize-grouping than 1 is used.
            mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part
                of a Transformer layer. We use this feature for quantization to reduce the convergence impact
                for specific downstream tasks.
    """
    # Class-wide counter: each constructed layer claims the next id.
    layer_id = 0

    def __init__(self,
                 config,
                 mp_group=None,
                 quantize_scales=None,
                 quantize_groups=1,
                 merge_count=1,
                 mlp_extra_grouping=False):
        super(DeepSpeedTransformerInference, self).__init__()

        self.config = config
        self.config.layer_id = DeepSpeedTransformerInference.layer_id
        DeepSpeedTransformerInference.layer_id += 1

        # int8-quantized configs still keep fp16 norm parameters.
        data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
        global inference_module
        if inference_module is None:
            # Lazily JIT-load the fused inference kernels once per process.
            builder = InferenceBuilder()
            inference_module = builder.load()

        if DeepSpeedTransformerInference.layer_id == 1:
            # Log config only when the first layer of the model is built.
            log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0])
            if deepspeed.HAS_TRITON and self.config.use_triton:
                log_dist(f"Injecting Triton kernels ...", [0])

        if self.config.bigscience_bloom:
            self.attention = BloomSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
            # Bloom's alibi attention has no Triton implementation here.
            assert not self.config.use_triton
        else:
            if deepspeed.HAS_TRITON and self.config.use_triton:
                self.attention = TritonSelfAttention(self.config)
            else:
                self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups,
                                                        merge_count)

        if deepspeed.HAS_TRITON and self.config.use_triton:
            self.mlp = TritonMLP(self.config)
        else:
            self.mlp = DeepSpeedMLP(self.config, mp_group, quantize_scales, quantize_groups, merge_count,
                                    mlp_extra_grouping)

        device = get_accelerator().current_device_name()  # if config.bigscience_bloom else 'cpu'
        if self.config.set_empty_params:
            # Norm weights are injected later by the checkpoint loader.
            self.norm_w = None
            self.norm_b = None
        else:
            self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
                                       requires_grad=False)
            self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
                                       requires_grad=False)
        self.layer_past = None
        try:
            # Pick the workspace allocator matching the config dtype.
            # NOTE(review): fp16/int8 configs also fall through to the fp32
            # allocator here — confirm this is intended.
            if config.dtype == torch.float32:
                self.allocate_workspace = inference_module.allocate_workspace_fp32
            elif config.dtype == torch.bfloat16:
                self.allocate_workspace = inference_module.allocate_workspace_bf16
            else:
                self.allocate_workspace = inference_module.allocate_workspace_fp32
            self._alloc_workspace = True
        except AttributeError:
            # Builds without workspace symbols: skip explicit allocation.
            self.allocate_workspace = None
            self._alloc_workspace = False

    @classmethod
    def reset_cache(cls):
        """Clear the kernel-side KV cache (no-op if kernels never loaded)."""
        if inference_module is not None:
            inference_module.reset_cache()

    def forward(
            self,
            input=None,
            input_mask=None,
            attention_mask=None,
            attn_mask=None,
            head_mask=None,
            layer_past=None,
            get_key_value=False,
            get_present=False,
            encoder_output=None,
            enc_dec_attn_mask=None,
            x=None,
            encoder_hidden_states=None,
            encoder_attention_mask=None,
            use_cache=False,
            alibi=None,
            output_attentions=False,
            # TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API.
            # This needs to be redesigned later!
            layer_head_mask=None,
            past_key_value=None,
            **kwargs):
        """Run one transformer layer.

        The wide parameter list mirrors several HF model APIs; hidden states
        may arrive as ``input``, ``x``, or ``kwargs['hidden_states']`` (the
        latter two take precedence, in that order).
        """
        if x is not None:
            input = x
        if "hidden_states" in kwargs:
            input = kwargs["hidden_states"]

        # Mask priority: attention_mask > attn_mask > input_mask.
        input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask

        # Allocate memory only on first layer forward
        if self.config.layer_id == 0 and self._alloc_workspace:
            self.allocate_workspace(self.config.hidden_size, self.config.heads,
                                    input.size()[1],
                                    input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
                                    self.config.bigscience_bloom,
                                    dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
                                    self.config.min_out_tokens)
            self._alloc_workspace = False

        get_present = (get_present or get_key_value or use_cache)
        input_mask = input_mask if attention_mask is None else attention_mask

        # We set the prev key/value to None when there is a prompt
        if input.shape[1] > 1:
            self.layer_past = None
        layer_past = layer_past if layer_past is not None else self.layer_past
        head_mask = layer_head_mask if layer_head_mask is not None else head_mask

        attn_mask = None
        if isinstance(input, tuple):
            # Some pipelines pass (hidden_states, mask) tuples.
            attn_mask = input[1]
            input = input[0]
        input_type = input.dtype

        # Cast fp32 activations down when the kernels run in reduced precision.
        if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
            and input.dtype == torch.float:
            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
            input = input.to(target_dtype)

        with torch.no_grad():
            attention_output, key, value, context_outputtn_ctx, inp_norm = \
                self.attention(input,
                               input_mask,
                               head_mask,
                               layer_past,
                               get_present,
                               encoder_hidden_states,
                               encoder_attention_mask,
                               output_attentions,
                               self.norm_w,
                               self.norm_b,
                               alibi)

            presents = (key, value)
            # Only cache internally when the caller is not managing the cache.
            self.layer_past = presents if layer_past is None else None
            output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)

            if not self.config.pre_layer_norm:
                # Post-LN architectures normalize the block output.
                output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon)

        # Restore the caller's original dtype.
        output = output.to(input_type)
        if get_present:
            output = (output, presents)

        if self.config.return_single_tuple:
            return (output, )
        elif self.config.return_tuple:
            return output if type(output) is tuple else (output, attn_mask)
        else:
            return output
parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (219 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/csv_monitor.cpython-310.pyc ADDED
Binary file (1.93 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/monitor.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/tensorboard.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/wandb.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/monitor/csv_monitor.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .monitor import Monitor
7
+ import os
8
+
9
+ import deepspeed.comm as dist
10
+
11
+
12
class csvMonitor(Monitor):
    """Monitor backend that appends each logged metric to its own CSV file."""

    def __init__(self, csv_config):
        super().__init__(csv_config)
        # Metric files this process has already written a header row for.
        self.filenames = []
        self.enabled = csv_config.enabled
        self.output_path = csv_config.output_path
        self.job_name = csv_config.job_name
        self.log_dir = self.setup_log_dir()

    def setup_log_dir(self, base=os.path.join(os.path.expanduser("~"), "csv_monitor")):
        """Create and return the CSV output directory.

        Only acts on global rank 0 with monitoring enabled; otherwise returns
        None. Note that ``base`` (default ~/csv_monitor) is evaluated once at
        import time, per Python default-argument semantics.
        """
        if self.enabled and dist.get_rank() == 0:
            if self.output_path is not None:
                log_dir = os.path.join(self.output_path, self.job_name)
            # NOTE: This code path currently is never used since the default tensorboard_output_path is an empty string and not None. Saving it in case we want this functionality in the future.
            else:
                # Derive a per-job directory from cluster job-id env vars.
                if "DLWS_JOB_ID" in os.environ:
                    infra_job_id = os.environ["DLWS_JOB_ID"]
                elif "DLTS_JOB_ID" in os.environ:
                    infra_job_id = os.environ["DLTS_JOB_ID"]
                else:
                    infra_job_id = "unknown-job-id"

                csv_monitor_dir_name = os.path.join(infra_job_id, "logs")
                log_dir = os.path.join(base, csv_monitor_dir_name, self.job_name)
            os.makedirs(log_dir, exist_ok=True)
            return log_dir

    def write_events(self, event_list):
        """Append events to per-metric CSV files (rank 0 only).

        Each event is a tensorboard-style tuple ``(log_name, value, step)``;
        the log_name is sanitized into a filename and its last '/'-segment
        becomes the column header.
        """
        if self.enabled and dist.get_rank() == 0:
            import csv
            # We assume each event_list element is a tensorboard-style tuple in the format: (log_name: String, value, step: Int)
            for event in event_list:
                log_name = event[0]
                value = event[1]
                step = event[2]

                # Set the header to the log_name
                # Need this check because the deepspeed engine currently formats log strings to separate with '/'
                if '/' in log_name:
                    record_splits = log_name.split('/')
                    header = record_splits[len(record_splits) - 1]
                else:
                    header = log_name

                # sanitize common naming conventions into filename
                filename = log_name.replace('/', '_').replace(' ', '_')
                fname = self.log_dir + '/' + filename + '.csv'

                # Open file and record event. Insert header if this is the first time writing
                # NOTE(review): header tracking is per-process — restarting a
                # job appends a second header row mid-file. Confirm intended.
                with open(fname, 'a+') as csv_monitor_file:
                    csv_monitor_writer = csv.writer(csv_monitor_file)
                    if filename not in self.filenames:
                        self.filenames.append(filename)
                        csv_monitor_writer.writerow(['step', header])
                    csv_monitor_writer.writerow([step, value])
parrot/lib/python3.10/site-packages/deepspeed/monitor/monitor.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Support different forms of monitoring such as wandb and tensorboard
7
+ """
8
+
9
+ from abc import ABC, abstractmethod
10
+ import deepspeed.comm as dist
11
+
12
+
13
class Monitor(ABC):
    """Abstract interface shared by all logging backends (tensorboard/wandb/csv)."""

    @abstractmethod
    def __init__(self, monitor_config):
        # Subclasses call up to stash their section of the monitor config.
        self.monitor_config = monitor_config

    @abstractmethod
    def write_events(self, event_list):
        # event_list: iterable of tensorboard-style (name, value, step) tuples.
        pass
22
+
23
+
24
+ from .wandb import WandbMonitor
25
+ from .tensorboard import TensorBoardMonitor
26
+ from .csv_monitor import csvMonitor
27
+
28
+
29
class MonitorMaster(Monitor):
    """Fan-out monitor that forwards events to every backend enabled in config.

    Backends are only instantiated on global rank 0, and writes are likewise
    rank-0 only, so non-zero ranks hold a cheap no-op object.
    """

    def __init__(self, monitor_config):
        super().__init__(monitor_config)
        self.tb_monitor = None
        self.wandb_monitor = None
        self.csv_monitor = None
        self.enabled = monitor_config.enabled

        if dist.get_rank() == 0:
            cfg = monitor_config
            if cfg.tensorboard.enabled:
                self.tb_monitor = TensorBoardMonitor(cfg.tensorboard)
            if cfg.wandb.enabled:
                self.wandb_monitor = WandbMonitor(cfg.wandb)
            if cfg.csv_monitor.enabled:
                self.csv_monitor = csvMonitor(cfg.csv_monitor)

    def write_events(self, event_list):
        """Forward the event batch to every active backend (rank 0 only)."""
        if dist.get_rank() != 0:
            return
        for backend in (self.tb_monitor, self.wandb_monitor, self.csv_monitor):
            if backend is not None:
                backend.write_events(event_list)
parrot/lib/python3.10/site-packages/deepspeed/monitor/utils.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+
7
def check_tb_availability():
    """Verify the `tensorboard` package is importable; re-raise ImportError with a pip hint if not."""
    try:
        # torch.utils.tensorboard will fail if `tensorboard` is not available,
        # see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html
        import importlib
        importlib.import_module("tensorboard")  # noqa: F401
    except ImportError:
        print('If you want to use tensorboard logging, please `pip install tensorboard`')
        raise
15
+
16
+
17
def check_wandb_availability():
    """Verify the `wandb` package is importable; re-raise ImportError with setup instructions if not."""
    try:
        import importlib
        importlib.import_module("wandb")  # noqa: F401
    except ImportError:
        print(
            'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart'
        )
        raise
parrot/lib/python3.10/site-packages/deepspeed/ops/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from . import adam
7
+ from . import adagrad
8
+ from . import lamb
9
+ from . import lion
10
+ #from ..git_version_info_installed import installed_ops as __installed_ops__
11
+ #if __installed_ops__['sparse_attn']:
12
+ from . import sparse_attention
13
+ from . import transformer
14
+
15
+ from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
16
+
17
+ from ..git_version_info import compatible_ops as __compatible_ops__
parrot/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc ADDED
Binary file (5.47 kB). View file