ZTWHHH committed on
Commit
ceeaaea
·
verified ·
1 Parent(s): 8a94bfe

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/bin/sqlite3 +3 -0
  3. parrot/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py +7 -0
  5. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py +181 -0
  10. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py +195 -0
  11. parrot/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py +17 -0
  12. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py +53 -0
  13. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/all_ops.py +32 -0
  16. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py +99 -0
  17. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py +774 -0
  18. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py +24 -0
  23. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adagrad.py +43 -0
  24. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adam.py +44 -0
  25. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py +48 -0
  26. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py +72 -0
  27. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py +37 -0
  28. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lamb.py +40 -0
  29. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py +37 -0
  30. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/fused_adam.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/no_impl.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/builder.py +37 -0
  33. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/fused_adam.py +29 -0
  34. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/no_impl.py +24 -0
  35. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_core_ops.py +104 -0
  36. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_cutlass_builder.py +92 -0
  37. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/quantizer.py +38 -0
  38. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_ops.py +115 -0
  39. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_utils.py +77 -0
  40. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/random_ltd.py +34 -0
  41. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/sparse_attn.py +82 -0
  42. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/spatial_inference.py +45 -0
  43. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/stochastic_transformer.py +22 -0
  44. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/transformer.py +36 -0
  45. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/transformer_inference.py +74 -0
  46. parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__init__.py +6 -0
  47. parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/__init__.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/dropping_utils.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/dropping_utils.py +132 -0
  50. parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -180,3 +180,4 @@ parrot/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310
180
  parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
181
  parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
182
  parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
 
 
180
  parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
181
  parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
182
  parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
183
+ parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
parrot/bin/sqlite3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7c98c978efe537f94f522a16697b0ca86b384ca5e7a7bbc3eece38923194046
3
+ size 1777144
parrot/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (516 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cpu_adam import DeepSpeedCPUAdam
7
+ from .fused_adam import FusedAdam
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (263 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc ADDED
Binary file (6.49 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (6.38 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc ADDED
Binary file (779 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from cpuinfo import get_cpu_info
8
+ from deepspeed.utils import logger
9
+ from deepspeed.utils.logging import should_log_le
10
+ from deepspeed.ops.op_builder import CPUAdamBuilder
11
+
12
+
13
class DeepSpeedCPUAdam(torch.optim.Optimizer):
    # Class-level counter: each instance claims the current value as its
    # opt_id, which keys the per-optimizer state held inside the C++ extension.
    optimizer_id = 0

    def __init__(self,
                 model_params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 weight_decay=0,
                 amsgrad=False,
                 adamw_mode=True,
                 fp32_optimizer_states=True):
        """Fast vectorized implementation of two variations of Adam optimizer on CPU:

        * Adam: A Method for Stochastic Optimization: (https://arxiv.org/abs/1412.6980);
        * AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)

        DeepSpeed CPU Adam(W) provides between 5x to 7x speedup over torch.optim.adam(W).
        In order to apply this optimizer, the model requires to have its master parameter (in FP32)
        reside on the CPU memory.

        To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
        the ZeRO-Offload technology which efficiently offloads the optimizer states into CPU memory,
        with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role to minimize
        the overhead of the optimizer's latency on CPU. Please refer to ZeRO-Offload tutorial
        (https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology.

        For calling step function, there are two options available: (1) update optimizer's states and (2) update
        optimizer's states and copy the parameters back to GPU at the same time. We have seen that the second
        option can bring 30% higher throughput than the doing the copy separately using option one.


        .. note::
                We recommend using our `config
                <https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
                to allow :meth:`deepspeed.initialize` to build this optimizer
                for you.


        Arguments:
            model_params (iterable): iterable of parameters to optimize or dicts defining
                parameter groups.
            lr (float, optional): learning rate. (default: 1e-3)
            betas (Tuple[float, float], optional): coefficients used for computing
                running averages of gradient and its square. (default: (0.9, 0.999))
            eps (float, optional): term added to the denominator to improve
                numerical stability. (default: 1e-8)
            weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
            amsgrad (boolean, optional): whether to use the AMSGrad variant of this
                algorithm from the paper `On the Convergence of Adam and Beyond`_
                (default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
            adamw_mode: select between Adam and AdamW implementations (default: AdamW)
            fp32_optimizer_states: creates momentum and variance in full precision regardless of
                        the precision of the parameters (default: True)
        """

        default_args = dict(lr=lr,
                            betas=betas,
                            eps=eps,
                            weight_decay=weight_decay,
                            bias_correction=bias_correction,
                            amsgrad=amsgrad)
        super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)

        cpu_info = get_cpu_info()
        self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
        if "amd" in self.cpu_vendor:
            # Warn at most once if any parameter is FP16 on an AMD CPU.
            # The for/else/break dance exits BOTH loops as soon as the first
            # half-precision parameter is found (the inner `break` skips the
            # `else: continue`, so the outer `break` runs).
            for group_id, group in enumerate(self.param_groups):
                for param_id, p in enumerate(group['params']):
                    if p.dtype == torch.half:
                        logger.warning("FP16 params for CPUAdam may not work on AMD CPUs")
                        break
                else:
                    continue
                break

        self.opt_id = DeepSpeedCPUAdam.optimizer_id
        DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
        self.adam_w_mode = adamw_mode
        self.fp32_optimizer_states = fp32_optimizer_states
        # Compile/load the C++ CPU Adam extension and register this optimizer
        # instance with it under self.opt_id.
        self.ds_opt_adam = CPUAdamBuilder().load()

        self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode,
                                     should_log_le("info"))

    def __del__(self):
        # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
        # is used multiple times in the same process (notebook or pytest worker)
        # NOTE(review): if __init__ raised before ds_opt_adam was assigned,
        # this would raise AttributeError during GC — confirm acceptable.
        self.ds_opt_adam.destroy_adam(self.opt_id)

    def __setstate__(self, state):
        """Restore pickled state, defaulting 'amsgrad' for old checkpoints."""
        super(DeepSpeedCPUAdam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None, fp16_param_groups=None):
        """Update the model parameters.

        .. note::
            This method will be called internally by ZeRO-Offload. DeepSpeed
            users should still use ``engine.step()`` as shown in the
            `Getting Started
            <https://www.deepspeed.ai/getting-started/#training>`_ guide.

        Args:
            closure (callable, optional): closure to compute the loss.
                Defaults to ``None``.
            fp16_param_groups: FP16 GPU parameters to update. Performing the
                copy here reduces communication time. Defaults to ``None``.

        Returns:
            loss: if ``closure`` is provided. Otherwise ``None``.
        """

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        # intended device for step
        device = torch.device('cpu')

        # converting the fp16 params to a group of parameter
        # Normalize fp16_param_groups into a list-of-lists so it can be
        # indexed as [group_id][param_id] below.
        if type(fp16_param_groups) is list:
            if type(fp16_param_groups[0]) is not list:
                fp16_param_groups = [fp16_param_groups]
        elif fp16_param_groups is not None:
            fp16_param_groups = [[fp16_param_groups]]

        for group_id, group in enumerate(self.param_groups):
            for param_id, p in enumerate(group['params']):

                if p.grad is None:
                    continue

                assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
                        "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    #print(f'group {group_id} param {param_id} = {p.numel()}')
                    state['step'] = 0

                    #use full precision by default unless self.fp32_optimizer_states is off
                    state_dtype = torch.float if self.fp32_optimizer_states else p.dtype

                    # gradient momentums
                    state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
                    #memory_format=torch.preserve_format)
                    # gradient variances
                    state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
                    #memory_format=torch.preserve_format)

                state['step'] += 1
                beta1, beta2 = group['betas']

                # Single fused C++ call per parameter; the *_copy variant also
                # writes the updated values into the matching FP16 GPU tensor.
                if fp16_param_groups is not None:
                    self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
                                                      group['eps'], group['weight_decay'], group['bias_correction'],
                                                      p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'],
                                                      fp16_param_groups[group_id][param_id].data)
                else:
                    self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
                                                 group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
                                                 state['exp_avg'], state['exp_avg_sq'])
        return loss
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright NVIDIA/apex
7
+ This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
8
+ """
9
+
10
+ import torch
11
+ from .multi_tensor_apply import MultiTensorApply
12
+
13
+ multi_tensor_applier = MultiTensorApply(2048 * 32)
14
+ from deepspeed.accelerator import get_accelerator
15
+ from deepspeed.ops.op_builder import FusedAdamBuilder
16
+
17
+
18
class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Currently GPU-only. Requires Apex to be installed via
    ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.

    This version of fused Adam implements 2 fusions.

      * Fusion of the Adam update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.

    :class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
    or ``torch.optim.Adam`` with ``adam_w_mode=False``::

        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        ...
        opt.step()

    :class:`apex.optimizers.FusedAdam` may be used with or without Amp.  If you wish to use :class:`FusedAdam` with Amp,
    you may choose any ``opt_level``::

        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1 or "O2")
        ...
        opt.step()

    In general, ``opt_level="O1"`` is recommended.


    .. warning::
        A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``.  These additional arguments
        are now deprecated and unnecessary.

    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay(also known as AdamW) (default: True)
        set_grad_none (bool, optional): whether set grad to None when zero_grad()
            method is called. (default: True)

    .. _Adam - A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self,
                 params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 adam_w_mode=True,
                 weight_decay=0.,
                 amsgrad=False,
                 set_grad_none=True):

        if amsgrad:
            raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
        defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
        super(FusedAdam, self).__init__(params, defaults)
        # The fused kernel takes adam_w_mode as an int flag (1 = AdamW).
        self.adam_w_mode = 1 if adam_w_mode else 0
        self.set_grad_none = set_grad_none

        # Load the compiled fused Adam CUDA extension.
        fused_adam_cuda = FusedAdamBuilder().load()
        # Skip buffer
        self._dummy_overflow_buf = get_accelerator().IntTensor([0])
        self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam

    def zero_grad(self):
        # When set_grad_none is True, drop gradient tensors entirely instead of
        # zeroing them (saves memory and a kernel launch).
        if self.set_grad_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedAdam, self).zero_grad()

    def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
        """
        if any(p is not None for p in [grads, output_params, scale, grad_norms]):
            raise RuntimeError(
                'FusedAdam has been updated.  Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
            )
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            if len(group['params']) == 0:
                continue
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']

            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' not in group:
                group['step'] = 0

            # create lists for multi-tensor apply
            # Parameters are bucketed by dtype because the fused kernel is
            # launched once per homogeneous tensor list.
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_bf, p_bf, m_bf, v_bf = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError(
                        'FusedAdam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # DeepSpeed ZeRO 3 processes each subgroup a time, so we need to keep tracking step count for each tensor separately.
                    # While this is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time.
                    # In order to keep backward compatibility for the existing checkpoints, we use group['state'] to initialize state['step'] if it exists.
                    state['step'] = group.get('step', 0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype == torch.float16:
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state['exp_avg'])
                    v_16.append(state['exp_avg_sq'])
                elif p.dtype == torch.bfloat16:
                    g_bf.append(p.grad)
                    p_bf.append(p)
                    m_bf.append(state['exp_avg'])
                    v_bf.append(state['exp_avg_sq'])
                elif p.dtype == torch.float32:
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state['exp_avg'])
                    v_32.append(state['exp_avg_sq'])
                else:
                    raise RuntimeError('FusedAdam only support fp16, bf16 and fp32.')

            # NOTE(review): 'state' below refers to the LAST parameter visited
            # in the loop above, and its 'step' is incremented once per
            # non-empty dtype bucket. If a group mixes dtypes, that single
            # tensor's step advances more than once per optimizer step while
            # the others' never do — confirm this matches the kernel's
            # expectations upstream before relying on per-tensor step counts.
            if len(g_16) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

            if len(g_bf) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

            if len(g_32) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

        return loss
parrot/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright NVIDIA/apex
7
+ This file is adapted from NVIDIA/apex, commit a109f85
8
+ """
9
+
10
+
11
class MultiTensorApply(object):
    """Callable wrapper around a fused multi-tensor kernel launch.

    Adapted from NVIDIA/apex (commit a109f85). The instance remembers the
    chunk size once, then prepends it — along with the caller's no-op flag
    buffer and tensor lists — to every kernel invocation.
    """

    def __init__(self, chunk_size):
        # Element count processed per kernel chunk; fixed for the lifetime
        # of this wrapper.
        self.chunk_size = chunk_size

    def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
        # Forward everything to the underlying op, injecting the stored
        # chunk size as the leading argument.
        result = op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
        return result
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import sys
7
+ import os
8
+ import pkgutil
9
+ import importlib
10
+
11
+ from .builder import get_default_compute_capabilities, OpBuilder
12
+
13
+ # Do not remove, required for abstract accelerator to detect if we have a deepspeed or 3p op_builder
14
+ __deepspeed__ = True
15
+
16
+ # List of all available op builders from deepspeed op_builder
17
+ try:
18
+ import deepspeed.ops.op_builder # noqa: F401 # type: ignore
19
+ op_builder_dir = "deepspeed.ops.op_builder"
20
+ except ImportError:
21
+ op_builder_dir = "op_builder"
22
+
23
+ __op_builders__ = []
24
+
25
+ this_module = sys.modules[__name__]
26
+
27
+
28
def builder_closure(member_name):
    """Return something callable under the builder's name.

    At install time (op_builder imported outside the deepspeed package, so
    torch may not be installed yet) returns a zero-arg factory that defers
    accelerator lookup until called; at runtime returns the accelerator's
    builder class directly.
    """
    if op_builder_dir == "op_builder":
        # during installation time cannot get builder due to torch not installed,
        # return closure instead
        def _builder():
            from deepspeed.accelerator import get_accelerator
            builder = get_accelerator().create_op_builder(member_name)
            return builder

        return _builder
    else:
        # during runtime, return op builder class directly
        from deepspeed.accelerator import get_accelerator
        builder = get_accelerator().get_op_builder(member_name)
        return builder


# reflect builder names and add builder closure, such as 'TransformerBuilder()' creates op builder wrt current accelerator
# Scans every sibling module in this package (skipping 'all_ops' and the base
# 'builder' module) and exposes each *Builder symbol at this module's top
# level, bound to the closure above.
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]):
    if module_name != 'all_ops' and module_name != 'builder':
        module = importlib.import_module(f".{module_name}", package=op_builder_dir)
        for member_name in module.__dir__():
            if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder":
                # assign builder name to variable with same name
                # the following is equivalent to i.e. TransformerBuilder = "TransformerBuilder"
                this_module.__dict__[member_name] = builder_closure(member_name)
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc ADDED
Binary file (2.99 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc ADDED
Binary file (3.67 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/all_ops.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import pkgutil
8
+ import importlib
9
try:
    # during installation time accelerator is visible, otherwise return deepspeed.accelerator
    from accelerator import get_accelerator
except ImportError:
    from deepspeed.accelerator import get_accelerator

# List of all available ops

# reflect all builder names into __op_builders__
# Resolve which op_builder package the current accelerator uses, then walk
# its modules and instantiate every class whose name ends in 'Builder'.
op_builder_dir = get_accelerator().op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
__op_builders__ = []

for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
    # avoid self references
    if module_name != 'all_ops' and module_name != 'builder':
        module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
        for member_name in module.__dir__():
            if member_name.endswith('Builder'):
                # append builder to __op_builders__ list
                # NOTE(review): create_op_builder presumably returns None for
                # ops the accelerator does not support — the filter below
                # relies on that; confirm against the accelerator interface.
                builder = get_accelerator().create_op_builder(member_name)
                __op_builders__.append(builder)

# Final registry: op name -> builder instance, with unsupported (None) ops
# filtered out.
ALL_OPS = {op.name: op for op in __op_builders__ if op is not None}
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import distutils.spawn
7
+ import subprocess
8
+
9
+ from .builder import OpBuilder
10
+
11
+
12
class AsyncIOBuilder(OpBuilder):
    """Op builder for the libaio-backed asynchronous I/O extension.

    Compiles the csrc/aio sources against libaio and exposes compatibility
    probing (is_compatible) that checks for the libaio dev package.
    """
    BUILD_VAR = "DS_BUILD_AIO"
    NAME = "async_io"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        # Fully qualified module path the built extension is imported as.
        return f'deepspeed.ops.aio.{self.NAME}_op'

    def sources(self):
        # C++ translation units of the aio op, relative to the repo root.
        return [
            'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
            'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
            'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
            'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
            'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
        ]

    def include_paths(self):
        return ['csrc/aio/py_lib', 'csrc/aio/common']

    def cxx_args(self):
        # -O0 for improved debugging, since performance is bound by I/O
        CPU_ARCH = self.cpu_arch()
        SIMD_WIDTH = self.simd_width()
        import torch  # Keep this import here to avoid errors when building DeepSpeed wheel without torch installed
        TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2])
        # BUGFIX: compare the (major, minor) pair lexicographically. The old
        # test `TORCH_MAJOR >= 2 and TORCH_MINOR >= 1` wrongly fell back to
        # c++14 for any future x.0 release (e.g. torch 3.0, where minor 0
        # fails the `>= 1` check) even though such versions are newer than 2.1.
        if (TORCH_MAJOR, TORCH_MINOR) >= (2, 1):
            CPP_STD = '-std=c++17'
        else:
            CPP_STD = '-std=c++14'
        return [
            '-g',
            '-Wall',
            '-O0',
            CPP_STD,
            '-shared',
            '-fPIC',
            '-Wno-reorder',
            CPU_ARCH,
            '-fopenmp',
            SIMD_WIDTH,
            '-laio',
        ]

    def extra_ldflags(self):
        # Link against libaio.
        return ['-laio']

    def check_for_libaio_pkg(self):
        """Probe known package managers for the libaio dev package.

        Returns True if any package manager reports libaio installed;
        otherwise warns with the install suggestion for the first package
        manager found on PATH and returns False.
        """
        libs = dict(
            dpkg=["-l", "libaio-dev", "apt"],
            pacman=["-Q", "libaio", "pacman"],
            rpm=["-q", "libaio-devel", "yum"],
        )

        found = False
        for pkgmgr, data in libs.items():
            flag, lib, tool = data
            path = distutils.spawn.find_executable(pkgmgr)
            if path is not None:
                # Query only the first package manager present on this system.
                cmd = f"{pkgmgr} {flag} {lib}"
                result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                if result.wait() == 0:
                    found = True
                else:
                    self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
                break
        return found

    def is_compatible(self, verbose=True):
        # Check for the existence of libaio by using distutils
        # to compile and link a test program that calls io_submit,
        # which is a function provided by libaio that is used in the async_io op.
        # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
        # respectively to specify the directories for libaio.h and libaio.so.
        aio_compatible = self.has_function('io_pgetevents', ('aio', ))
        if verbose and not aio_compatible:
            self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")

            # Check for the libaio package via known package managers
            # to print suggestions on which package to install.
            self.check_for_libaio_pkg()

            self.warning(
                "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
            )
        return super().is_compatible(verbose) and aio_compatible
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py ADDED
@@ -0,0 +1,774 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import sys
8
+ import time
9
+ import importlib
10
+ from pathlib import Path
11
+ import subprocess
12
+ import shlex
13
+ import shutil
14
+ import tempfile
15
+ import distutils.ccompiler
16
+ import distutils.log
17
+ import distutils.sysconfig
18
+ from distutils.errors import CompileError, LinkError
19
+ from abc import ABC, abstractmethod
20
+ from typing import List
21
+
22
+ YELLOW = '\033[93m'
23
+ END = '\033[0m'
24
+ WARNING = f"{YELLOW} [WARNING] {END}"
25
+
26
+ DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions"
27
+ DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0"
28
+
29
+ try:
30
+ import torch
31
+ except ImportError:
32
+ print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.")
33
+ else:
34
+ TORCH_MAJOR = int(torch.__version__.split('.')[0])
35
+ TORCH_MINOR = int(torch.__version__.split('.')[1])
36
+
37
+
38
+ class MissingCUDAException(Exception):
39
+ pass
40
+
41
+
42
+ class CUDAMismatchException(Exception):
43
+ pass
44
+
45
+
46
+ def installed_cuda_version(name=""):
47
+ import torch.utils.cpp_extension
48
+ cuda_home = torch.utils.cpp_extension.CUDA_HOME
49
+ if cuda_home is None:
50
+ raise MissingCUDAException("CUDA_HOME does not exist, unable to compile CUDA op(s)")
51
+ # Ensure there is not a cuda version mismatch between torch and nvcc compiler
52
+ output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
53
+ output_split = output.split()
54
+ release_idx = output_split.index("release")
55
+ release = output_split[release_idx + 1].replace(',', '').split(".")
56
+ # Ignore patch versions, only look at major + minor
57
+ cuda_major, cuda_minor = release[:2]
58
+ return int(cuda_major), int(cuda_minor)
59
+
60
+
61
+ def get_default_compute_capabilities():
62
+ compute_caps = DEFAULT_COMPUTE_CAPABILITIES
63
+ import torch.utils.cpp_extension
64
+ if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11:
65
+ if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0:
66
+ # Special treatment of CUDA 11.0 because compute_86 is not supported.
67
+ compute_caps += ";8.0"
68
+ else:
69
+ compute_caps += ";8.0;8.6"
70
+ return compute_caps
71
+
72
+
73
+ # list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used
74
+ # to build deepspeed and system-wide installed cuda 11.2
75
+ cuda_minor_mismatch_ok = {
76
+ 10: ["10.0", "10.1", "10.2"],
77
+ 11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"],
78
+ 12: ["12.0", "12.1", "12.2", "12.3"],
79
+ }
80
+
81
+
82
+ def assert_no_cuda_mismatch(name=""):
83
+ cuda_major, cuda_minor = installed_cuda_version(name)
84
+ sys_cuda_version = f'{cuda_major}.{cuda_minor}'
85
+ torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
86
+ # This is a show-stopping error, should probably not proceed past this
87
+ if sys_cuda_version != torch_cuda_version:
88
+ if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major]
89
+ and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]):
90
+ print(f"Installed CUDA version {sys_cuda_version} does not match the "
91
+ f"version torch was compiled with {torch.version.cuda} "
92
+ "but since the APIs are compatible, accepting this combination")
93
+ return True
94
+ elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1":
95
+ print(
96
+ f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
97
+ f"version torch was compiled with {torch.version.cuda}."
98
+ "Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior."
99
+ )
100
+ return True
101
+ raise CUDAMismatchException(
102
+ f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
103
+ f"version torch was compiled with {torch.version.cuda}, unable to compile "
104
+ "cuda/cpp extensions without a matching cuda version.")
105
+ return True
106
+
107
+
108
+ class OpBuilder(ABC):
109
+ _rocm_version = None
110
+ _is_rocm_pytorch = None
111
+ _is_sycl_enabled = None
112
+ _loaded_ops = {}
113
+
114
+ def __init__(self, name):
115
+ self.name = name
116
+ self.jit_mode = False
117
+ self.build_for_cpu = False
118
+ self.enable_bf16 = False
119
+ self.error_log = None
120
+
121
+ @abstractmethod
122
+ def absolute_name(self):
123
+ '''
124
+ Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam
125
+ will be installed as something like: deepspeed/ops/adam/cpu_adam.so
126
+ '''
127
+ pass
128
+
129
+ @abstractmethod
130
+ def sources(self):
131
+ '''
132
+ Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
133
+ '''
134
+ pass
135
+
136
+ def hipify_extension(self):
137
+ pass
138
+
139
+ def sycl_extension(self):
140
+ pass
141
+
142
+ @staticmethod
143
+ def validate_torch_version(torch_info):
144
+ install_torch_version = torch_info['version']
145
+ current_torch_version = ".".join(torch.__version__.split('.')[:2])
146
+ if install_torch_version != current_torch_version:
147
+ raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed "
148
+ "with a different version than what is being used at runtime. "
149
+ f"Please re-install DeepSpeed or switch torch versions. "
150
+ f"Install torch version={install_torch_version}, "
151
+ f"Runtime torch version={current_torch_version}")
152
+
153
+ @staticmethod
154
+ def validate_torch_op_version(torch_info):
155
+ if not OpBuilder.is_rocm_pytorch():
156
+ current_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
157
+ install_cuda_version = torch_info['cuda_version']
158
+ if install_cuda_version != current_cuda_version:
159
+ raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed "
160
+ "with a different version than what is being used at runtime. "
161
+ f"Please re-install DeepSpeed or switch torch versions. "
162
+ f"Install CUDA version={install_cuda_version}, "
163
+ f"Runtime CUDA version={current_cuda_version}")
164
+ else:
165
+ current_hip_version = ".".join(torch.version.hip.split('.')[:2])
166
+ install_hip_version = torch_info['hip_version']
167
+ if install_hip_version != current_hip_version:
168
+ raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed "
169
+ "with a different version than what is being used at runtime. "
170
+ f"Please re-install DeepSpeed or switch torch versions. "
171
+ f"Install HIP version={install_hip_version}, "
172
+ f"Runtime HIP version={current_hip_version}")
173
+
174
+ @staticmethod
175
+ def is_rocm_pytorch():
176
+ if OpBuilder._is_rocm_pytorch is not None:
177
+ return OpBuilder._is_rocm_pytorch
178
+
179
+ _is_rocm_pytorch = False
180
+ try:
181
+ import torch
182
+ except ImportError:
183
+ pass
184
+ else:
185
+ if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
186
+ _is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None
187
+ if _is_rocm_pytorch:
188
+ from torch.utils.cpp_extension import ROCM_HOME
189
+ _is_rocm_pytorch = ROCM_HOME is not None
190
+ OpBuilder._is_rocm_pytorch = _is_rocm_pytorch
191
+ return OpBuilder._is_rocm_pytorch
192
+
193
+ @staticmethod
194
+ def is_sycl_enabled():
195
+ if OpBuilder._is_sycl_enabled is not None:
196
+ return OpBuilder._is_sycl_enabled
197
+
198
+ _is_sycl_enabled = False
199
+ try:
200
+ result = subprocess.run(["c2s", "--version"], capture_output=True)
201
+ except:
202
+ pass
203
+ else:
204
+ _is_sycl_enabled = True
205
+
206
+ OpBuilder._is_sycl_enabled = _is_sycl_enabled
207
+ return OpBuilder._is_sycl_enabled
208
+
209
+ @staticmethod
210
+ def installed_rocm_version():
211
+ if OpBuilder._rocm_version:
212
+ return OpBuilder._rocm_version
213
+
214
+ ROCM_MAJOR = '0'
215
+ ROCM_MINOR = '0'
216
+ if OpBuilder.is_rocm_pytorch():
217
+ from torch.utils.cpp_extension import ROCM_HOME
218
+ rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev")
219
+ if rocm_ver_file.is_file():
220
+ with open(rocm_ver_file, 'r') as file:
221
+ ROCM_VERSION_DEV_RAW = file.read()
222
+ elif "rocm" in torch.__version__:
223
+ ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1]
224
+ else:
225
+ assert False, "Could not detect ROCm version"
226
+ assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version"
227
+ ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0]
228
+ ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1]
229
+ OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR))
230
+ return OpBuilder._rocm_version
231
+
232
+ def include_paths(self):
233
+ '''
234
+ Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
235
+ '''
236
+ return []
237
+
238
+ def nvcc_args(self):
239
+ '''
240
+ Returns optional list of compiler flags to forward to nvcc when building CUDA sources
241
+ '''
242
+ return []
243
+
244
+ def cxx_args(self):
245
+ '''
246
+ Returns optional list of compiler flags to forward to the build
247
+ '''
248
+ return []
249
+
250
+ def is_compatible(self, verbose=True):
251
+ '''
252
+ Check if all non-python dependencies are satisfied to build this op
253
+ '''
254
+ return True
255
+
256
+ def extra_ldflags(self):
257
+ return []
258
+
259
+ def has_function(self, funcname, libraries, verbose=False):
260
+ '''
261
+ Test for existence of a function within a tuple of libraries.
262
+
263
+ This is used as a smoke test to check whether a certain library is available.
264
+ As a test, this creates a simple C program that calls the specified function,
265
+ and then distutils is used to compile that program and link it with the specified libraries.
266
+ Returns True if both the compile and link are successful, False otherwise.
267
+ '''
268
+ tempdir = None # we create a temporary directory to hold various files
269
+ filestderr = None # handle to open file to which we redirect stderr
270
+ oldstderr = None # file descriptor for stderr
271
+ try:
272
+ # Echo compile and link commands that are used.
273
+ if verbose:
274
+ distutils.log.set_verbosity(1)
275
+
276
+ # Create a compiler object.
277
+ compiler = distutils.ccompiler.new_compiler(verbose=verbose)
278
+
279
+ # Configure compiler and linker to build according to Python install.
280
+ distutils.sysconfig.customize_compiler(compiler)
281
+
282
+ # Create a temporary directory to hold test files.
283
+ tempdir = tempfile.mkdtemp()
284
+
285
+ # Define a simple C program that calls the function in question
286
+ prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname)
287
+
288
+ # Write the test program to a file.
289
+ filename = os.path.join(tempdir, 'test.c')
290
+ with open(filename, 'w') as f:
291
+ f.write(prog)
292
+
293
+ # Redirect stderr file descriptor to a file to silence compile/link warnings.
294
+ if not verbose:
295
+ filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w')
296
+ oldstderr = os.dup(sys.stderr.fileno())
297
+ os.dup2(filestderr.fileno(), sys.stderr.fileno())
298
+
299
+ # Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames()
300
+ # Otherwise, a local directory will be used instead of tempdir
301
+ drive, driveless_filename = os.path.splitdrive(filename)
302
+ root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else ''
303
+ output_dir = os.path.join(drive, root_dir)
304
+
305
+ # Attempt to compile the C program into an object file.
306
+ cflags = shlex.split(os.environ.get('CFLAGS', ""))
307
+ objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags))
308
+
309
+ # Attempt to link the object file into an executable.
310
+ # Be sure to tack on any libraries that have been specified.
311
+ ldflags = shlex.split(os.environ.get('LDFLAGS', ""))
312
+ compiler.link_executable(objs,
313
+ os.path.join(tempdir, 'a.out'),
314
+ extra_preargs=self.strip_empty_entries(ldflags),
315
+ libraries=libraries)
316
+
317
+ # Compile and link succeeded
318
+ return True
319
+
320
+ except CompileError:
321
+ return False
322
+
323
+ except LinkError:
324
+ return False
325
+
326
+ except:
327
+ return False
328
+
329
+ finally:
330
+ # Restore stderr file descriptor and close the stderr redirect file.
331
+ if oldstderr is not None:
332
+ os.dup2(oldstderr, sys.stderr.fileno())
333
+ if filestderr is not None:
334
+ filestderr.close()
335
+
336
+ # Delete the temporary directory holding the test program and stderr files.
337
+ if tempdir is not None:
338
+ shutil.rmtree(tempdir)
339
+
340
+ def strip_empty_entries(self, args):
341
+ '''
342
+ Drop any empty strings from the list of compile and link flags
343
+ '''
344
+ return [x for x in args if len(x) > 0]
345
+
346
+ def cpu_arch(self):
347
+ try:
348
+ from cpuinfo import get_cpu_info
349
+ except ImportError as e:
350
+ cpu_info = self._backup_cpuinfo()
351
+ if cpu_info is None:
352
+ return "-march=native"
353
+
354
+ try:
355
+ cpu_info = get_cpu_info()
356
+ except Exception as e:
357
+ self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
358
+ "falling back to `lscpu` to get this information.")
359
+ cpu_info = self._backup_cpuinfo()
360
+ if cpu_info is None:
361
+ return "-march=native"
362
+
363
+ if cpu_info['arch'].startswith('PPC_'):
364
+ # gcc does not provide -march on PowerPC, use -mcpu instead
365
+ return '-mcpu=native'
366
+ return '-march=native'
367
+
368
+ def is_cuda_enable(self):
369
+ try:
370
+ assert_no_cuda_mismatch(self.name)
371
+ return '-D__ENABLE_CUDA__'
372
+ except MissingCUDAException:
373
+ print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, "
374
+ "only cpu ops can be compiled!")
375
+ return '-D__DISABLE_CUDA__'
376
+ return '-D__DISABLE_CUDA__'
377
+
378
+ def _backup_cpuinfo(self):
379
+ # Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides
380
+ if not self.command_exists('lscpu'):
381
+ self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo "
382
+ "to detect the CPU architecture. 'lscpu' does not appear to exist on "
383
+ "your system, will fall back to use -march=native and non-vectorized execution.")
384
+ return None
385
+ result = subprocess.check_output('lscpu', shell=True)
386
+ result = result.decode('utf-8').strip().lower()
387
+
388
+ cpu_info = {}
389
+ cpu_info['arch'] = None
390
+ cpu_info['flags'] = ""
391
+ if 'genuineintel' in result or 'authenticamd' in result:
392
+ cpu_info['arch'] = 'X86_64'
393
+ if 'avx512' in result:
394
+ cpu_info['flags'] += 'avx512,'
395
+ elif 'avx512f' in result:
396
+ cpu_info['flags'] += 'avx512f,'
397
+ if 'avx2' in result:
398
+ cpu_info['flags'] += 'avx2'
399
+ elif 'ppc64le' in result:
400
+ cpu_info['arch'] = "PPC_"
401
+
402
+ return cpu_info
403
+
404
+ def simd_width(self):
405
+ try:
406
+ from cpuinfo import get_cpu_info
407
+ except ImportError as e:
408
+ cpu_info = self._backup_cpuinfo()
409
+ if cpu_info is None:
410
+ return '-D__SCALAR__'
411
+
412
+ try:
413
+ cpu_info = get_cpu_info()
414
+ except Exception as e:
415
+ self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
416
+ "falling back to `lscpu` to get this information.")
417
+ cpu_info = self._backup_cpuinfo()
418
+ if cpu_info is None:
419
+ return '-D__SCALAR__'
420
+
421
+ if cpu_info['arch'] == 'X86_64':
422
+ if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']:
423
+ return '-D__AVX512__'
424
+ elif 'avx2' in cpu_info['flags']:
425
+ return '-D__AVX256__'
426
+ return '-D__SCALAR__'
427
+
428
+ def command_exists(self, cmd):
429
+ if '|' in cmd:
430
+ cmds = cmd.split("|")
431
+ else:
432
+ cmds = [cmd]
433
+ valid = False
434
+ for cmd in cmds:
435
+ result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
436
+ valid = valid or result.wait() == 0
437
+
438
+ if not valid and len(cmds) > 1:
439
+ print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!")
440
+ elif not valid and len(cmds) == 1:
441
+ print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!")
442
+ return valid
443
+
444
+ def warning(self, msg):
445
+ self.error_log = f"{msg}"
446
+ print(f"{WARNING} {msg}")
447
+
448
+ def deepspeed_src_path(self, code_path):
449
+ if os.path.isabs(code_path):
450
+ return code_path
451
+ else:
452
+ return os.path.join(Path(__file__).parent.parent.absolute(), code_path)
453
+
454
+ def builder(self):
455
+ from torch.utils.cpp_extension import CppExtension
456
+ include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
457
+ return CppExtension(name=self.absolute_name(),
458
+ sources=self.strip_empty_entries(self.sources()),
459
+ include_dirs=include_dirs,
460
+ extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
461
+ extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
462
+
463
+ def load(self, verbose=True):
464
+ if self.name in __class__._loaded_ops:
465
+ return __class__._loaded_ops[self.name]
466
+
467
+ from deepspeed.git_version_info import installed_ops, torch_info
468
+ if installed_ops.get(self.name, False):
469
+ # Ensure the op we're about to load was compiled with the same
470
+ # torch/cuda versions we are currently using at runtime.
471
+ self.validate_torch_version(torch_info)
472
+ if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder):
473
+ self.validate_torch_op_version(torch_info)
474
+
475
+ op_module = importlib.import_module(self.absolute_name())
476
+ __class__._loaded_ops[self.name] = op_module
477
+ return op_module
478
+ else:
479
+ return self.jit_load(verbose)
480
+
481
+ def jit_load(self, verbose=True):
482
+ if not self.is_compatible(verbose):
483
+ raise RuntimeError(
484
+ f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. {self.error_log}"
485
+ )
486
+ try:
487
+ import ninja # noqa: F401 # type: ignore
488
+ except ImportError:
489
+ raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
490
+
491
+ if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch():
492
+ self.build_for_cpu = not torch.cuda.is_available()
493
+
494
+ self.jit_mode = True
495
+ from torch.utils.cpp_extension import load
496
+
497
+ start_build = time.time()
498
+ sources = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.sources()]
499
+ extra_include_paths = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.include_paths()]
500
+
501
+ # Torch will try and apply whatever CCs are in the arch list at compile time,
502
+ # we have already set the intended targets ourselves we know that will be
503
+ # needed at runtime. This prevents CC collisions such as multiple __half
504
+ # implementations. Stash arch list to reset after build.
505
+ torch_arch_list = None
506
+ if "TORCH_CUDA_ARCH_LIST" in os.environ:
507
+ torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
508
+ os.environ["TORCH_CUDA_ARCH_LIST"] = ""
509
+
510
+ nvcc_args = self.strip_empty_entries(self.nvcc_args())
511
+ cxx_args = self.strip_empty_entries(self.cxx_args())
512
+
513
+ if isinstance(self, CUDAOpBuilder):
514
+ if not self.build_for_cpu and self.enable_bf16:
515
+ cxx_args.append("-DBF16_AVAILABLE")
516
+ nvcc_args.append("-DBF16_AVAILABLE")
517
+ nvcc_args.append("-U__CUDA_NO_BFLOAT16_OPERATORS__")
518
+ nvcc_args.append("-U__CUDA_NO_BFLOAT162_OPERATORS__")
519
+
520
+ if self.is_rocm_pytorch():
521
+ cxx_args.append("-D__HIP_PLATFORM_AMD__=1")
522
+
523
+ op_module = load(name=self.name,
524
+ sources=self.strip_empty_entries(sources),
525
+ extra_include_paths=self.strip_empty_entries(extra_include_paths),
526
+ extra_cflags=cxx_args,
527
+ extra_cuda_cflags=nvcc_args,
528
+ extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
529
+ verbose=verbose)
530
+
531
+ build_duration = time.time() - start_build
532
+ if verbose:
533
+ print(f"Time to load {self.name} op: {build_duration} seconds")
534
+
535
+ # Reset arch list so we are not silently removing it for other possible use cases
536
+ if torch_arch_list:
537
+ os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
538
+
539
+ __class__._loaded_ops[self.name] = op_module
540
+
541
+ return op_module
542
+
543
+
544
+ class CUDAOpBuilder(OpBuilder):
545
+
546
+ def compute_capability_args(self, cross_compile_archs=None):
547
+ """
548
+ Returns nvcc compute capability compile flags.
549
+
550
+ 1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`.
551
+ 2. If neither is set default compute capabilities will be used
552
+ 3. Under `jit_mode` compute capabilities of all visible cards will be used plus PTX
553
+
554
+ Format:
555
+
556
+ - `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples:
557
+
558
+ TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ...
559
+ TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ...
560
+
561
+ - `cross_compile_archs` uses ; separator.
562
+
563
+ """
564
+ ccs = []
565
+ if self.jit_mode:
566
+ # Compile for underlying architectures since we know those at runtime
567
+ for i in range(torch.cuda.device_count()):
568
+ CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i)
569
+ cc = f"{CC_MAJOR}.{CC_MINOR}"
570
+ if cc not in ccs:
571
+ ccs.append(cc)
572
+ ccs = sorted(ccs)
573
+ ccs[-1] += '+PTX'
574
+ else:
575
+ # Cross-compile mode, compile for various architectures
576
+ # env override takes priority
577
+ cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
578
+ if cross_compile_archs_env is not None:
579
+ if cross_compile_archs is not None:
580
+ print(
581
+ f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
582
+ )
583
+ cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
584
+ else:
585
+ if cross_compile_archs is None:
586
+ cross_compile_archs = get_default_compute_capabilities()
587
+ ccs = cross_compile_archs.split(';')
588
+
589
+ ccs = self.filter_ccs(ccs)
590
+ if len(ccs) == 0:
591
+ raise RuntimeError(
592
+ f"Unable to load {self.name} op due to no compute capabilities remaining after filtering")
593
+
594
+ args = []
595
+ self.enable_bf16 = True
596
+ for cc in ccs:
597
+ num = cc[0] + cc[2]
598
+ args.append(f'-gencode=arch=compute_{num},code=sm_{num}')
599
+ if cc.endswith('+PTX'):
600
+ args.append(f'-gencode=arch=compute_{num},code=compute_{num}')
601
+
602
+ if int(cc[0]) <= 7:
603
+ self.enable_bf16 = False
604
+
605
+ return args
606
+
607
+ def filter_ccs(self, ccs: List[str]):
608
+ """
609
+ Prune any compute capabilities that are not compatible with the builder. Should log
610
+ which CCs have been pruned.
611
+ """
612
+ return ccs
613
+
614
+ def version_dependent_macros(self):
615
+ # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
616
+ version_ge_1_1 = []
617
+ if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
618
+ version_ge_1_1 = ['-DVERSION_GE_1_1']
619
+ version_ge_1_3 = []
620
+ if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
621
+ version_ge_1_3 = ['-DVERSION_GE_1_3']
622
+ version_ge_1_5 = []
623
+ if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
624
+ version_ge_1_5 = ['-DVERSION_GE_1_5']
625
+ return version_ge_1_1 + version_ge_1_3 + version_ge_1_5
626
+
627
+ def is_compatible(self, verbose=True):
628
+ return super().is_compatible(verbose)
629
+
630
+ def builder(self):
631
+ try:
632
+ if not self.is_rocm_pytorch():
633
+ assert_no_cuda_mismatch(self.name)
634
+ self.build_for_cpu = False
635
+ except MissingCUDAException:
636
+ self.build_for_cpu = True
637
+
638
+ if self.build_for_cpu:
639
+ from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
640
+ else:
641
+ from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder
642
+ include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
643
+ compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \
644
+ {'cxx': self.strip_empty_entries(self.cxx_args()), \
645
+ 'nvcc': self.strip_empty_entries(self.nvcc_args())}
646
+
647
+ if not self.build_for_cpu and self.enable_bf16:
648
+ compile_args['cxx'].append("-DBF16_AVAILABLE")
649
+
650
+ if self.is_rocm_pytorch():
651
+ compile_args['cxx'].append("-D__HIP_PLATFORM_AMD__=1")
652
+
653
+ cuda_ext = ExtensionBuilder(name=self.absolute_name(),
654
+ sources=self.strip_empty_entries(self.sources()),
655
+ include_dirs=include_dirs,
656
+ libraries=self.strip_empty_entries(self.libraries_args()),
657
+ extra_compile_args=compile_args,
658
+ extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
659
+
660
+ if self.is_rocm_pytorch():
661
+ # hip converts paths to absolute, this converts back to relative
662
+ sources = cuda_ext.sources
663
+ curr_file = Path(__file__).parent.parent # ds root
664
+ for i in range(len(sources)):
665
+ src = Path(sources[i])
666
+ if src.is_absolute():
667
+ sources[i] = str(src.relative_to(curr_file))
668
+ else:
669
+ sources[i] = str(src)
670
+ cuda_ext.sources = sources
671
+ return cuda_ext
672
+
673
+ def hipify_extension(self):
674
+ if self.is_rocm_pytorch():
675
+ from torch.utils.hipify import hipify_python
676
+ hipify_python.hipify(
677
+ project_directory=os.getcwd(),
678
+ output_directory=os.getcwd(),
679
+ header_include_dirs=self.include_paths(),
680
+ includes=[os.path.join(os.getcwd(), '*')],
681
+ extra_files=[os.path.abspath(s) for s in self.sources()],
682
+ show_detailed=True,
683
+ is_pytorch_extension=True,
684
+ hipify_extra_files_only=True,
685
+ )
686
+
687
+ def cxx_args(self):
688
+ if sys.platform == "win32":
689
+ return ['-O2']
690
+ else:
691
+ return ['-O3', '-std=c++17', '-g', '-Wno-reorder']
692
+
693
+ def nvcc_args(self):
694
+ if self.build_for_cpu:
695
+ return []
696
+ args = ['-O3']
697
+ if self.is_rocm_pytorch():
698
+ ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
699
+ args += [
700
+ '-std=c++17', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__',
701
+ '-U__HIP_NO_HALF2_OPERATORS__',
702
+ '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR,
703
+ '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR
704
+ ]
705
+ else:
706
+ try:
707
+ nvcc_threads = int(os.getenv("DS_NVCC_THREADS", ""))
708
+ if nvcc_threads <= 0:
709
+ raise ValueError("")
710
+ except ValueError:
711
+ nvcc_threads = min(os.cpu_count(), 8)
712
+
713
+ cuda_major, _ = installed_cuda_version()
714
+ args += [
715
+ '-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math',
716
+ '-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__',
717
+ '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', f'--threads={nvcc_threads}'
718
+ ]
719
+ if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1':
720
+ args.append('--ptxas-options=-v')
721
+ args += self.compute_capability_args()
722
+ return args
723
+
724
+ def libraries_args(self):
725
+ if self.build_for_cpu:
726
+ return []
727
+
728
+ if sys.platform == "win32":
729
+ return ['cublas', 'curand']
730
+ else:
731
+ return []
732
+
733
+
734
+ class TorchCPUOpBuilder(CUDAOpBuilder):
735
+
736
+ def extra_ldflags(self):
737
+ if self.build_for_cpu:
738
+ return ['-fopenmp']
739
+
740
+ if not self.is_rocm_pytorch():
741
+ return ['-lcurand']
742
+
743
+ return []
744
+
745
+ def cxx_args(self):
746
+ import torch
747
+ args = []
748
+ if not self.build_for_cpu:
749
+ if not self.is_rocm_pytorch():
750
+ CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64")
751
+ if not os.path.exists(CUDA_LIB64):
752
+ CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib")
753
+ else:
754
+ CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib")
755
+
756
+ args += super().cxx_args()
757
+ args += [
758
+ f'-L{CUDA_LIB64}',
759
+ '-lcudart',
760
+ '-lcublas',
761
+ '-g',
762
+ ]
763
+
764
+ CPU_ARCH = self.cpu_arch()
765
+ SIMD_WIDTH = self.simd_width()
766
+ CUDA_ENABLE = self.is_cuda_enable()
767
+ args += [
768
+ CPU_ARCH,
769
+ '-fopenmp',
770
+ SIMD_WIDTH,
771
+ CUDA_ENABLE,
772
+ ]
773
+
774
+ return args
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (429 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc ADDED
Binary file (1.47 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CPUOpBuilder
7
+
8
+
9
+ class NotImplementedBuilder(CPUOpBuilder):
10
+ BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
11
+ NAME = "deepspeed_not_implemented"
12
+
13
+ def __init__(self, name=None):
14
+ name = self.NAME if name is None else name
15
+ super().__init__(name=name)
16
+
17
+ def absolute_name(self):
18
+ return f'deepspeed.ops.comm.{self.NAME}_op'
19
+
20
+ def load(self, verbose=True):
21
+ raise ValueError("This op had not been implemented on CPU backend.")
22
+
23
+ def sources(self):
24
+ return []
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adagrad.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ from .builder import TorchCPUOpBuilder
8
+
9
+
10
class CPUAdagradBuilder(TorchCPUOpBuilder):
    """Builds the CPU Adagrad optimizer extension, optionally with a CUDA helper kernel."""
    BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
    NAME = "cpu_adagrad"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adagrad.{self.NAME}_op'

    def sources(self):
        # The CUDA helper kernel is only compiled for GPU-enabled builds.
        srcs = ['csrc/adagrad/cpu_adagrad.cpp']
        if not self.build_for_cpu:
            srcs.append('csrc/common/custom_cuda_kernel.cu')
        return srcs

    def libraries_args(self):
        args = super().libraries_args()
        # curand is only linked for CUDA (non-ROCm) GPU builds.
        if not self.build_for_cpu and not self.is_rocm_pytorch():
            args += ['curand']
        return args

    def include_paths(self):
        import torch
        includes = ['csrc/includes']
        # CUDA headers are needed only for a non-ROCm GPU build.
        if not self.build_for_cpu and not self.is_rocm_pytorch():
            includes.append(os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include"))
        return includes
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adam.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ from .builder import TorchCPUOpBuilder
8
+
9
+
10
class CPUAdamBuilder(TorchCPUOpBuilder):
    """Builds the CPU Adam optimizer extension, optionally with a CUDA helper kernel."""
    BUILD_VAR = "DS_BUILD_CPU_ADAM"
    NAME = "cpu_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        # The CUDA helper kernel is only compiled for GPU-enabled builds.
        srcs = ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']
        if not self.build_for_cpu:
            srcs.append('csrc/common/custom_cuda_kernel.cu')
        return srcs

    def libraries_args(self):
        args = super().libraries_args()
        # curand is only linked for CUDA (non-ROCm) GPU builds.
        if not self.build_for_cpu and not self.is_rocm_pytorch():
            args += ['curand']
        return args

    def include_paths(self):
        import torch
        includes = ['csrc/includes']
        # CUDA headers are needed only for a non-ROCm GPU build.
        if not self.build_for_cpu and not self.is_rocm_pytorch():
            includes.append(os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include"))
        return includes
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ from .builder import TorchCPUOpBuilder
8
+
9
+
10
class CPULionBuilder(TorchCPUOpBuilder):
    """Builds the CPU Lion optimizer extension, optionally with a CUDA helper kernel."""
    BUILD_VAR = "DS_BUILD_CPU_LION"
    NAME = "cpu_lion"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.lion.{self.NAME}_op'

    def sources(self):
        # The CUDA helper kernel is only compiled for GPU-enabled builds.
        srcs = ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp']
        if not self.build_for_cpu:
            srcs.append('csrc/common/custom_cuda_kernel.cu')
        return srcs

    def libraries_args(self):
        args = super().libraries_args()
        # curand is only linked for CUDA (non-ROCm) GPU builds.
        if not self.build_for_cpu and not self.is_rocm_pytorch():
            args += ['curand']
        return args

    def include_paths(self):
        import torch
        if self.build_for_cpu:
            gpu_includes = []
        elif self.is_rocm_pytorch():
            # ROCm builds also need the rocrand/hiprand headers.
            rocm_root = torch.utils.cpp_extension.ROCM_HOME
            gpu_includes = [
                os.path.join(rocm_root, "include"),
                os.path.join(rocm_root, "include", "rocrand"),
                os.path.join(rocm_root, "include", "hiprand"),
            ]
        else:
            gpu_includes = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
        return ['csrc/includes'] + gpu_includes
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder, installed_cuda_version
7
+ import os
8
+
9
+
10
class EvoformerAttnBuilder(CUDAOpBuilder):
    """Builder for the DeepSpeed4Science Evoformer attention kernels.

    Requires an external CUTLASS checkout (>= 3.1.0) pointed to by the
    CUTLASS_PATH environment variable, CUDA 11+, and a GPU with compute
    capability >= 7.0.
    """
    BUILD_VAR = "DS_BUILD_EVOFORMER_ATTN"
    NAME = "evoformer_attn"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)
        # CUTLASS is an external dependency located via this environment variable.
        self.cutlass_path = os.environ.get('CUTLASS_PATH')

    def absolute_name(self):
        return f'deepspeed.ops.{self.NAME}_op'

    def extra_ldflags(self):
        # curand is only linked on the NVIDIA (non-ROCm) toolchain.
        if not self.is_rocm_pytorch():
            return ['-lcurand']
        else:
            return []

    def sources(self):
        src_dir = 'csrc/deepspeed4science/evoformer_attn'
        return [f'{src_dir}/attention.cpp', f'{src_dir}/attention_back.cu', f'{src_dir}/attention_cu.cu']

    def nvcc_args(self):
        args = super().nvcc_args()
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile kernels")
            return args
        # Compile for the architecture of the first visible GPU.
        major = torch.cuda.get_device_properties(0).major  #ignore-cuda
        minor = torch.cuda.get_device_properties(0).minor  #ignore-cuda
        args.append(f"-DGPU_ARCH={major}{minor}")
        return args

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile kernels")
            return False
        if self.cutlass_path is None:
            self.warning("Please specify the CUTLASS repo directory as environment variable $CUTLASS_PATH")
            return False
        # Fix: a CUTLASS_PATH that does not point at a CUTLASS checkout used to
        # raise FileNotFoundError on open(); report incompatibility instead.
        changelog = f'{self.cutlass_path}/CHANGELOG.md'
        if not os.path.isfile(changelog):
            self.warning("Please use CUTLASS version >= 3.1.0")
            return False
        with open(changelog, 'r') as f:
            # CHANGELOG.md mentioning 3.1.0 is used as a proxy for version >= 3.1.0.
            if '3.1.0' not in f.read():
                self.warning("Please use CUTLASS version >= 3.1.0")
                return False
        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 7:
                self.warning("Please use a GPU with compute capability >= 7.0")
                cuda_okay = False
            if torch_cuda_major < 11 or sys_cuda_major < 11:
                self.warning("Please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def include_paths(self):
        # Headers come from the external CUTLASS checkout.
        includes = [f'{self.cutlass_path}/include', f'{self.cutlass_path}/tools/util/include']
        return includes
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder
7
+
8
+ import sys
9
+
10
+
11
class FusedAdamBuilder(CUDAOpBuilder):
    """Builds the fused multi-tensor Adam optimizer CUDA extension."""
    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
    NAME = "fused_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu']

    def include_paths(self):
        return ['csrc/includes', 'csrc/adam']

    def cxx_args(self):
        return super().cxx_args() + self.version_dependent_macros()

    def nvcc_args(self):
        flags = ['-O3'] + self.version_dependent_macros()
        if not self.is_rocm_pytorch():
            # MSVC needs -allow-unsupported-compiler; the empty string placeholder
            # on other platforms is stripped before the build runs.
            win_flag = '-allow-unsupported-compiler' if sys.platform == "win32" else ''
            flags += [win_flag, '-lineinfo', '--use_fast_math'] + self.compute_capability_args()
        return flags
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lamb.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder
7
+
8
+ import sys
9
+
10
+
11
class FusedLambBuilder(CUDAOpBuilder):
    """Builds the fused LAMB optimizer CUDA/ROCm extension."""
    BUILD_VAR = 'DS_BUILD_FUSED_LAMB'
    NAME = "fused_lamb"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.lamb.{self.NAME}_op'

    def sources(self):
        return ['csrc/lamb/fused_lamb_cuda.cpp', 'csrc/lamb/fused_lamb_cuda_kernel.cu']

    def include_paths(self):
        return ['csrc/includes']

    def cxx_args(self):
        return super().cxx_args() + self.version_dependent_macros()

    def nvcc_args(self):
        flags = ['-O3'] + self.version_dependent_macros()
        if self.is_rocm_pytorch():
            # Expose the installed ROCm version to the kernel sources.
            ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
            flags += ['-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR]
        else:
            # MSVC needs -allow-unsupported-compiler; the empty string placeholder
            # on other platforms is stripped before the build runs.
            win_flag = '-allow-unsupported-compiler' if sys.platform == "win32" else ''
            flags += [win_flag, '-lineinfo', '--use_fast_math'] + self.compute_capability_args()
        return flags
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder
7
+
8
+ import sys
9
+
10
+
11
class FusedLionBuilder(CUDAOpBuilder):
    """Builds the fused multi-tensor Lion optimizer CUDA extension."""
    BUILD_VAR = "DS_BUILD_FUSED_LION"
    NAME = "fused_lion"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.lion.{self.NAME}_op'

    def sources(self):
        return ['csrc/lion/fused_lion_frontend.cpp', 'csrc/lion/multi_tensor_lion.cu']

    def include_paths(self):
        return ['csrc/includes', 'csrc/lion']

    def cxx_args(self):
        return super().cxx_args() + self.version_dependent_macros()

    def nvcc_args(self):
        flags = ['-O3'] + self.version_dependent_macros()
        if not self.is_rocm_pytorch():
            # MSVC needs -allow-unsupported-compiler; the empty string placeholder
            # on other platforms is stripped before the build runs.
            win_flag = '-allow-unsupported-compiler' if sys.platform == "win32" else ''
            flags += [win_flag, '-lineinfo', '--use_fast_math'] + self.compute_capability_args()
        return flags
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/no_impl.cpython-310.pyc ADDED
Binary file (1.22 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/builder.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company
2
+ # Copyright (c) Microsoft Corporation.
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ # DeepSpeed Team
6
+
7
+ import os
8
+
9
+ try:
10
+ # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
11
+ # if successful this also means we're doing a local install and not JIT compile path
12
+ from op_builder import __deepspeed__ # noqa: F401 # type: ignore
13
+ from op_builder.builder import OpBuilder
14
+ except ImportError:
15
+ from deepspeed.ops.op_builder.builder import OpBuilder
16
+
17
+
18
class CPUOpBuilder(OpBuilder):
    """Base builder for CPU-only extensions compiled with CppExtension (no CUDA toolchain)."""

    def builder(self):
        from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
        # Empty entries are stripped so optional flags/paths can be blank.
        include_dirs = [os.path.abspath(p) for p in self.strip_empty_entries(self.include_paths())]
        compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
        return ExtensionBuilder(name=self.absolute_name(),
                                sources=self.strip_empty_entries(self.sources()),
                                include_dirs=include_dirs,
                                libraries=self.strip_empty_entries(self.libraries_args()),
                                extra_compile_args=compile_args)

    def cxx_args(self):
        # Optimized build with debug symbols; silence member-init-order noise.
        return ['-O3', '-g', '-Wno-reorder']

    def libraries_args(self):
        # No extra libraries by default; subclasses may extend.
        return []
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/fused_adam.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company
2
+ # Copyright (c) Microsoft Corporation.
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+ # DeepSpeed Team
6
+
7
+ from .builder import CPUOpBuilder
8
+
9
+
10
class FusedAdamBuilder(CPUOpBuilder):
    """Builds the fused Adam optimizer for the HPU backend (CPU-side C++ implementation)."""
    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
    NAME = "fused_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']

    def cxx_args(self):
        # bfloat16 support is compiled in unconditionally on this backend.
        return super().cxx_args() + ['-DENABLE_BFLOAT16']

    def include_paths(self):
        return ['csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/no_impl.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CPUOpBuilder
7
+
8
+
9
class NotImplementedBuilder(CPUOpBuilder):
    """Placeholder builder for ops that have no HPU implementation.

    Any attempt to load the op raises immediately, so missing backend
    support surfaces as an explicit error rather than a later failure.
    """
    BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
    NAME = "deepspeed_not_implemented"

    def __init__(self, name=None):
        # Fall back to the class-level default when no explicit name is given.
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        # Fully-qualified module path of the (never built) compiled op.
        return f'deepspeed.ops.comm.{self.NAME}_op'

    def load(self, verbose=True):
        # Loading is unconditionally an error on this backend.
        raise ValueError("This op had not been implemented on HPU backend.")

    def sources(self):
        # Nothing to compile.
        return []
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_core_ops.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+
8
+ from .builder import CUDAOpBuilder, installed_cuda_version
9
+
10
+
11
class InferenceCoreBuilder(CUDAOpBuilder):
    """Builds the DeepSpeed inference v2 core kernels (activations, norms, FP6 linear)."""
    BUILD_VAR = "DS_BUILD_INFERENCE_CORE_OPS"
    NAME = "inference_core_ops"

    def __init__(self, name=None):
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        return f'deepspeed.inference.v2.kernels{self.NAME}'

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            # Ampere+ additionally requires CUDA 11 in both torch and the system toolkit.
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Drop compute capabilities below Pascal (6.x).
        retained = [cc for cc in ccs if int(cc[0]) >= 6]
        pruned = [cc for cc in ccs if int(cc[0]) < 6]
        if pruned:
            self.warning(f"Filtered compute capabilities {pruned}")
        return retained

    def get_prefix(self):
        # Source layout differs between an installed package and a repo checkout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        import torch

        srcs = [
            "inference/v2/kernels/core_ops/core_ops.cpp",
            "inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp",
            "inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu",
            "inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp",
            "inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu",
            "inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp",
            "inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu",
            "inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp",
            "inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu",
        ]

        # The FP6 linear kernels have a hard architecture requirement.
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability != 8:
                self.warning("FP6 quantization kernel is only supported on Ampere architectures")
            else:
                srcs.append("inference/v2/kernels/core_ops/cuda_linear/fp6_linear.cu")
                srcs.append("inference/v2/kernels/core_ops/cuda_linear/cuda_linear_kernels.cpp")

        prefix = self.get_prefix()
        return [os.path.join(prefix, src) for src in srcs]

    def extra_ldflags(self):
        return []

    def include_paths(self):
        paths = [
            'inference/v2/kernels/core_ops/bias_activations',
            'inference/v2/kernels/core_ops/blas_kernels',
            'inference/v2/kernels/core_ops/cuda_layer_norm',
            'inference/v2/kernels/core_ops/cuda_rms_norm',
            'inference/v2/kernels/core_ops/gated_activations',
            'inference/v2/kernels/core_ops/cuda_linear',
            'inference/v2/kernels/includes',
        ]

        prefix = self.get_prefix()
        return [os.path.join(prefix, p) for p in paths]
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_cutlass_builder.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ import os
6
+
7
+ from .builder import CUDAOpBuilder, installed_cuda_version
8
+
9
+
10
class InferenceCutlassBuilder(CUDAOpBuilder):
    """Builds the CUTLASS-based inference GEMM kernels (mixed GEMM and MoE GEMM)."""
    BUILD_VAR = "DS_BUILD_CUTLASS_OPS"
    NAME = "cutlass_ops"

    def __init__(self, name=None):
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        return f'deepspeed.inference.v2.kernels.cutlass_ops.{self.NAME}'

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            # Ampere+ additionally requires CUDA 11 in both torch and the system toolkit.
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Only support Ampere and newer (8.x+).
        retained = [cc for cc in ccs if int(cc[0]) >= 8]
        pruned = [cc for cc in ccs if int(cc[0]) < 8]
        if pruned:
            self.warning(f"Filtered compute capabilities {pruned}")
        return retained

    def get_prefix(self):
        # Source layout differs between an installed package and a repo checkout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        srcs = [
            "inference/v2/kernels/cutlass_ops/cutlass_ops.cpp",
            "inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu",
            "inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu",
        ]

        prefix = self.get_prefix()
        return [os.path.join(prefix, src) for src in srcs]

    def extra_ldflags(self):
        # Link against the prebuilt deepspeedft library shipped with dskernels.
        import dskernels
        lib_path = dskernels.library_path()
        prefix = self.get_prefix()
        lib_path = os.path.join(prefix, lib_path)
        lib_path = self.deepspeed_src_path(lib_path)

        args = [f'-L{lib_path}', '-ldeepspeedft']
        if self.jit_load:
            # JIT builds need an rpath so the shared library resolves at runtime.
            args.append(f'-Wl,-rpath,{lib_path}')
        return args

    def include_paths(self):
        paths = [
            'inference/v2/kernels/includes',
            'inference/v2/kernels/cutlass_ops/mixed_gemm',
            'inference/v2/kernels/cutlass_ops/moe_gemm',
            'inference/v2/kernels/cutlass_ops/shared_resources/',
        ]

        prefix = self.get_prefix()
        return [os.path.join(prefix, p) for p in paths]
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/quantizer.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder
7
+
8
+
9
class QuantizerBuilder(CUDAOpBuilder):
    """Builds the quantization/dequantization CUDA kernels."""
    BUILD_VAR = "DS_BUILD_QUANTIZER"
    NAME = "quantizer"

    def __init__(self, name=None):
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.quantizer.{self.NAME}_op'

    def sources(self):
        return [
            'csrc/quantization/pt_binding.cpp',
            'csrc/quantization/fake_quantizer.cu',
            'csrc/quantization/quantize.cu',
            'csrc/quantization/quantize_intX.cu',
            'csrc/quantization/dequantize.cu',
            'csrc/quantization/swizzled_quantize.cu',
            'csrc/quantization/quant_reduce.cu',
        ]

    def include_paths(self):
        return ['csrc/includes']

    def extra_ldflags(self):
        # curand is only linked on the NVIDIA (non-ROCm) toolchain.
        if self.is_rocm_pytorch():
            return []
        return ['-lcurand']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_ops.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+
8
+ from .builder import CUDAOpBuilder, installed_cuda_version
9
+
10
+
11
class RaggedOpsBuilder(CUDAOpBuilder):
    """Builds the inference v2 ragged-batch device kernels (blocked flash, MoE, rotary, gating)."""
    BUILD_VAR = "DS_BUILD_RAGGED_DEVICE_OPS"
    NAME = "ragged_device_ops"

    def __init__(self, name=None):
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        return f'deepspeed.inference.v2.kernels.ragged_ops.{self.NAME}'

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            # Ampere+ additionally requires CUDA 11 in both torch and the system toolkit.
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Blocked flash has a dependency on Ampere + newer (8.x+).
        retained = [cc for cc in ccs if int(cc[0]) >= 8]
        pruned = [cc for cc in ccs if int(cc[0]) < 8]
        if pruned:
            self.warning(f"Filtered compute capabilities {pruned}")
        return retained

    def get_prefix(self):
        # Source layout differs between an installed package and a repo checkout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        srcs = [
            "inference/v2/kernels/ragged_ops/ragged_ops.cpp",
            "inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp",
            "inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp",
            "inference/v2/kernels/ragged_ops/embed/embed.cpp",
            "inference/v2/kernels/ragged_ops/embed/embed_cuda.cu",
            "inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp",
            "inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu",
            "inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp",
            "inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu",
            "inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp",
            "inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter_cuda.cu",
            "inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp",
            "inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu",
            "inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.cpp",
            "inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp",
            "inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu",
        ]

        prefix = self.get_prefix()
        return [os.path.join(prefix, src) for src in srcs]

    def extra_ldflags(self):
        # Link against the prebuilt blockedflash library shipped with dskernels.
        import dskernels
        lib_path = dskernels.library_path()

        prefix = self.get_prefix()
        lib_path = os.path.join(prefix, lib_path)
        lib_path = self.deepspeed_src_path(lib_path)

        args = [f'-L{lib_path}', '-lblockedflash']
        if self.jit_load:
            # JIT builds need an rpath so the shared library resolves at runtime.
            args.append(f'-Wl,-rpath,{lib_path}')
        return args

    def include_paths(self):
        paths = [
            'inference/v2/kernels/includes',
            'inference/v2/kernels/ragged_ops',
            'inference/v2/kernels/ragged_ops/atom_builder',
            'inference/v2/kernels/ragged_ops/blocked_flash',
            'inference/v2/kernels/ragged_ops/embed',
            'inference/v2/kernels/ragged_ops/includes',
            'inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary',
            'inference/v2/kernels/ragged_ops/logits_gather',
            'inference/v2/kernels/ragged_ops/moe_gather',
            'inference/v2/kernels/ragged_ops/moe_scatter',
            'inference/v2/kernels/ragged_ops/ragged_helpers',
            'inference/v2/kernels/ragged_ops/top_k_gating',
        ]

        prefix = self.get_prefix()
        return [os.path.join(prefix, p) for p in paths]
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_utils.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+
8
+ from .builder import CUDAOpBuilder, installed_cuda_version
9
+
10
+
11
class RaggedUtilsBuilder(CUDAOpBuilder):
    """Builds the inference v2 ragged-batch host utilities (fast host buffers)."""
    BUILD_VAR = "DS_BUILD_RAGGED_OPS"
    NAME = "ragged_ops"

    def __init__(self, name=None):
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        return f'deepspeed.inference.v2.{self.NAME}'

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            # Ampere+ additionally requires CUDA 11 in both torch and the system toolkit.
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Drop compute capabilities below Pascal (6.x).
        retained = [cc for cc in ccs if int(cc[0]) >= 6]
        pruned = [cc for cc in ccs if int(cc[0]) < 6]
        if pruned:
            self.warning(f"Filtered compute capabilities {pruned}")
        return retained

    def get_prefix(self):
        # Source layout differs between an installed package and a repo checkout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        srcs = [
            "inference/v2/ragged/csrc/fast_host_buffer.cu",
            "inference/v2/ragged/csrc/ragged_ops.cpp",
        ]

        prefix = self.get_prefix()
        return [os.path.join(prefix, src) for src in srcs]

    def extra_ldflags(self):
        return []

    def include_paths(self):
        include_dirs = ['inference/v2/ragged/includes', 'inference/v2/kernels/includes']
        prefix = self.get_prefix()
        return [os.path.join(prefix, include_dir) for include_dir in include_dirs]
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/random_ltd.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder
7
+
8
+
9
class RandomLTDBuilder(CUDAOpBuilder):
    """Builds the random layerwise token dropping (random-LTD) CUDA kernels."""
    BUILD_VAR = "DS_BUILD_RANDOM_LTD"
    NAME = "random_ltd"

    def __init__(self, name=None):
        super().__init__(name=name if name is not None else self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.{self.NAME}_op'

    def extra_ldflags(self):
        # curand is only linked on the NVIDIA (non-ROCm) toolchain.
        if self.is_rocm_pytorch():
            return []
        return ['-lcurand']

    def sources(self):
        return [
            'csrc/random_ltd/pt_binding.cpp', 'csrc/random_ltd/gather_scatter.cu',
            'csrc/random_ltd/slice_attn_masks.cu', 'csrc/random_ltd/token_sort.cu'
        ]

    def include_paths(self):
        return ['csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/sparse_attn.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import OpBuilder
7
+
8
+ try:
9
+ from packaging import version as pkg_version
10
+ except ImportError:
11
+ pkg_version = None
12
+
13
+
14
class SparseAttnBuilder(OpBuilder):
    """Builds the triton-based sparse attention op.

    Compatibility is narrow by design: CUDA-enabled torch (10.1+ / 11+),
    torch 1.5+, and triton==1.0.0 exactly.
    """

    BUILD_VAR = "DS_BUILD_SPARSE_ATTN"
    NAME = "sparse_attn"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.sparse_attention.{self.NAME}_op'

    def sources(self):
        return ['csrc/sparse_attention/utils.cpp']

    def cxx_args(self):
        return ['-O2', '-fopenmp']

    def is_compatible(self, verbose=True):
        """Return True only when ROCm is absent and torch/CUDA/triton all qualify."""
        if self.is_rocm_pytorch():
            self.warning(f'{self.NAME} is not compatible with ROCM')
            return False

        try:
            import torch
        except ImportError:
            # fix: plain string — there was nothing to interpolate (flake8 F541)
            self.warning("unable to import torch, please install it first")
            return False

        # torch-cpu builds report no CUDA version at all
        if torch.version.cuda is None:
            cuda_compatible = False
            self.warning(f"{self.NAME} cuda is not available from torch")
        else:
            major, minor = torch.version.cuda.split('.')[:2]
            cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11)
            if not cuda_compatible:
                self.warning(f"{self.NAME} requires CUDA version 10.1+")

        TORCH_MAJOR = int(torch.__version__.split('.')[0])
        TORCH_MINOR = int(torch.__version__.split('.')[1])
        torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5)
        if not torch_compatible:
            self.warning(
                f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}')

        try:
            import triton
        except ImportError:
            # auto-install of triton is broken on some systems, reverting to manual install for now
            # see this issue: https://github.com/microsoft/DeepSpeed/issues/1710
            self.warning("please install triton==1.0.0 if you want to use sparse attention")
            return False

        # pkg_version may be None when `packaging` is unavailable (module-level
        # try/except above); fall back to a plain string comparison in that case.
        if pkg_version:
            installed_triton = pkg_version.parse(triton.__version__)
            triton_mismatch = installed_triton != pkg_version.parse("1.0.0")
        else:
            installed_triton = triton.__version__
            triton_mismatch = installed_triton != "1.0.0"

        if triton_mismatch:
            self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible")
            return False

        return super().is_compatible(verbose) and torch_compatible and cuda_compatible
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/spatial_inference.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder, installed_cuda_version
7
+
8
+
9
class SpatialInferenceBuilder(CUDAOpBuilder):
    """Builds the spatial inference kernels (fused bias-add and friends)."""

    BUILD_VAR = "DS_BUILD_SPATIAL_INFERENCE"
    NAME = "spatial_inference"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.spatial.{self.NAME}_op'

    def is_compatible(self, verbose=True):
        """Check torch availability plus the Ampere-needs-CUDA-11 constraint."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major
            # Ampere (SM80) and newer require CUDA 11+ on both torch and the system.
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def sources(self):
        return [
            'csrc/spatial/csrc/opt_bias_add.cu',
            'csrc/spatial/csrc/pt_binding.cpp',
        ]

    def include_paths(self):
        return ['csrc/spatial/includes', 'csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/stochastic_transformer.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .transformer import TransformerBuilder
7
+
8
+
9
class StochasticTransformerBuilder(TransformerBuilder):
    """Transformer kernel build with the stochastic (non-deterministic) mode on."""

    BUILD_VAR = "DS_BUILD_STOCHASTIC_TRANSFORMER"
    NAME = "stochastic_transformer"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.{self.NAME}_op'

    def nvcc_args(self):
        # Take the base transformer flags and switch on the stochastic code path.
        flags = super().nvcc_args()
        flags.append('-D__STOCHASTIC_MODE__')
        return flags
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/transformer.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder
7
+
8
+
9
class TransformerBuilder(CUDAOpBuilder):
    """Builds the DeepSpeed transformer training kernels."""

    BUILD_VAR = "DS_BUILD_TRANSFORMER"
    NAME = "transformer"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.{self.NAME}_op'

    def extra_ldflags(self):
        # curand is CUDA-only; ROCm builds link nothing extra.
        if self.is_rocm_pytorch():
            return []
        return ['-lcurand']

    def sources(self):
        return [
            'csrc/transformer/ds_transformer_cuda.cpp',
            'csrc/transformer/cublas_wrappers.cu',
            'csrc/transformer/transform_kernels.cu',
            'csrc/transformer/gelu_kernels.cu',
            'csrc/transformer/dropout_kernels.cu',
            'csrc/transformer/normalize_kernels.cu',
            'csrc/transformer/softmax_kernels.cu',
            'csrc/transformer/general_kernels.cu',
        ]

    def include_paths(self):
        return ['csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/transformer_inference.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import CUDAOpBuilder, installed_cuda_version
7
+
8
+
9
class InferenceBuilder(CUDAOpBuilder):
    """Builds the transformer inference kernels (Pascal/SM60 and newer only)."""

    BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
    NAME = "transformer_inference"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.inference.{self.NAME}_op'

    def is_compatible(self, verbose=True):
        """Check torch availability, minimum SM60, and Ampere's CUDA 11+ rule."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        """Drop compute capabilities below 6 (Pascal), warning about what was removed.

        NOTE(review): assumes each entry's first character is the major capability
        digit (e.g. '60', '80') — confirm against CUDAOpBuilder's cc handling.
        """
        kept = [cc for cc in ccs if int(cc[0]) >= 6]
        dropped = [cc for cc in ccs if int(cc[0]) < 6]
        if dropped:
            self.warning(f"Filtered compute capabilities {dropped}")
        return kept

    def sources(self):
        return [
            'csrc/transformer/inference/csrc/pt_binding.cpp',
            'csrc/transformer/inference/csrc/gelu.cu',
            'csrc/transformer/inference/csrc/relu.cu',
            'csrc/transformer/inference/csrc/layer_norm.cu',
            'csrc/transformer/inference/csrc/rms_norm.cu',
            'csrc/transformer/inference/csrc/softmax.cu',
            'csrc/transformer/inference/csrc/dequantize.cu',
            'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu',
            'csrc/transformer/inference/csrc/transform.cu',
            'csrc/transformer/inference/csrc/pointwise_ops.cu',
        ]

    def extra_ldflags(self):
        # curand is CUDA-only; ROCm builds link nothing extra.
        if self.is_rocm_pytorch():
            return []
        return ['-lcurand']

    def include_paths(self):
        return ['csrc/transformer/inference/includes', 'csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (308 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/dropping_utils.cpython-310.pyc ADDED
Binary file (3.13 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/dropping_utils.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from deepspeed.ops.op_builder import RandomLTDBuilder
9
+ """
10
+ Returns:
11
+ sampled_indices: [layers, batch_size, reserved_length]
12
+ new_mask: [batch_size, 1, reserved_length, reserved_length]
13
+ """
14
+
15
+ random_ltd_module = None
16
+
17
+
18
+ def gpt_sample_tokens(reserved_length: int,
19
+ seq_length: int,
20
+ batch_size: int,
21
+ layers: int = 1,
22
+ device: str = 'cpu',
23
+ attn_mask: torch.Tensor = None):
24
+
25
+ prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
26
+ sampled_indices = torch.multinomial(prob_dist, reserved_length)
27
+
28
+ sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
29
+ global random_ltd_module
30
+ if random_ltd_module is None:
31
+ random_ltd_module = RandomLTDBuilder().load()
32
+ sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
33
+
34
+ # Not certain the optimized kernel is actually better here, cause it kind of screws
35
+ # with alignment right if the sequence length is not divisible by like 16
36
+ # new_mask = random_ltd_module.mask_gather_gpt(attn_mask, reserved_length)
37
+ if attn_mask is not None:
38
+ new_mask = attn_mask[:, :, :reserved_length, :reserved_length]
39
+ else:
40
+ new_mask = None
41
+
42
+ return sampled_indices, new_mask
43
+
44
+
45
+ """
46
+ Returns:
47
+ sampled_indices: [layers, batch_size, reserved_length]
48
+ new_mask: [layers, batch_size, 1, reserved_length, reserved_length]
49
+ """
50
+
51
+
52
def bert_sample_tokens(reserved_length: int,
                       seq_length: int,
                       batch_size: int,
                       layers: int = 1,
                       device: str = 'cpu',
                       attn_mask: torch.Tensor = None):
    """Uniformly sample `reserved_length` token positions per layer/sample for BERT-style models.

    Unlike the GPT variant, a mask is required and is re-indexed (not merely
    truncated) per layer and per sample.

    Returns:
        sampled_indices: [layers, batch_size, reserved_length], sorted, original dtype restored.
        new_mask: list of `layers` tensors, each built by selecting the sampled
            rows/columns of attn_mask for every sample in the batch.
    """
    assert attn_mask is not None
    global random_ltd_module
    uniform_weights = torch.ones((layers * batch_size, seq_length), device=device)
    sampled_indices = torch.multinomial(uniform_weights, reserved_length)
    sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)

    # Lazily build/load the CUDA extension on first use.
    if random_ltd_module is None:
        random_ltd_module = RandomLTDBuilder().load()
    sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)

    # Advanced indexing below needs int64; remember the dtype to restore on return.
    index_dtype = sampled_indices.dtype
    sampled_indices = sampled_indices.to(torch.long)

    new_mask = []
    for layer_idx in range(layers):
        per_sample_masks = []
        for sample_idx in range(batch_size):
            keep = sampled_indices[layer_idx][sample_idx]
            # Select the kept rows first, then the kept columns, for this sample.
            row_slice = attn_mask[sample_idx:sample_idx + 1, :, keep, :]
            per_sample_masks.append(row_slice[:, :, :, keep])
        new_mask.append(torch.cat(per_sample_masks, dim=0))

    return sampled_indices.to(index_dtype), new_mask
80
+
81
+
82
class GatherTokens(torch.autograd.Function):
    """Autograd op that gathers the kept (sampled) tokens out of an activation tensor.

    forward returns BOTH the untouched input activations and the gathered subset,
    so callers can keep routing the full tensor alongside the reduced one.
    """

    @staticmethod
    def forward(ctx, activations: torch.Tensor, sorted_indices: torch.Tensor, batch_first: bool):
        # Lazily build/load the random-LTD CUDA extension on first use.
        global random_ltd_module
        if random_ltd_module is None:
            random_ltd_module = RandomLTDBuilder().load()
        ctx.save_for_backward(activations, sorted_indices)
        ctx.batch_first = batch_first
        return activations, random_ltd_module.token_gather(activations, sorted_indices, batch_first)

    @staticmethod
    def backward(ctx, a_gradients: torch.Tensor, g_gradients: torch.Tensor):
        # a_gradients: gradient w.r.t. the pass-through output (full tensor);
        # g_gradients: gradient w.r.t. the gathered output. The gathered grads
        # are scattered back into the full gradient at the sampled positions.
        # NOTE(review): token_scatter_'s trailing underscore suggests it writes
        # into a_gradients in place — confirm against the kernel binding.
        g_gradients = g_gradients.contiguous()
        global random_ltd_module
        if random_ltd_module is None:
            random_ltd_module = RandomLTDBuilder().load()
        activations, sorted_indices = ctx.saved_tensors
        batch_first = ctx.batch_first

        # No gradient flows to sorted_indices or the batch_first flag.
        return random_ltd_module.token_scatter_(a_gradients, g_gradients, sorted_indices, batch_first), None, None
104
+
105
+
106
class ScatterTokens(torch.autograd.Function):
    """Autograd op that scatters per-layer activations back into the full tensor."""

    @staticmethod
    def forward(ctx, all_activations: torch.Tensor, layer_activations: torch.Tensor, sorted_indices: torch.Tensor,
                batch_first: bool):
        # Lazily build/load the random-LTD CUDA extension on first use.
        global random_ltd_module
        if random_ltd_module is None:
            random_ltd_module = RandomLTDBuilder().load()
        # Scatter into a clone so all_activations itself stays untouched
        # (the kernel's trailing underscore indicates an in-place write).
        scattered = random_ltd_module.token_scatter_(all_activations.clone(), layer_activations, sorted_indices,
                                                     batch_first)
        ctx.save_for_backward(sorted_indices)
        ctx.batch_first = batch_first
        return scattered

    @staticmethod
    def backward(ctx, out_gradients: torch.Tensor):
        global random_ltd_module
        if random_ltd_module is None:
            random_ltd_module = RandomLTDBuilder().load()
        sorted_indices, = ctx.saved_tensors
        # The full output gradient passes straight through to all_activations;
        # the layer_activations gradient is gathered from the sampled positions.
        contiguous_grads = out_gradients.contiguous()
        layer_grads = random_ltd_module.token_gather(contiguous_grads, sorted_indices, ctx.batch_first)
        return contiguous_grads, layer_grads, None, None
parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (555 Bytes). View file