diff --git a/.gitattributes b/.gitattributes index fe68a2c758ba4d2ee020cffcb9592a5aa202c244..656495e8fbfd324e532ec9ba4063690300f85473 100644 --- a/.gitattributes +++ b/.gitattributes @@ -180,3 +180,4 @@ parrot/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310 parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text +parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/bin/sqlite3 b/parrot/bin/sqlite3 new file mode 100644 index 0000000000000000000000000000000000000000..a07b3b3a13a0c9f8c274a6ef845d679e3530bbe4 --- /dev/null +++ b/parrot/bin/sqlite3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7c98c978efe537f94f522a16697b0ca86b384ca5e7a7bbc3eece38923194046 +size 1777144 diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf301d8213ca9b6fa6a2e6ba069196d01538f936 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a29bb9447d015d0992c18624dba41a15aa838866 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cpu_adam import DeepSpeedCPUAdam +from .fused_adam import FusedAdam diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b20343c078d8a0af1553b3306aa4ab5f98c73773 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de626de7fe0366de5118380551cf7a937b2854da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6db5694221d245c616a32c13d2b11829927f5dc6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6ac6ac355cf6009664e854e7957762b38777cfc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..10b8c15f970b8b7dfd85db35eb61793faabd43e2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py @@ -0,0 +1,181 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from cpuinfo import get_cpu_info +from deepspeed.utils import logger +from deepspeed.utils.logging import should_log_le +from deepspeed.ops.op_builder import CPUAdamBuilder + + +class DeepSpeedCPUAdam(torch.optim.Optimizer): + optimizer_id = 0 + + def __init__(self, + model_params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + amsgrad=False, + adamw_mode=True, + fp32_optimizer_states=True): + """Fast vectorized implementation of two variations of Adam optimizer on CPU: + + * Adam: A Method for Stochastic Optimization: (https://arxiv.org/abs/1412.6980); + * AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101) + + DeepSpeed CPU Adam(W) provides between 5x to 7x speedup over torch.optim.adam(W). + In order to apply this optimizer, the model requires to have its master parameter (in FP32) + reside on the CPU memory. + + To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers + the ZeRO-Offload technology which efficiently offloads the optimizer states into CPU memory, + with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role to minimize + the overhead of the optimizer's latency on CPU. Please refer to ZeRO-Offload tutorial + (https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology. + + For calling step function, there are two options available: (1) update optimizer's states and (2) update + optimizer's states and copy the parameters back to GPU at the same time. 
We have seen that the second + option can bring 30% higher throughput than the doing the copy separately using option one. + + + .. note:: + We recommend using our `config + `_ + to allow :meth:`deepspeed.initialize` to build this optimizer + for you. + + + Arguments: + model_params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in DeepSpeed CPUAdam! + adamw_mode: select between Adam and AdamW implementations (default: AdamW) + fp32_optimizer_states: creates momentum and variance in full precision regardless of + the precision of the parameters (default: True) + """ + + default_args = dict(lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + bias_correction=bias_correction, + amsgrad=amsgrad) + super(DeepSpeedCPUAdam, self).__init__(model_params, default_args) + + cpu_info = get_cpu_info() + self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown" + if "amd" in self.cpu_vendor: + for group_id, group in enumerate(self.param_groups): + for param_id, p in enumerate(group['params']): + if p.dtype == torch.half: + logger.warning("FP16 params for CPUAdam may not work on AMD CPUs") + break + else: + continue + break + + self.opt_id = DeepSpeedCPUAdam.optimizer_id + DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1 + self.adam_w_mode = adamw_mode + self.fp32_optimizer_states = fp32_optimizer_states + self.ds_opt_adam = 
CPUAdamBuilder().load() + + self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode, + should_log_le("info")) + + def __del__(self): + # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize + # is used multiple times in the same process (notebook or pytest worker) + self.ds_opt_adam.destroy_adam(self.opt_id) + + def __setstate__(self, state): + super(DeepSpeedCPUAdam, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None, fp16_param_groups=None): + """Update the model parameters. + + .. note:: + This method will be called internally by ZeRO-Offload. DeepSpeed + users should still use ``engine.step()`` as shown in the + `Getting Started + `_ guide. + + Args: + closure (callable, optional): closure to compute the loss. + Defaults to ``None``. + fp16_param_groups: FP16 GPU parameters to update. Performing the + copy here reduces communication time. Defaults to ``None``. + + Returns: + loss: if ``closure`` is provided. Otherwise ``None``. + """ + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + # intended device for step + device = torch.device('cpu') + + # converting the fp16 params to a group of parameter + if type(fp16_param_groups) is list: + if type(fp16_param_groups[0]) is not list: + fp16_param_groups = [fp16_param_groups] + elif fp16_param_groups is not None: + fp16_param_groups = [[fp16_param_groups]] + + for group_id, group in enumerate(self.param_groups): + for param_id, p in enumerate(group['params']): + + if p.grad is None: + continue + + assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \ + "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config." 
+ + state = self.state[p] + # State initialization + if len(state) == 0: + #print(f'group {group_id} param {param_id} = {p.numel()}') + state['step'] = 0 + + #use full precision by default unless self.fp32_optimizer_states is off + state_dtype = torch.float if self.fp32_optimizer_states else p.dtype + + # gradient momentums + state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device) + #memory_format=torch.preserve_format) + # gradient variances + state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device) + #memory_format=torch.preserve_format) + + state['step'] += 1 + beta1, beta2 = group['betas'] + + if fp16_param_groups is not None: + self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2, + group['eps'], group['weight_decay'], group['bias_correction'], + p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'], + fp16_param_groups[group_id][param_id].data) + else: + self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'], + group['weight_decay'], group['bias_correction'], p.data, p.grad.data, + state['exp_avg'], state['exp_avg_sq']) + return loss diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..53f859e9cc87bde8f16760a4c23394ea1207af0c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py @@ -0,0 +1,195 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4 +""" + +import torch +from .multi_tensor_apply import MultiTensorApply + +multi_tensor_applier = MultiTensorApply(2048 * 32) +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder + + +class FusedAdam(torch.optim.Optimizer): + """Implements Adam algorithm. + + Currently GPU-only. Requires Apex to be installed via + ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``. + + This version of fused Adam implements 2 fusions. + + * Fusion of the Adam update's elementwise operations + * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches. + + :class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``, + or ``torch.optim.Adam`` with ``adam_w_mode=False``:: + + opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....) + ... + opt.step() + + :class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp, + you may choose any ``opt_level``:: + + opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....) + model, opt = amp.initialize(model, opt, opt_level="O0" or "O1 or "O2") + ... + opt.step() + + In general, ``opt_level="O1"`` is recommended. + + + .. warning:: + A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments + are now deprecated and unnecessary. + + Adam was been proposed in `Adam: A Method for Stochastic Optimization`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. 
(default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in FusedAdam! + adam_w_mode (boolean, optional): Apply L2 regularization or weight decay + True for decoupled weight decay(also known as AdamW) (default: True) + set_grad_none (bool, optional): whether set grad to None when zero_grad() + method is called. (default: True) + + .. _Adam - A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + adam_w_mode=True, + weight_decay=0., + amsgrad=False, + set_grad_none=True): + + if amsgrad: + raise RuntimeError('FusedAdam does not support the AMSGrad variant.') + defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay) + super(FusedAdam, self).__init__(params, defaults) + self.adam_w_mode = 1 if adam_w_mode else 0 + self.set_grad_none = set_grad_none + + fused_adam_cuda = FusedAdamBuilder().load() + # Skip buffer + self._dummy_overflow_buf = get_accelerator().IntTensor([0]) + self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam + + def zero_grad(self): + if self.set_grad_none: + for group in self.param_groups: + for p in group['params']: + p.grad = None + else: + super(FusedAdam, self).zero_grad() + + def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None): + """Performs a single optimization step. 
+ + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + + The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes. + """ + if any(p is not None for p in [grads, output_params, scale, grad_norms]): + raise RuntimeError( + 'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.' + ) + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + if len(group['params']) == 0: + continue + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' not in group: + group['step'] = 0 + + # create lists for multi-tensor apply + g_16, p_16, m_16, v_16 = [], [], [], [] + g_bf, p_bf, m_bf, v_bf = [], [], [], [] + g_32, p_32, m_32, v_32 = [], [], [], [] + + for p in group['params']: + if p.grad is None: + continue + if p.grad.data.is_sparse: + raise RuntimeError( + 'FusedAdam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + # State initialization + if len(state) == 0: + # DeepSpeed ZeRO 3 processes each subgroup a time, so we need to keep tracking step count for each tensor separately. + # While this is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time. + # In order to keep backward compatibility for the existing checkpoints, we use group['state'] to initialize state['step'] if it exists. 
+ state['step'] = group.get('step', 0) + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if p.dtype == torch.float16: + g_16.append(p.grad.data) + p_16.append(p.data) + m_16.append(state['exp_avg']) + v_16.append(state['exp_avg_sq']) + elif p.dtype == torch.bfloat16: + g_bf.append(p.grad) + p_bf.append(p) + m_bf.append(state['exp_avg']) + v_bf.append(state['exp_avg_sq']) + elif p.dtype == torch.float32: + g_32.append(p.grad.data) + p_32.append(p.data) + m_32.append(state['exp_avg']) + v_32.append(state['exp_avg_sq']) + else: + raise RuntimeError('FusedAdam only support fp16, bf16 and fp32.') + + if len(g_16) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], + group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode, + bias_correction, group['weight_decay']) + + if len(g_bf) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf], + group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode, + bias_correction, group['weight_decay']) + + if len(g_32) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], + group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode, + bias_correction, group['weight_decay']) + + return loss diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py new file mode 100644 index 0000000000000000000000000000000000000000..0ba228505cef747eea4fec62f3e68707fa4daa0c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from NVIDIA/apex, commit a109f85 +""" + + +class MultiTensorApply(object): + + def __init__(self, chunk_size): + self.chunk_size = chunk_size + + def __call__(self, op, noop_flag_buffer, tensor_lists, *args): + return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args) diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..afe48159933c75045e9445a577d545b7ae260073 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import sys +import os +import pkgutil +import importlib + +from .builder import get_default_compute_capabilities, OpBuilder + +# Do not remove, required for abstract accelerator to detect if we have a deepspeed or 3p op_builder +__deepspeed__ = True + +# List of all available op builders from deepspeed op_builder +try: + import deepspeed.ops.op_builder # noqa: F401 # type: ignore + op_builder_dir = "deepspeed.ops.op_builder" +except ImportError: + op_builder_dir = "op_builder" + +__op_builders__ = [] + +this_module = sys.modules[__name__] + + +def builder_closure(member_name): + if op_builder_dir == "op_builder": + # during installation time cannot get builder due to torch not installed, + # return closure instead + def _builder(): + from deepspeed.accelerator import get_accelerator + builder = get_accelerator().create_op_builder(member_name) + return builder + + return _builder + else: + # during runtime, return op builder class directly + from deepspeed.accelerator import get_accelerator + builder = get_accelerator().get_op_builder(member_name) + return builder + + +# reflect builder names and add builder 
closure, such as 'TransformerBuilder()' creates op builder wrt current accelerator +for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]): + if module_name != 'all_ops' and module_name != 'builder': + module = importlib.import_module(f".{module_name}", package=op_builder_dir) + for member_name in module.__dir__(): + if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder": + # assign builder name to variable with same name + # the following is equivalent to i.e. TransformerBuilder = "TransformerBuilder" + this_module.__dict__[member_name] = builder_closure(member_name) diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e048b20fb8d3d2563ad8d7cb975c8bdc0647f65 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dd7467133bea9c2802116b33bdc5e211b84a150 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/all_ops.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/all_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9c41f35eaf1b8c94f5d313e8f8b381bed48536f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/all_ops.py @@ 
-0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import pkgutil +import importlib +try: + # during installation time accelerator is visible, otherwise return deepspeed.accelerator + from accelerator import get_accelerator +except ImportError: + from deepspeed.accelerator import get_accelerator + +# List of all available ops + +# reflect all builder names into __op_builders__ +op_builder_dir = get_accelerator().op_builder_dir() +op_builder_module = importlib.import_module(op_builder_dir) +__op_builders__ = [] + +for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]): + # avoid self references + if module_name != 'all_ops' and module_name != 'builder': + module = importlib.import_module("{}.{}".format(op_builder_dir, module_name)) + for member_name in module.__dir__(): + if member_name.endswith('Builder'): + # append builder to __op_builders__ list + builder = get_accelerator().create_op_builder(member_name) + __op_builders__.append(builder) + +ALL_OPS = {op.name: op for op in __op_builders__ if op is not None} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py new file mode 100644 index 0000000000000000000000000000000000000000..b55c821910b9ffad0254f0e5104eb066252c446e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import distutils.spawn +import subprocess + +from .builder import OpBuilder + + +class AsyncIOBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_AIO" + NAME = "async_io" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.aio.{self.NAME}_op' + + def sources(self): + return [ + 'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp', + 'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp', + 'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp', + 'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp', + 'csrc/aio/py_lib/deepspeed_pin_tensor.cpp' + ] + + def include_paths(self): + return ['csrc/aio/py_lib', 'csrc/aio/common'] + + def cxx_args(self): + # -O0 for improved debugging, since performance is bound by I/O + CPU_ARCH = self.cpu_arch() + SIMD_WIDTH = self.simd_width() + import torch # Keep this import here to avoid errors when building DeepSpeed wheel without torch installed + TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2]) + if TORCH_MAJOR >= 2 and TORCH_MINOR >= 1: + CPP_STD = '-std=c++17' + else: + CPP_STD = '-std=c++14' + return [ + '-g', + '-Wall', + '-O0', + CPP_STD, + '-shared', + '-fPIC', + '-Wno-reorder', + CPU_ARCH, + '-fopenmp', + SIMD_WIDTH, + '-laio', + ] + + def extra_ldflags(self): + return ['-laio'] + + def check_for_libaio_pkg(self): + libs = dict( + dpkg=["-l", "libaio-dev", "apt"], + pacman=["-Q", "libaio", "pacman"], + rpm=["-q", "libaio-devel", "yum"], + ) + + found = False + for pkgmgr, data in libs.items(): + flag, lib, tool = data + path = distutils.spawn.find_executable(pkgmgr) + if path is not None: + cmd = f"{pkgmgr} {flag} {lib}" + result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + if result.wait() == 0: + found = True + else: + 
self.warning(f"{self.NAME}: please install the {lib} package with {tool}") + break + return found + + def is_compatible(self, verbose=True): + # Check for the existence of libaio by using distutils + # to compile and link a test program that calls io_submit, + # which is a function provided by libaio that is used in the async_io op. + # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS + # respectively to specify the directories for libaio.h and libaio.so. + aio_compatible = self.has_function('io_pgetevents', ('aio', )) + if verbose and not aio_compatible: + self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.") + + # Check for the libaio package via known package managers + # to print suggestions on which package to install. + self.check_for_libaio_pkg() + + self.warning( + "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found." + ) + return super().is_compatible(verbose) and aio_compatible diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..dd77f967cc60a1def56d9af532f3c3cadb1f32c6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py @@ -0,0 +1,774 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import sys +import time +import importlib +from pathlib import Path +import subprocess +import shlex +import shutil +import tempfile +import distutils.ccompiler +import distutils.log +import distutils.sysconfig +from distutils.errors import CompileError, LinkError +from abc import ABC, abstractmethod +from typing import List + +YELLOW = '\033[93m' +END = '\033[0m' +WARNING = f"{YELLOW} [WARNING] {END}" + +DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions" +DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0" + +try: + import torch +except ImportError: + print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.") +else: + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + + +class MissingCUDAException(Exception): + pass + + +class CUDAMismatchException(Exception): + pass + + +def installed_cuda_version(name=""): + import torch.utils.cpp_extension + cuda_home = torch.utils.cpp_extension.CUDA_HOME + if cuda_home is None: + raise MissingCUDAException("CUDA_HOME does not exist, unable to compile CUDA op(s)") + # Ensure there is not a cuda version mismatch between torch and nvcc compiler + output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True) + output_split = output.split() + release_idx = output_split.index("release") + release = output_split[release_idx + 1].replace(',', '').split(".") + # Ignore patch versions, only look at major + minor + cuda_major, cuda_minor = release[:2] + return int(cuda_major), int(cuda_minor) + + +def get_default_compute_capabilities(): + compute_caps = DEFAULT_COMPUTE_CAPABILITIES + import torch.utils.cpp_extension + if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11: + if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0: + # Special treatment of CUDA 11.0 because compute_86 is 
not supported. + compute_caps += ";8.0" + else: + compute_caps += ";8.0;8.6" + return compute_caps + + +# list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used +# to build deepspeed and system-wide installed cuda 11.2 +cuda_minor_mismatch_ok = { + 10: ["10.0", "10.1", "10.2"], + 11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"], + 12: ["12.0", "12.1", "12.2", "12.3"], +} + + +def assert_no_cuda_mismatch(name=""): + cuda_major, cuda_minor = installed_cuda_version(name) + sys_cuda_version = f'{cuda_major}.{cuda_minor}' + torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) + # This is a show-stopping error, should probably not proceed past this + if sys_cuda_version != torch_cuda_version: + if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major] + and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]): + print(f"Installed CUDA version {sys_cuda_version} does not match the " + f"version torch was compiled with {torch.version.cuda} " + "but since the APIs are compatible, accepting this combination") + return True + elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1": + print( + f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the " + f"version torch was compiled with {torch.version.cuda}." + "Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior." 
+ ) + return True + raise CUDAMismatchException( + f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the " + f"version torch was compiled with {torch.version.cuda}, unable to compile " + "cuda/cpp extensions without a matching cuda version.") + return True + + +class OpBuilder(ABC): + _rocm_version = None + _is_rocm_pytorch = None + _is_sycl_enabled = None + _loaded_ops = {} + + def __init__(self, name): + self.name = name + self.jit_mode = False + self.build_for_cpu = False + self.enable_bf16 = False + self.error_log = None + + @abstractmethod + def absolute_name(self): + ''' + Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam + will be installed as something like: deepspeed/ops/adam/cpu_adam.so + ''' + pass + + @abstractmethod + def sources(self): + ''' + Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) + ''' + pass + + def hipify_extension(self): + pass + + def sycl_extension(self): + pass + + @staticmethod + def validate_torch_version(torch_info): + install_torch_version = torch_info['version'] + current_torch_version = ".".join(torch.__version__.split('.')[:2]) + if install_torch_version != current_torch_version: + raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed " + "with a different version than what is being used at runtime. " + f"Please re-install DeepSpeed or switch torch versions. " + f"Install torch version={install_torch_version}, " + f"Runtime torch version={current_torch_version}") + + @staticmethod + def validate_torch_op_version(torch_info): + if not OpBuilder.is_rocm_pytorch(): + current_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) + install_cuda_version = torch_info['cuda_version'] + if install_cuda_version != current_cuda_version: + raise RuntimeError("CUDA version mismatch! 
DeepSpeed ops were compiled and installed " + "with a different version than what is being used at runtime. " + f"Please re-install DeepSpeed or switch torch versions. " + f"Install CUDA version={install_cuda_version}, " + f"Runtime CUDA version={current_cuda_version}") + else: + current_hip_version = ".".join(torch.version.hip.split('.')[:2]) + install_hip_version = torch_info['hip_version'] + if install_hip_version != current_hip_version: + raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed " + "with a different version than what is being used at runtime. " + f"Please re-install DeepSpeed or switch torch versions. " + f"Install HIP version={install_hip_version}, " + f"Runtime HIP version={current_hip_version}") + + @staticmethod + def is_rocm_pytorch(): + if OpBuilder._is_rocm_pytorch is not None: + return OpBuilder._is_rocm_pytorch + + _is_rocm_pytorch = False + try: + import torch + except ImportError: + pass + else: + if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5): + _is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None + if _is_rocm_pytorch: + from torch.utils.cpp_extension import ROCM_HOME + _is_rocm_pytorch = ROCM_HOME is not None + OpBuilder._is_rocm_pytorch = _is_rocm_pytorch + return OpBuilder._is_rocm_pytorch + + @staticmethod + def is_sycl_enabled(): + if OpBuilder._is_sycl_enabled is not None: + return OpBuilder._is_sycl_enabled + + _is_sycl_enabled = False + try: + result = subprocess.run(["c2s", "--version"], capture_output=True) + except: + pass + else: + _is_sycl_enabled = True + + OpBuilder._is_sycl_enabled = _is_sycl_enabled + return OpBuilder._is_sycl_enabled + + @staticmethod + def installed_rocm_version(): + if OpBuilder._rocm_version: + return OpBuilder._rocm_version + + ROCM_MAJOR = '0' + ROCM_MINOR = '0' + if OpBuilder.is_rocm_pytorch(): + from torch.utils.cpp_extension import ROCM_HOME + rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev") + if 
rocm_ver_file.is_file(): + with open(rocm_ver_file, 'r') as file: + ROCM_VERSION_DEV_RAW = file.read() + elif "rocm" in torch.__version__: + ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1] + else: + assert False, "Could not detect ROCm version" + assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version" + ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0] + ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1] + OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR)) + return OpBuilder._rocm_version + + def include_paths(self): + ''' + Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) + ''' + return [] + + def nvcc_args(self): + ''' + Returns optional list of compiler flags to forward to nvcc when building CUDA sources + ''' + return [] + + def cxx_args(self): + ''' + Returns optional list of compiler flags to forward to the build + ''' + return [] + + def is_compatible(self, verbose=True): + ''' + Check if all non-python dependencies are satisfied to build this op + ''' + return True + + def extra_ldflags(self): + return [] + + def has_function(self, funcname, libraries, verbose=False): + ''' + Test for existence of a function within a tuple of libraries. + + This is used as a smoke test to check whether a certain library is available. + As a test, this creates a simple C program that calls the specified function, + and then distutils is used to compile that program and link it with the specified libraries. + Returns True if both the compile and link are successful, False otherwise. + ''' + tempdir = None # we create a temporary directory to hold various files + filestderr = None # handle to open file to which we redirect stderr + oldstderr = None # file descriptor for stderr + try: + # Echo compile and link commands that are used. + if verbose: + distutils.log.set_verbosity(1) + + # Create a compiler object. 
+ compiler = distutils.ccompiler.new_compiler(verbose=verbose) + + # Configure compiler and linker to build according to Python install. + distutils.sysconfig.customize_compiler(compiler) + + # Create a temporary directory to hold test files. + tempdir = tempfile.mkdtemp() + + # Define a simple C program that calls the function in question + prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname) + + # Write the test program to a file. + filename = os.path.join(tempdir, 'test.c') + with open(filename, 'w') as f: + f.write(prog) + + # Redirect stderr file descriptor to a file to silence compile/link warnings. + if not verbose: + filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w') + oldstderr = os.dup(sys.stderr.fileno()) + os.dup2(filestderr.fileno(), sys.stderr.fileno()) + + # Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames() + # Otherwise, a local directory will be used instead of tempdir + drive, driveless_filename = os.path.splitdrive(filename) + root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else '' + output_dir = os.path.join(drive, root_dir) + + # Attempt to compile the C program into an object file. + cflags = shlex.split(os.environ.get('CFLAGS', "")) + objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags)) + + # Attempt to link the object file into an executable. + # Be sure to tack on any libraries that have been specified. + ldflags = shlex.split(os.environ.get('LDFLAGS', "")) + compiler.link_executable(objs, + os.path.join(tempdir, 'a.out'), + extra_preargs=self.strip_empty_entries(ldflags), + libraries=libraries) + + # Compile and link succeeded + return True + + except CompileError: + return False + + except LinkError: + return False + + except: + return False + + finally: + # Restore stderr file descriptor and close the stderr redirect file. 
+ if oldstderr is not None: + os.dup2(oldstderr, sys.stderr.fileno()) + if filestderr is not None: + filestderr.close() + + # Delete the temporary directory holding the test program and stderr files. + if tempdir is not None: + shutil.rmtree(tempdir) + + def strip_empty_entries(self, args): + ''' + Drop any empty strings from the list of compile and link flags + ''' + return [x for x in args if len(x) > 0] + + def cpu_arch(self): + try: + from cpuinfo import get_cpu_info + except ImportError as e: + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return "-march=native" + + try: + cpu_info = get_cpu_info() + except Exception as e: + self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " + "falling back to `lscpu` to get this information.") + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return "-march=native" + + if cpu_info['arch'].startswith('PPC_'): + # gcc does not provide -march on PowerPC, use -mcpu instead + return '-mcpu=native' + return '-march=native' + + def is_cuda_enable(self): + try: + assert_no_cuda_mismatch(self.name) + return '-D__ENABLE_CUDA__' + except MissingCUDAException: + print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, " + "only cpu ops can be compiled!") + return '-D__DISABLE_CUDA__' + return '-D__DISABLE_CUDA__' + + def _backup_cpuinfo(self): + # Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides + if not self.command_exists('lscpu'): + self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo " + "to detect the CPU architecture. 
'lscpu' does not appear to exist on " + "your system, will fall back to use -march=native and non-vectorized execution.") + return None + result = subprocess.check_output('lscpu', shell=True) + result = result.decode('utf-8').strip().lower() + + cpu_info = {} + cpu_info['arch'] = None + cpu_info['flags'] = "" + if 'genuineintel' in result or 'authenticamd' in result: + cpu_info['arch'] = 'X86_64' + if 'avx512' in result: + cpu_info['flags'] += 'avx512,' + elif 'avx512f' in result: + cpu_info['flags'] += 'avx512f,' + if 'avx2' in result: + cpu_info['flags'] += 'avx2' + elif 'ppc64le' in result: + cpu_info['arch'] = "PPC_" + + return cpu_info + + def simd_width(self): + try: + from cpuinfo import get_cpu_info + except ImportError as e: + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return '-D__SCALAR__' + + try: + cpu_info = get_cpu_info() + except Exception as e: + self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " + "falling back to `lscpu` to get this information.") + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return '-D__SCALAR__' + + if cpu_info['arch'] == 'X86_64': + if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']: + return '-D__AVX512__' + elif 'avx2' in cpu_info['flags']: + return '-D__AVX256__' + return '-D__SCALAR__' + + def command_exists(self, cmd): + if '|' in cmd: + cmds = cmd.split("|") + else: + cmds = [cmd] + valid = False + for cmd in cmds: + result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True) + valid = valid or result.wait() == 0 + + if not valid and len(cmds) > 1: + print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!") + elif not valid and len(cmds) == 1: + print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!") + return valid + + def warning(self, msg): + self.error_log = f"{msg}" + print(f"{WARNING} {msg}") + + def deepspeed_src_path(self, 
code_path): + if os.path.isabs(code_path): + return code_path + else: + return os.path.join(Path(__file__).parent.parent.absolute(), code_path) + + def builder(self): + from torch.utils.cpp_extension import CppExtension + include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())] + return CppExtension(name=self.absolute_name(), + sources=self.strip_empty_entries(self.sources()), + include_dirs=include_dirs, + extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())}, + extra_link_args=self.strip_empty_entries(self.extra_ldflags())) + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops, torch_info + if installed_ops.get(self.name, False): + # Ensure the op we're about to load was compiled with the same + # torch/cuda versions we are currently using at runtime. + self.validate_torch_version(torch_info) + if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder): + self.validate_torch_op_version(torch_info) + + op_module = importlib.import_module(self.absolute_name()) + __class__._loaded_ops[self.name] = op_module + return op_module + else: + return self.jit_load(verbose) + + def jit_load(self, verbose=True): + if not self.is_compatible(verbose): + raise RuntimeError( + f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. 
{self.error_log}" + ) + try: + import ninja # noqa: F401 # type: ignore + except ImportError: + raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.") + + if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch(): + self.build_for_cpu = not torch.cuda.is_available() + + self.jit_mode = True + from torch.utils.cpp_extension import load + + start_build = time.time() + sources = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.sources()] + extra_include_paths = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.include_paths()] + + # Torch will try and apply whatever CCs are in the arch list at compile time, + # we have already set the intended targets ourselves we know that will be + # needed at runtime. This prevents CC collisions such as multiple __half + # implementations. Stash arch list to reset after build. + torch_arch_list = None + if "TORCH_CUDA_ARCH_LIST" in os.environ: + torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST") + os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + nvcc_args = self.strip_empty_entries(self.nvcc_args()) + cxx_args = self.strip_empty_entries(self.cxx_args()) + + if isinstance(self, CUDAOpBuilder): + if not self.build_for_cpu and self.enable_bf16: + cxx_args.append("-DBF16_AVAILABLE") + nvcc_args.append("-DBF16_AVAILABLE") + nvcc_args.append("-U__CUDA_NO_BFLOAT16_OPERATORS__") + nvcc_args.append("-U__CUDA_NO_BFLOAT162_OPERATORS__") + + if self.is_rocm_pytorch(): + cxx_args.append("-D__HIP_PLATFORM_AMD__=1") + + op_module = load(name=self.name, + sources=self.strip_empty_entries(sources), + extra_include_paths=self.strip_empty_entries(extra_include_paths), + extra_cflags=cxx_args, + extra_cuda_cflags=nvcc_args, + extra_ldflags=self.strip_empty_entries(self.extra_ldflags()), + verbose=verbose) + + build_duration = time.time() - start_build + if verbose: + print(f"Time to load {self.name} op: {build_duration} seconds") + + # Reset arch list so we are not 
silently removing it for other possible use cases + if torch_arch_list: + os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list + + __class__._loaded_ops[self.name] = op_module + + return op_module + + +class CUDAOpBuilder(OpBuilder): + + def compute_capability_args(self, cross_compile_archs=None): + """ + Returns nvcc compute capability compile flags. + + 1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`. + 2. If neither is set default compute capabilities will be used + 3. Under `jit_mode` compute capabilities of all visible cards will be used plus PTX + + Format: + + - `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples: + + TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ... + TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ... + + - `cross_compile_archs` uses ; separator. + + """ + ccs = [] + if self.jit_mode: + # Compile for underlying architectures since we know those at runtime + for i in range(torch.cuda.device_count()): + CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i) + cc = f"{CC_MAJOR}.{CC_MINOR}" + if cc not in ccs: + ccs.append(cc) + ccs = sorted(ccs) + ccs[-1] += '+PTX' + else: + # Cross-compile mode, compile for various architectures + # env override takes priority + cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None) + if cross_compile_archs_env is not None: + if cross_compile_archs is not None: + print( + f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`" + ) + cross_compile_archs = cross_compile_archs_env.replace(' ', ';') + else: + if cross_compile_archs is None: + cross_compile_archs = get_default_compute_capabilities() + ccs = cross_compile_archs.split(';') + + ccs = self.filter_ccs(ccs) + if len(ccs) == 0: + raise RuntimeError( + f"Unable to load {self.name} op due to no compute capabilities remaining after filtering") + + args = [] + self.enable_bf16 = True + for cc in ccs: + num = cc[0] + cc[2] 
+ args.append(f'-gencode=arch=compute_{num},code=sm_{num}') + if cc.endswith('+PTX'): + args.append(f'-gencode=arch=compute_{num},code=compute_{num}') + + if int(cc[0]) <= 7: + self.enable_bf16 = False + + return args + + def filter_ccs(self, ccs: List[str]): + """ + Prune any compute capabilities that are not compatible with the builder. Should log + which CCs have been pruned. + """ + return ccs + + def version_dependent_macros(self): + # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456 + version_ge_1_1 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): + version_ge_1_1 = ['-DVERSION_GE_1_1'] + version_ge_1_3 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): + version_ge_1_3 = ['-DVERSION_GE_1_3'] + version_ge_1_5 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4): + version_ge_1_5 = ['-DVERSION_GE_1_5'] + return version_ge_1_1 + version_ge_1_3 + version_ge_1_5 + + def is_compatible(self, verbose=True): + return super().is_compatible(verbose) + + def builder(self): + try: + if not self.is_rocm_pytorch(): + assert_no_cuda_mismatch(self.name) + self.build_for_cpu = False + except MissingCUDAException: + self.build_for_cpu = True + + if self.build_for_cpu: + from torch.utils.cpp_extension import CppExtension as ExtensionBuilder + else: + from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder + include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())] + compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \ + {'cxx': self.strip_empty_entries(self.cxx_args()), \ + 'nvcc': self.strip_empty_entries(self.nvcc_args())} + + if not self.build_for_cpu and self.enable_bf16: + compile_args['cxx'].append("-DBF16_AVAILABLE") + + if self.is_rocm_pytorch(): + compile_args['cxx'].append("-D__HIP_PLATFORM_AMD__=1") + + cuda_ext = ExtensionBuilder(name=self.absolute_name(), + 
sources=self.strip_empty_entries(self.sources()), + include_dirs=include_dirs, + libraries=self.strip_empty_entries(self.libraries_args()), + extra_compile_args=compile_args, + extra_link_args=self.strip_empty_entries(self.extra_ldflags())) + + if self.is_rocm_pytorch(): + # hip converts paths to absolute, this converts back to relative + sources = cuda_ext.sources + curr_file = Path(__file__).parent.parent # ds root + for i in range(len(sources)): + src = Path(sources[i]) + if src.is_absolute(): + sources[i] = str(src.relative_to(curr_file)) + else: + sources[i] = str(src) + cuda_ext.sources = sources + return cuda_ext + + def hipify_extension(self): + if self.is_rocm_pytorch(): + from torch.utils.hipify import hipify_python + hipify_python.hipify( + project_directory=os.getcwd(), + output_directory=os.getcwd(), + header_include_dirs=self.include_paths(), + includes=[os.path.join(os.getcwd(), '*')], + extra_files=[os.path.abspath(s) for s in self.sources()], + show_detailed=True, + is_pytorch_extension=True, + hipify_extra_files_only=True, + ) + + def cxx_args(self): + if sys.platform == "win32": + return ['-O2'] + else: + return ['-O3', '-std=c++17', '-g', '-Wno-reorder'] + + def nvcc_args(self): + if self.build_for_cpu: + return [] + args = ['-O3'] + if self.is_rocm_pytorch(): + ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version() + args += [ + '-std=c++17', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__', + '-U__HIP_NO_HALF2_OPERATORS__', + '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, + '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR + ] + else: + try: + nvcc_threads = int(os.getenv("DS_NVCC_THREADS", "")) + if nvcc_threads <= 0: + raise ValueError("") + except ValueError: + nvcc_threads = min(os.cpu_count(), 8) + + cuda_major, _ = installed_cuda_version() + args += [ + '-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math', + '-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', + 
'-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', f'--threads={nvcc_threads}' + ] + if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1': + args.append('--ptxas-options=-v') + args += self.compute_capability_args() + return args + + def libraries_args(self): + if self.build_for_cpu: + return [] + + if sys.platform == "win32": + return ['cublas', 'curand'] + else: + return [] + + +class TorchCPUOpBuilder(CUDAOpBuilder): + + def extra_ldflags(self): + if self.build_for_cpu: + return ['-fopenmp'] + + if not self.is_rocm_pytorch(): + return ['-lcurand'] + + return [] + + def cxx_args(self): + import torch + args = [] + if not self.build_for_cpu: + if not self.is_rocm_pytorch(): + CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64") + if not os.path.exists(CUDA_LIB64): + CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib") + else: + CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib") + + args += super().cxx_args() + args += [ + f'-L{CUDA_LIB64}', + '-lcudart', + '-lcublas', + '-g', + ] + + CPU_ARCH = self.cpu_arch() + SIMD_WIDTH = self.simd_width() + CUDA_ENABLE = self.is_cuda_enable() + args += [ + CPU_ARCH, + '-fopenmp', + SIMD_WIDTH, + CUDA_ENABLE, + ] + + return args diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7f0382f00b433e05c935c813cd04cbd03ade239 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1a5201ee94eb0e4cf5ab0d0f4d17d8b1a76b6af0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bab2ada1d119a7e6f50e94bf1e246977fd8cefb1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a8229727285585a8f5041fa66107c190b6747bc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..69d114a9f1c0b0defc482c1fb143c261fc466125 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CPUOpBuilder + + +class NotImplementedBuilder(CPUOpBuilder): + BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED" + NAME = "deepspeed_not_implemented" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.comm.{self.NAME}_op' + + def load(self, verbose=True): + raise ValueError("This op had not been implemented on CPU backend.") + + def sources(self): + return [] diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adagrad.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..d3f163f7464aad4f9307bb0796c9a7ef606ea44f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adagrad.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from .builder import TorchCPUOpBuilder + + +class CPUAdagradBuilder(TorchCPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAGRAD" + NAME = "cpu_adagrad" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adagrad.{self.NAME}_op' + + def sources(self): + if self.build_for_cpu: + return ['csrc/adagrad/cpu_adagrad.cpp'] + + return ['csrc/adagrad/cpu_adagrad.cpp', 'csrc/common/custom_cuda_kernel.cu'] + + def libraries_args(self): + args = super().libraries_args() + if self.build_for_cpu: + return args + + if not self.is_rocm_pytorch(): + args += ['curand'] + return args + + def include_paths(self): + import torch + if self.build_for_cpu: + CUDA_INCLUDE = [] + elif not self.is_rocm_pytorch(): + CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] + else: + CUDA_INCLUDE = [] + return ['csrc/includes'] + CUDA_INCLUDE diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adam.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..7c34c4ce43a1642c52d596cf5c45f52281a2516e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_adam.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from .builder import TorchCPUOpBuilder + + +class CPUAdamBuilder(TorchCPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAM" + NAME = "cpu_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + if self.build_for_cpu: + return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] + + return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp', 'csrc/common/custom_cuda_kernel.cu'] + + def libraries_args(self): + args = super().libraries_args() + if self.build_for_cpu: + return args + + if not self.is_rocm_pytorch(): + args += ['curand'] + + return args + + def include_paths(self): + import torch + if self.build_for_cpu: + CUDA_INCLUDE = [] + elif not self.is_rocm_pytorch(): + CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] + else: + CUDA_INCLUDE = [] + return ['csrc/includes'] + CUDA_INCLUDE diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py new file mode 100644 index 0000000000000000000000000000000000000000..5c16d10ebb4453bc222001a95867631c6e5509f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py @@ -0,0 +1,48 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from .builder import TorchCPUOpBuilder + + +class CPULionBuilder(TorchCPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_LION" + NAME = "cpu_lion" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.lion.{self.NAME}_op' + + def sources(self): + if self.build_for_cpu: + return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp'] + + return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp', 'csrc/common/custom_cuda_kernel.cu'] + + def libraries_args(self): + args = super().libraries_args() + if self.build_for_cpu: + return args + + if not self.is_rocm_pytorch(): + args += ['curand'] + + return args + + def include_paths(self): + import torch + if self.build_for_cpu: + CUDA_INCLUDE = [] + elif not self.is_rocm_pytorch(): + CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] + else: + CUDA_INCLUDE = [ + os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"), + os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"), + os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"), + ] + return ['csrc/includes'] + CUDA_INCLUDE diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..6e7721f94e012b7c22c28d83ce74220b7c4ff9a7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder, installed_cuda_version +import os + + +class EvoformerAttnBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_EVOFORMER_ATTN" + NAME = "evoformer_attn" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + self.cutlass_path = os.environ.get('CUTLASS_PATH') + + def absolute_name(self): + return f'deepspeed.ops.{self.NAME}_op' + + def extra_ldflags(self): + if not self.is_rocm_pytorch(): + return ['-lcurand'] + else: + return [] + + def sources(self): + src_dir = 'csrc/deepspeed4science/evoformer_attn' + return [f'{src_dir}/attention.cpp', f'{src_dir}/attention_back.cu', f'{src_dir}/attention_cu.cu'] + + def nvcc_args(self): + args = super().nvcc_args() + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile kernels") + return args + major = torch.cuda.get_device_properties(0).major #ignore-cuda + minor = torch.cuda.get_device_properties(0).minor #ignore-cuda + args.append(f"-DGPU_ARCH={major}{minor}") + return args + + def is_compatible(self, verbose=True): + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile kernels") + return False + if self.cutlass_path is None: + self.warning("Please specify the CUTLASS repo directory as environment variable $CUTLASS_PATH") + return False + with open(f'{self.cutlass_path}/CHANGELOG.md', 'r') as f: + if '3.1.0' not in f.read(): + self.warning("Please use CUTLASS version >= 3.1.0") + return False + cuda_okay = True + if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda + sys_cuda_major, _ = installed_cuda_version() + torch_cuda_major = int(torch.version.cuda.split('.')[0]) + cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda + if cuda_capability < 7: + self.warning("Please use a GPU with compute capability >= 7.0") + cuda_okay = False + 
if torch_cuda_major < 11 or sys_cuda_major < 11: + self.warning("Please use CUDA 11+") + cuda_okay = False + return super().is_compatible(verbose) and cuda_okay + + def include_paths(self): + includes = [f'{self.cutlass_path}/include', f'{self.cutlass_path}/tools/util/include'] + return includes diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..ac6e4eeaaea5d9b2c2ee70de3d4261c6348abe94 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder + +import sys + + +class FusedAdamBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_FUSED_ADAM" + NAME = "fused_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu'] + + def include_paths(self): + return ['csrc/includes', 'csrc/adam'] + + def cxx_args(self): + args = super().cxx_args() + return args + self.version_dependent_macros() + + def nvcc_args(self): + nvcc_flags = ['-O3'] + self.version_dependent_macros() + if not self.is_rocm_pytorch(): + nvcc_flags.extend( + ['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] + + self.compute_capability_args()) + return nvcc_flags diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lamb.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..f0cb557706b3f7e6d4ab73d7a999a70aebecbeb3 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lamb.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder + +import sys + + +class FusedLambBuilder(CUDAOpBuilder): + BUILD_VAR = 'DS_BUILD_FUSED_LAMB' + NAME = "fused_lamb" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.lamb.{self.NAME}_op' + + def sources(self): + return ['csrc/lamb/fused_lamb_cuda.cpp', 'csrc/lamb/fused_lamb_cuda_kernel.cu'] + + def include_paths(self): + return ['csrc/includes'] + + def cxx_args(self): + args = super().cxx_args() + return args + self.version_dependent_macros() + + def nvcc_args(self): + nvcc_flags = ['-O3'] + self.version_dependent_macros() + if self.is_rocm_pytorch(): + ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version() + nvcc_flags += ['-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR] + else: + nvcc_flags.extend( + ['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] + + self.compute_capability_args()) + return nvcc_flags diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py new file mode 100644 index 0000000000000000000000000000000000000000..b900a8f2369df14d65a73235d360db21583ee4ab --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. 
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import sys

from .builder import CUDAOpBuilder


class FusedLionBuilder(CUDAOpBuilder):
    """Op builder for the fused CUDA Lion optimizer kernel."""

    BUILD_VAR = "DS_BUILD_FUSED_LION"
    NAME = "fused_lion"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.lion.{self.NAME}_op'

    def sources(self):
        return [
            'csrc/lion/fused_lion_frontend.cpp',
            'csrc/lion/multi_tensor_lion.cu',
        ]

    def include_paths(self):
        return ['csrc/includes', 'csrc/lion']

    def cxx_args(self):
        # Host-compiler flags: base flags plus torch-version macros.
        return super().cxx_args() + self.version_dependent_macros()

    def nvcc_args(self):
        flags = ['-O3']
        flags += self.version_dependent_macros()
        if self.is_rocm_pytorch():
            return flags
        # MSVC needs -allow-unsupported-compiler; elsewhere an empty
        # placeholder entry is appended (stripped downstream).
        win_flag = '-allow-unsupported-compiler' if sys.platform == "win32" else ''
        flags += [win_flag, '-lineinfo', '--use_fast_math']
        flags += self.compute_capability_args()
        return flags
# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os

try:
    # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
    # if successful this also means we're doing a local install and not JIT compile path
    from op_builder import __deepspeed__  # noqa: F401 # type: ignore
    from op_builder.builder import OpBuilder
except ImportError:
    from deepspeed.ops.op_builder.builder import OpBuilder


class CPUOpBuilder(OpBuilder):
    """Builds DeepSpeed ops as plain C++ (CPU-only) torch extensions."""

    def builder(self):
        # Imported lazily so merely instantiating the builder never needs torch.
        from torch.utils.cpp_extension import CppExtension

        header_dirs = [os.path.abspath(p) for p in self.strip_empty_entries(self.include_paths())]
        return CppExtension(
            name=self.absolute_name(),
            sources=self.strip_empty_entries(self.sources()),
            include_dirs=header_dirs,
            libraries=self.strip_empty_entries(self.libraries_args()),
            extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
        )

    def cxx_args(self):
        return ['-O3', '-g', '-Wno-reorder']

    def libraries_args(self):
        # No extra link libraries by default; subclasses may extend.
        return []
# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CPUOpBuilder


class FusedAdamBuilder(CPUOpBuilder):
    """HPU/CPU build of the fused Adam optimizer op."""

    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
    NAME = "fused_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']

    def cxx_args(self):
        # Base CPU flags plus bf16 support.
        return super().cxx_args() + ['-DENABLE_BFLOAT16']

    def include_paths(self):
        return ['csrc/includes']
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CPUOpBuilder


class NotImplementedBuilder(CPUOpBuilder):
    """Placeholder builder for ops with no HPU implementation.

    Loading this builder always raises, so a missing kernel fails loudly
    instead of silently compiling nothing.
    """

    BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
    NAME = "deepspeed_not_implemented"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.comm.{self.NAME}_op'

    def load(self, verbose=True):
        # Fixed grammar of the user-facing error message ("had" -> "has").
        raise ValueError("This op has not been implemented on HPU backend.")

    def sources(self):
        return []


# ---- deepspeed/ops/op_builder/inference_core_ops.py ----

import os

from .builder import CUDAOpBuilder, installed_cuda_version


class InferenceCoreBuilder(CUDAOpBuilder):
    """Builder for the inference-v2 core kernel ops (norms, activations, etc.)."""

    BUILD_VAR = "DS_BUILD_INFERENCE_CORE_OPS"
    NAME = "inference_core_ops"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        # Fixed: a '.' was missing before {self.NAME}, which produced the
        # malformed module path 'deepspeed.inference.v2.kernelsinference_core_ops'.
        # Sibling builders (cutlass_ops, ragged_ops) all use a dotted path here.
        return f'deepspeed.inference.v2.kernels.{self.NAME}'

    def is_compatible(self, verbose=True):
        """Return False (with a warning) when torch/CUDA requirements are unmet."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8:
                # Ampere+ kernels require the CUDA 11 toolchain in both torch and the system.
                if torch_cuda_major < 11 or sys_cuda_major < 11:
                    self.warning("On Ampere and higher architectures please use CUDA 11+")
                    cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        """Drop compute capabilities older than Pascal (SM60) from the build list."""
        ccs_retained = []
        ccs_pruned = []
        for cc in ccs:
            if int(cc[0]) >= 6:
                ccs_retained.append(cc)
            else:
                ccs_pruned.append(cc)
        if len(ccs_pruned) > 0:
            self.warning(f"Filtered compute capabilities {ccs_pruned}")
        return ccs_retained

    def get_prefix(self):
        # Source paths are relative to the deepspeed package dir for a repo
        # checkout, or one level up in a JIT-compile layout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        import torch

        sources = [
            "inference/v2/kernels/core_ops/core_ops.cpp",
            "inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp",
            "inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu",
            "inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp",
            "inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu",
            "inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp",
            "inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu",
            "inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp",
            "inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu",
        ]

        # The source files with specific GPU architecture requirements.
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            # NOTE(review): '!= 8' also excludes newer archs (e.g. SM90) from the
            # FP6 kernels, not just older ones — confirm this is intentional.
            if cuda_capability != 8:
                self.warning("FP6 quantization kernel is only supported on Ampere architectures")
            else:
                sources.append("inference/v2/kernels/core_ops/cuda_linear/fp6_linear.cu")
                sources.append("inference/v2/kernels/core_ops/cuda_linear/cuda_linear_kernels.cpp")

        prefix = self.get_prefix()
        sources = [os.path.join(prefix, src) for src in sources]
        return sources

    def extra_ldflags(self):
        return []

    def include_paths(self):
        sources = [
            'inference/v2/kernels/core_ops/bias_activations',
            'inference/v2/kernels/core_ops/blas_kernels',
            'inference/v2/kernels/core_ops/cuda_layer_norm',
            'inference/v2/kernels/core_ops/cuda_rms_norm',
            'inference/v2/kernels/core_ops/gated_activations',
            'inference/v2/kernels/core_ops/cuda_linear',
            'inference/v2/kernels/includes',
        ]

        prefix = self.get_prefix()
        sources = [os.path.join(prefix, src) for src in sources]

        return sources
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
import os

from .builder import CUDAOpBuilder, installed_cuda_version


class InferenceCutlassBuilder(CUDAOpBuilder):
    """Builder for the inference-v2 CUTLASS-backed GEMM ops."""

    BUILD_VAR = "DS_BUILD_CUTLASS_OPS"
    NAME = "cutlass_ops"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.inference.v2.kernels.cutlass_ops.{self.NAME}'

    def is_compatible(self, verbose=True):
        """Return False (with a warning) when torch/CUDA requirements are unmet."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Only support Ampere and newer
        kept = [cc for cc in ccs if int(cc[0]) >= 8]
        dropped = [cc for cc in ccs if int(cc[0]) < 8]
        if dropped:
            self.warning(f"Filtered compute capabilities {dropped}")
        return kept

    def get_prefix(self):
        # Relative to the deepspeed package dir in a checkout, or one level
        # up in the JIT-compile layout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        rel_sources = [
            "inference/v2/kernels/cutlass_ops/cutlass_ops.cpp",
            "inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu",
            "inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu",
        ]
        root = self.get_prefix()
        return [os.path.join(root, src) for src in rel_sources]

    def extra_ldflags(self):
        # Link against the prebuilt deepspeed-kernels library shipped by dskernels.
        import dskernels
        lib_path = self.deepspeed_src_path(os.path.join(self.get_prefix(), dskernels.library_path()))

        flags = [f'-L{lib_path}', '-ldeepspeedft']
        if self.jit_load:
            # JIT builds are not installed, so bake the library dir into rpath.
            flags.append(f'-Wl,-rpath,{lib_path}')
        return flags

    def include_paths(self):
        rel_includes = [
            'inference/v2/kernels/includes',
            'inference/v2/kernels/cutlass_ops/mixed_gemm',
            'inference/v2/kernels/cutlass_ops/moe_gemm',
            'inference/v2/kernels/cutlass_ops/shared_resources/',
        ]
        root = self.get_prefix()
        return [os.path.join(root, inc) for inc in rel_includes]
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder


class QuantizerBuilder(CUDAOpBuilder):
    """Builder for the DeepSpeed quantization CUDA kernels."""

    BUILD_VAR = "DS_BUILD_QUANTIZER"
    NAME = "quantizer"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.quantizer.{self.NAME}_op'

    def sources(self):
        kernel_files = [
            'pt_binding.cpp',
            'fake_quantizer.cu',
            'quantize.cu',
            'quantize_intX.cu',
            'dequantize.cu',
            'swizzled_quantize.cu',
            'quant_reduce.cu',
        ]
        return ['csrc/quantization/' + f for f in kernel_files]

    def include_paths(self):
        return ['csrc/includes']

    def extra_ldflags(self):
        # cuRAND backs the fake-quantization RNG on CUDA; ROCm needs nothing extra.
        return [] if self.is_rocm_pytorch() else ['-lcurand']
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os

from .builder import CUDAOpBuilder, installed_cuda_version


class RaggedOpsBuilder(CUDAOpBuilder):
    """Builder for the inference-v2 ragged-batching device kernels."""

    BUILD_VAR = "DS_BUILD_RAGGED_DEVICE_OPS"
    NAME = "ragged_device_ops"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.inference.v2.kernels.ragged_ops.{self.NAME}'

    def is_compatible(self, verbose=True):
        """Return False (with a warning) when torch/CUDA requirements are unmet."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Blocked flash has a dependency on Ampere + newer
        kept = [cc for cc in ccs if int(cc[0]) >= 8]
        dropped = [cc for cc in ccs if int(cc[0]) < 8]
        if dropped:
            self.warning(f"Filtered compute capabilities {dropped}")
        return kept

    def get_prefix(self):
        # Relative to the deepspeed package dir in a checkout, or one level
        # up in the JIT-compile layout.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        rel_sources = [
            "inference/v2/kernels/ragged_ops/ragged_ops.cpp",
            "inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp",
            "inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp",
            "inference/v2/kernels/ragged_ops/embed/embed.cpp",
            "inference/v2/kernels/ragged_ops/embed/embed_cuda.cu",
            "inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp",
            "inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu",
            "inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp",
            "inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu",
            "inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp",
            "inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter_cuda.cu",
            "inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp",
            "inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu",
            "inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.cpp",
            "inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp",
            "inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu",
        ]
        root = self.get_prefix()
        return [os.path.join(root, src) for src in rel_sources]

    def extra_ldflags(self):
        # Link against the prebuilt blocked-flash library shipped by dskernels.
        import dskernels
        lib_path = dskernels.library_path()

        lib_path = self.deepspeed_src_path(os.path.join(self.get_prefix(), lib_path))

        flags = [f'-L{lib_path}', '-lblockedflash']
        if self.jit_load:
            # JIT builds are not installed, so bake the library dir into rpath.
            flags.append(f'-Wl,-rpath,{lib_path}')
        return flags

    def include_paths(self):
        rel_includes = [
            'inference/v2/kernels/includes',
            'inference/v2/kernels/ragged_ops',
            'inference/v2/kernels/ragged_ops/atom_builder',
            'inference/v2/kernels/ragged_ops/blocked_flash',
            'inference/v2/kernels/ragged_ops/embed',
            'inference/v2/kernels/ragged_ops/includes',
            'inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary',
            'inference/v2/kernels/ragged_ops/logits_gather',
            'inference/v2/kernels/ragged_ops/moe_gather',
            'inference/v2/kernels/ragged_ops/moe_scatter',
            'inference/v2/kernels/ragged_ops/ragged_helpers',
            'inference/v2/kernels/ragged_ops/top_k_gating',
        ]
        root = self.get_prefix()
        return [os.path.join(root, inc) for inc in rel_includes]


# ---- deepspeed/ops/op_builder/ragged_utils.py ----


class RaggedUtilsBuilder(CUDAOpBuilder):
    """Builder for the inference-v2 ragged host-side utility ops."""

    BUILD_VAR = "DS_BUILD_RAGGED_OPS"
    NAME = "ragged_ops"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.inference.v2.{self.NAME}'

    def is_compatible(self, verbose=True):
        """Return False (with a warning) when torch/CUDA requirements are unmet."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Pascal (SM60) or newer only.
        kept = [cc for cc in ccs if int(cc[0]) >= 6]
        dropped = [cc for cc in ccs if int(cc[0]) < 6]
        if dropped:
            self.warning(f"Filtered compute capabilities {dropped}")
        return kept

    def get_prefix(self):
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        rel_sources = [
            "inference/v2/ragged/csrc/fast_host_buffer.cu",
            "inference/v2/ragged/csrc/ragged_ops.cpp",
        ]
        root = self.get_prefix()
        return [os.path.join(root, src) for src in rel_sources]

    def extra_ldflags(self):
        return []

    def include_paths(self):
        rel_includes = ['inference/v2/ragged/includes', 'inference/v2/kernels/includes']
        root = self.get_prefix()
        return [os.path.join(root, inc) for inc in rel_includes]
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder


class RandomLTDBuilder(CUDAOpBuilder):
    """Builder for the random layerwise token dropping (random-LTD) kernels."""

    BUILD_VAR = "DS_BUILD_RANDOM_LTD"
    NAME = "random_ltd"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.{self.NAME}_op'

    def extra_ldflags(self):
        # cuRAND is only needed for the CUDA (non-ROCm) build.
        return [] if self.is_rocm_pytorch() else ['-lcurand']

    def sources(self):
        return [
            'csrc/random_ltd/pt_binding.cpp',
            'csrc/random_ltd/gather_scatter.cu',
            'csrc/random_ltd/slice_attn_masks.cu',
            'csrc/random_ltd/token_sort.cu',
        ]

    def include_paths(self):
        return ['csrc/includes']
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import OpBuilder

try:
    from packaging import version as pkg_version
except ImportError:
    pkg_version = None


class SparseAttnBuilder(OpBuilder):
    """Builder for the triton-based sparse attention op.

    Compatibility is intentionally strict: CUDA 10.1+, torch 1.5-1.x, and
    exactly triton 1.0.0 (see is_compatible).
    """

    BUILD_VAR = "DS_BUILD_SPARSE_ATTN"
    NAME = "sparse_attn"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.sparse_attention.{self.NAME}_op'

    def sources(self):
        return ['csrc/sparse_attention/utils.cpp']

    def cxx_args(self):
        return ['-O2', '-fopenmp']

    def is_compatible(self, verbose=True):
        """Return False (with warnings) when any dependency is missing or unsupported."""
        # Check to see if llvm and cmake are installed since they are dependencies
        #required_commands = ['llvm-config|llvm-config-9', 'cmake']
        #command_status = list(map(self.command_exists, required_commands))
        #deps_compatible = all(command_status)

        if self.is_rocm_pytorch():
            self.warning(f'{self.NAME} is not compatible with ROCM')
            return False

        try:
            import torch
        except ImportError:
            # Fixed: dropped extraneous f-string prefix (no placeholders, F541).
            self.warning("unable to import torch, please install it first")
            return False

        # torch-cpu will not have a cuda version
        if torch.version.cuda is None:
            cuda_compatible = False
            self.warning(f"{self.NAME} cuda is not available from torch")
        else:
            major, minor = torch.version.cuda.split('.')[:2]
            cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11)
            if not cuda_compatible:
                self.warning(f"{self.NAME} requires CUDA version 10.1+")

        TORCH_MAJOR = int(torch.__version__.split('.')[0])
        TORCH_MINOR = int(torch.__version__.split('.')[1])
        torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5)
        if not torch_compatible:
            self.warning(
                f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}')

        try:
            import triton
        except ImportError:
            # auto-install of triton is broken on some systems, reverting to manual install for now
            # see this issue: https://github.com/microsoft/DeepSpeed/issues/1710
            # Fixed: dropped extraneous f-string prefix (no placeholders, F541).
            self.warning("please install triton==1.0.0 if you want to use sparse attention")
            return False

        # packaging gives a proper version compare; fall back to string equality.
        if pkg_version:
            installed_triton = pkg_version.parse(triton.__version__)
            triton_mismatch = installed_triton != pkg_version.parse("1.0.0")
        else:
            installed_triton = triton.__version__
            triton_mismatch = installed_triton != "1.0.0"

        if triton_mismatch:
            self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible")
            return False

        return super().is_compatible(verbose) and torch_compatible and cuda_compatible
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder, installed_cuda_version


class SpatialInferenceBuilder(CUDAOpBuilder):
    """Builder for the spatial (diffusion-style) inference kernels."""

    BUILD_VAR = "DS_BUILD_SPATIAL_INFERENCE"
    NAME = "spatial_inference"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.spatial.{self.NAME}_op'

    def is_compatible(self, verbose=True):
        """Return False (with a warning) when torch/CUDA requirements are unmet."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major
            # Ampere+ kernels require the CUDA 11 toolchain in both torch and the system.
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def sources(self):
        return [
            'csrc/spatial/csrc/opt_bias_add.cu',
            'csrc/spatial/csrc/pt_binding.cpp',
        ]

    def include_paths(self):
        return ['csrc/spatial/includes', 'csrc/includes']
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .transformer import TransformerBuilder


class StochasticTransformerBuilder(TransformerBuilder):
    """Transformer kernel build with stochastic-rounding mode enabled."""

    BUILD_VAR = "DS_BUILD_STOCHASTIC_TRANSFORMER"
    NAME = "stochastic_transformer"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.{self.NAME}_op'

    def nvcc_args(self):
        # Same flags as the base transformer build, plus the stochastic-mode define.
        return super().nvcc_args() + ['-D__STOCHASTIC_MODE__']


# ---- deepspeed/ops/op_builder/transformer.py ----

from .builder import CUDAOpBuilder


class TransformerBuilder(CUDAOpBuilder):
    """Builder for the DeepSpeed training transformer CUDA kernels."""

    BUILD_VAR = "DS_BUILD_TRANSFORMER"
    NAME = "transformer"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.{self.NAME}_op'

    def extra_ldflags(self):
        # cuRAND backs the dropout kernels on CUDA; ROCm needs nothing extra.
        return [] if self.is_rocm_pytorch() else ['-lcurand']

    def sources(self):
        return [
            'csrc/transformer/ds_transformer_cuda.cpp',
            'csrc/transformer/cublas_wrappers.cu',
            'csrc/transformer/transform_kernels.cu',
            'csrc/transformer/gelu_kernels.cu',
            'csrc/transformer/dropout_kernels.cu',
            'csrc/transformer/normalize_kernels.cu',
            'csrc/transformer/softmax_kernels.cu',
            'csrc/transformer/general_kernels.cu',
        ]

    def include_paths(self):
        return ['csrc/includes']
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder, installed_cuda_version


class InferenceBuilder(CUDAOpBuilder):
    """Builder for the (v1) transformer inference CUDA kernels."""

    BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
    NAME = "transformer_inference"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.inference.{self.NAME}_op'

    def is_compatible(self, verbose=True):
        """Return False (with a warning) when torch/CUDA requirements are unmet."""
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8 and (torch_cuda_major < 11 or sys_cuda_major < 11):
                self.warning("On Ampere and higher architectures please use CUDA 11+")
                cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def filter_ccs(self, ccs):
        # Pascal (SM60) or newer only.
        kept = [cc for cc in ccs if int(cc[0]) >= 6]
        dropped = [cc for cc in ccs if int(cc[0]) < 6]
        if dropped:
            self.warning(f"Filtered compute capabilities {dropped}")
        return kept

    def sources(self):
        return [
            'csrc/transformer/inference/csrc/pt_binding.cpp',
            'csrc/transformer/inference/csrc/gelu.cu',
            'csrc/transformer/inference/csrc/relu.cu',
            'csrc/transformer/inference/csrc/layer_norm.cu',
            'csrc/transformer/inference/csrc/rms_norm.cu',
            'csrc/transformer/inference/csrc/softmax.cu',
            'csrc/transformer/inference/csrc/dequantize.cu',
            'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu',
            'csrc/transformer/inference/csrc/transform.cu',
            'csrc/transformer/inference/csrc/pointwise_ops.cu',
        ]

    def extra_ldflags(self):
        # cuRAND is only needed for the CUDA (non-ROCm) build.
        return [] if self.is_rocm_pytorch() else ['-lcurand']

    def include_paths(self):
        return ['csrc/transformer/inference/includes', 'csrc/includes']


# ---- deepspeed/ops/random_ltd/__init__.py ----

from .dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a4599e047300900ec53b37036d22d634570e208 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/dropping_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/dropping_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fb8e176b2353a05cf2fb361c8d2e9bd25f5cd32 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/__pycache__/dropping_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/dropping_utils.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/dropping_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dd36c94537f896831291458514b513ea840791e2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/random_ltd/dropping_utils.py @@ -0,0 +1,132 @@ +# Copyright (c) Microsoft Corporation. 
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from deepspeed.ops.op_builder import RandomLTDBuilder
"""
Returns:
    sampled_indices: [layers, batch_size, reserved_length]
    new_mask: [batch_size, 1, reserved_length, reserved_length]
"""

# Lazily-built handle to the compiled random-LTD kernel extension.
random_ltd_module = None


def _get_random_ltd_module():
    """Load the compiled random-LTD extension on first use and cache it."""
    global random_ltd_module
    if random_ltd_module is None:
        random_ltd_module = RandomLTDBuilder().load()
    return random_ltd_module


def gpt_sample_tokens(reserved_length: int,
                      seq_length: int,
                      batch_size: int,
                      layers: int = 1,
                      device: str = 'cpu',
                      attn_mask: torch.Tensor = None):
    """Uniformly sample `reserved_length` token indices per layer/batch for GPT models."""
    uniform = torch.ones((layers * batch_size, seq_length), device=device)
    sampled_indices = torch.multinomial(uniform, reserved_length)
    sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
    sampled_indices = _get_random_ltd_module().token_sort_(sampled_indices, seq_length)

    # Not certain the optimized kernel is actually better here, cause it kind of screws
    # with alignment right if the sequence length is not divisible by like 16
    # new_mask = random_ltd_module.mask_gather_gpt(attn_mask, reserved_length)
    if attn_mask is not None:
        new_mask = attn_mask[:, :, :reserved_length, :reserved_length]
    else:
        new_mask = None

    return sampled_indices, new_mask


"""
Returns:
    sampled_indices: [layers, batch_size, reserved_length]
    new_mask: [layers, batch_size, 1, reserved_length, reserved_length]
"""


def bert_sample_tokens(reserved_length: int,
                       seq_length: int,
                       batch_size: int,
                       layers: int = 1,
                       device: str = 'cpu',
                       attn_mask: torch.Tensor = None):
    """Sample kept-token indices per layer and gather the matching attention masks."""
    assert attn_mask is not None
    uniform = torch.ones((layers * batch_size, seq_length), device=device)
    sampled_indices = torch.multinomial(uniform, reserved_length)
    sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
    sampled_indices = _get_random_ltd_module().token_sort_(sampled_indices, seq_length)

    dtype = sampled_indices.dtype
    # Long indices are required for fancy indexing below; restore dtype on return.
    sampled_indices = sampled_indices.to(torch.long)
    new_mask = []
    for layer_idx in range(layers):
        per_batch = []
        for batch_idx in range(batch_size):
            idx = sampled_indices[layer_idx][batch_idx]
            # Gather the kept rows, then the kept columns, of this sample's mask.
            rows = attn_mask[batch_idx:batch_idx + 1, :, idx, :]
            per_batch.append(rows[:, :, :, idx])
        new_mask.append(torch.cat(per_batch, dim=0))

    return sampled_indices.to(dtype), new_mask


class GatherTokens(torch.autograd.Function):
    """Autograd wrapper around the token_gather / token_scatter_ kernels."""

    @staticmethod
    def forward(ctx, activations: torch.Tensor, sorted_indices: torch.Tensor, batch_first: bool):
        module = _get_random_ltd_module()
        ctx.save_for_backward(activations, sorted_indices)
        ctx.batch_first = batch_first
        return activations, module.token_gather(activations, sorted_indices, batch_first)

    @staticmethod
    def backward(ctx, a_gradients: torch.Tensor, g_gradients: torch.Tensor):
        module = _get_random_ltd_module()
        activations, sorted_indices = ctx.saved_tensors
        # Scatter gathered-token grads back into the full-activation grad tensor.
        scattered = module.token_scatter_(a_gradients, g_gradients.contiguous(), sorted_indices, ctx.batch_first)
        return scattered, None, None


class ScatterTokens(torch.autograd.Function):
    """Autograd wrapper scattering per-layer activations back into the full tensor."""

    @staticmethod
    def forward(ctx, all_activations: torch.Tensor, layer_activations: torch.Tensor, sorted_indices: torch.Tensor,
                batch_first: bool):
        module = _get_random_ltd_module()
        # Clone so the kernel's in-place scatter never aliases the caller's tensor.
        scatter_results = module.token_scatter_(all_activations.clone(), layer_activations, sorted_indices,
                                                batch_first)
        ctx.save_for_backward(sorted_indices)
        ctx.batch_first = batch_first
        return scatter_results

    @staticmethod
    def backward(ctx, out_gradients: torch.Tensor):
        module = _get_random_ltd_module()
        sorted_indices, = ctx.saved_tensors
        out_gradients = out_gradients.contiguous()
        gathered = module.token_gather(out_gradients, sorted_indices, ctx.batch_first)
        return out_gradients, gathered, None, None
100644 index 0000000000000000000000000000000000000000..117fd6e97a3fdff2cd9ab6a560743c1cad51decd Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/bias_add.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3971e6ce594649ce2877a381240b146b057e40bf Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_2d_transformer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_2d_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1a857cd2fd5f9aa5249377fd3b22d0546a87b93 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_2d_transformer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_attention.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95e1aea3f069d43168269a511fb4cbab96678b92 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_attention.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_transformer_block.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_transformer_block.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..472ce84b9cc3acf7bdf00be45adb73efcf824643 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_transformer_block.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_attention.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f14f2f980a4bd741b80c00a37cb87b2a47a48c0e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_attention.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_mlp.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_mlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68839a7ceef0b045eefe1f671df76793d2d93f13 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_mlp.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/moe_inference.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/moe_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aee46c7a3804d339456b7764f1c4b08265f9746c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/moe_inference.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/triton_ops.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/triton_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea1c572950361ed15e69f94bea86fa1853d2a445 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/triton_ops.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/qkv_gemm.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/qkv_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09b29a6df2a0bd01416ce195f1fe78495498745c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/qkv_gemm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/residual_add.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/residual_add.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6afeb3cad00d29e6906e55054e4cbadad1388e7a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/residual_add.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c00cc3e646c24f5138df91756095c8ec8e80cda4 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/attention.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..664afa6ccc2e5584cac1c4ae0ad27b84fbdbf3ce Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/attention.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/gelu.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/gelu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fc0baa76b661d43527e603b8bac111f1527e330 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/gelu.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/layer_norm.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/layer_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cde4821188f142b15a8c6e2e75bacf78409a2a07 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/layer_norm.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/matmul_ext.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/matmul_ext.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b3a75c4d1b4b69ef1206ffd0e0a9b0c19585e816 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/matmul_ext.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/mlp.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/mlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92fc335a1de393702d9e6fe3610bb6f302e0bfef Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/mlp.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/residual_add.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/residual_add.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4d7ebc26f01b0f40a89d5eb25424fd663f493d2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/residual_add.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/softmax.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd4abeacefcde85f707f9b415ad312888bd8ee90 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/softmax.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/triton_matmul_kernel.cpython-310.pyc 
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import triton
import triton.language as tl
'''
layer-normalization
modified the triton kernel in
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/05-layer-norm.py
'''


@triton.jit
def layer_norm_kernel(
    Out,
    A,
    Weight,
    Bias,
    stride,
    N,
    eps,
    BLOCK_SIZE: tl.constexpr,
):
    # One program instance normalizes one row of A into Out.
    row = tl.program_id(0)
    Out += row * stride
    A += row * stride
    # Pass 1: row mean, accumulated in fp32 BLOCK_SIZE columns at a time.
    mean = 0
    mean_acc = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        x = tl.load(A + col, mask=col < N, other=0.0).to(tl.float32)
        mean_acc += x
    mean = tl.sum(mean_acc, axis=0) / N
    # Pass 2: biased variance of the row.
    var_acc = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        x = tl.load(A + col, mask=col < N, other=0.0).to(tl.float32)
        x = tl.where(col < N, x - mean, 0.0)
        var_acc += x * x
    var = tl.sum(var_acc, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    # Pass 3: normalize, apply affine transform, write back.
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        mask = col < N
        w = tl.load(Weight + col, mask=mask)
        b = tl.load(Bias + col, mask=mask)
        x = tl.load(A + col, mask=mask, other=0.0).to(tl.float32)
        x_hat = (x - mean) * rstd
        tl.store(Out + col, x_hat * w + b, mask=mask)


@triton.jit
def layer_norm_residual_kernel(
    Out,
    A,
    Residual,
    ln_input,
    Weight,
    Bias,
    stride,
    N,
    eps,
    BLOCK_SIZE: tl.constexpr,
):
    # One program instance handles one row: ln_input = A + Residual, then LN(ln_input).
    row = tl.program_id(0)
    Out += row * stride
    A += row * stride
    Residual += row * stride
    ln_input += row * stride
    # Pass 1: fuse the residual add (materialized into ln_input) with the mean.
    mean = 0
    mean_acc = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        x = tl.load(A + col, mask=col < N, other=0.0).to(tl.float32)
        r = tl.load(Residual + col, mask=col < N, other=0.0).to(tl.float32)
        x = x + r
        tl.store(ln_input + col, x, mask=col < N)
        mean_acc += x
    mean = tl.sum(mean_acc, axis=0) / N
    # Pass 2: variance, re-reading the fused sum from ln_input.
    var_acc = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        x = tl.load(ln_input + col, mask=col < N, other=0.0).to(tl.float32)
        x = tl.where(col < N, x - mean, 0.0)
        var_acc += x * x
    var = tl.sum(var_acc, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    # Pass 3: normalize, apply affine transform, write back.
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        mask = col < N
        w = tl.load(Weight + col, mask=mask)
        b = tl.load(Bias + col, mask=mask)
        x = tl.load(ln_input + col, mask=mask, other=0.0).to(tl.float32)
        x_hat = (x - mean) * rstd
        tl.store(Out + col, x_hat * w + b, mask=mask)


@triton.jit
def layer_norm_residual_bias_kernel(
    Out,
    A,
    Residual,
    InputBias,
    ln_input,
    Weight,
    Bias,
    stride,
    N,
    eps,
    BLOCK_SIZE: tl.constexpr,
):
    # Same as layer_norm_residual_kernel but also folds in an input bias:
    # ln_input = A + InputBias + Residual, then LN(ln_input).
    row = tl.program_id(0)
    Out += row * stride
    A += row * stride
    Residual += row * stride
    ln_input += row * stride
    # Pass 1: fused add (activation + bias + residual) and mean.
    mean = 0
    mean_acc = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        x = tl.load(A + col, mask=col < N, other=0.0).to(tl.float32)
        r = tl.load(Residual + col, mask=col < N, other=0.0).to(tl.float32)
        ib = tl.load(InputBias + col, mask=col < N, other=0.0).to(tl.float32)
        x = x + ib + r
        tl.store(ln_input + col, x, mask=col < N)
        mean_acc += x
    mean = tl.sum(mean_acc, axis=0) / N
    # Pass 2: variance.
    var_acc = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        x = tl.load(ln_input + col, mask=col < N, other=0.0).to(tl.float32)
        x = tl.where(col < N, x - mean, 0.0)
        var_acc += x * x
    var = tl.sum(var_acc, axis=0) / N
    rstd = 1 / tl.sqrt(var + eps)
    # Pass 3: normalize, apply affine transform, write back.
    for start in range(0, N, BLOCK_SIZE):
        col = start + tl.arange(0, BLOCK_SIZE)
        mask = col < N
        w = tl.load(Weight + col, mask=mask)
        b = tl.load(Bias + col, mask=mask)
        x = tl.load(ln_input + col, mask=mask, other=0.0).to(tl.float32)
        x_hat = (x - mean) * rstd
        tl.store(Out + col, x_hat * w + b, mask=mask)


def _launch_params(n_cols, element_size):
    """Shared BLOCK_SIZE / num_warps heuristic for both host-side wrappers."""
    # Stay under 64KB of elements per feature for the fused single-row kernel.
    max_fused = 65536 // element_size
    block = min(max_fused, triton.next_power_of_2(n_cols))
    block = max(block, 128)
    block = min(block, 4096)
    block = block if n_cols <= 4096 else 8192
    warps = min(max(block // 256, 1), 8)
    return block, warps


def layer_norm(a, weight, bias, eps):
    """LayerNorm over the last dimension of `a` with affine weight/bias."""
    assert a.is_contiguous()
    assert weight.is_contiguous()
    assert bias.is_contiguous()

    out = torch.empty_like(a)
    # Collapse all leading dims into rows; each kernel program handles one row.
    a_arg = a.view(-1, a.shape[-1])
    M, N = a_arg.shape
    BLOCK_SIZE, num_warps = _launch_params(N, a.element_size())
    layer_norm_kernel[(M, )](
        out,
        a_arg,
        weight,
        bias,
        a_arg.stride(0),
        N,
        eps,
        BLOCK_SIZE=BLOCK_SIZE,
        num_warps=num_warps,
    )
    return out


def layer_norm_residual(a, input_bias, residual, weight, bias, eps):
    """Fused (a [+ input_bias] + residual) followed by LayerNorm.

    Returns the normalized output; the pre-norm sum is computed into a
    scratch tensor inside the kernel.
    """
    assert a.is_contiguous()
    assert weight.is_contiguous()
    assert bias.is_contiguous()
    assert residual.is_contiguous()

    out = torch.empty_like(a)
    # Scratch-pad holding the residual-added LN input.
    ln_input = torch.empty_like(a)
    a_arg = a.view(-1, a.shape[-1])
    residual = residual.view(-1, residual.shape[-1])
    M, N = a_arg.shape
    BLOCK_SIZE, num_warps = _launch_params(N, a.element_size())
    if input_bias is None:
        layer_norm_residual_kernel[(M, )](
            out,
            a_arg,
            residual,
            ln_input,
            weight,
            bias,
            a_arg.stride(0),
            N,
            eps,
            BLOCK_SIZE=BLOCK_SIZE,
            num_warps=num_warps,
        )
    else:
        layer_norm_residual_bias_kernel[(M, )](
            out,
            a_arg,
            residual,
            input_bias,
            ln_input,
            weight,
            bias,
            a_arg.stride(0),
            N,
            eps,
            BLOCK_SIZE=BLOCK_SIZE,
            num_warps=num_warps,
        )
    return out
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import math
import torch.nn as nn
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
from ..op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp


class TritonMLP(nn.Module):
    """Transformer MLP block backed by the triton/op-binding inference kernels."""

    def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
        super(TritonMLP, self).__init__()

        self.config = config
        weight_dtype = self.config.dtype
        # Biases/norm params stay fp16 when the weights are int8-quantized.
        param_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
        dev = get_accelerator().current_device_name()

        # Pre-MLP layer-norm parameters.
        self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=param_dtype, device=dev),
                                    requires_grad=False)
        self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=param_dtype, device=dev),
                                    requires_grad=False)

        # Intermediate size is sharded across the model-parallel group.
        inter_size = self.config.intermediate_size // self.config.mp_size
        self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size, inter_size, dtype=weight_dtype, device=dev),
                                    requires_grad=False)
        self.inter_b = nn.Parameter(torch.empty(inter_size, dtype=param_dtype, device=dev), requires_grad=False)
        self.output_w = nn.Parameter(torch.empty(inter_size, self.config.hidden_size, dtype=weight_dtype, device=dev),
                                     requires_grad=False)
        self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=param_dtype, device=dev),
                                     requires_grad=False)

        # used for quantization
        self.q_scales = q_scales
        self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
        self.merge_count = int(math.log2(merge_count))
        self.mp_group = mp_group

        self.mlp_gemm_func = MLPGemmOp(config)
        self.vector_matmul_func = VectorMatMulOp(config)
        self.fused_gemm_gelu = GELUGemmOp(config)
        self.residual_add_func = ResidualAddOp(config)

    def forward(self, input, residual, residual_norm, bias):
        residual_add = None
        if self.attn_nw is None:
            # No pre-MLP norm parameters: run the fused GEMM+GeLU+GEMM path
            # directly on the already-normalized input.
            output = self.fused_gemm_gelu(input=residual_norm,
                                          weight=self.inter_w,
                                          bias=self.inter_b,
                                          weight_out=self.output_w)
        else:
            # Fused residual-add + layer-norm + both MLP GEMMs.
            output, residual_add = self.mlp_gemm_func(input=input,
                                                      residual=residual,
                                                      input_bias=bias,
                                                      weight_interm=self.inter_w,
                                                      weight_out=self.output_w,
                                                      bias=self.inter_b,
                                                      gamma=self.attn_nw,
                                                      beta=self.attn_nb)
        residual = self.residual_add_func(hidden_state=output,
                                          residual=residual,
                                          attention_output=input,
                                          attention_bias=bias if bias is not None else self.output_b,
                                          final_bias=self.output_b,
                                          add_bias=bias is not None,
                                          residual_add=residual_add)

        # Reduce the sharded MLP output across the model-parallel group.
        if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
            dist.all_reduce(residual, group=self.mp_group)

        return residual
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
import deepspeed.ops.transformer.inference.triton.matmul_ext as matmul_ext
from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm, layer_norm_residual

# Lazily-loaded compiled inference extension; only needed when the
# triton layer-norm path is disabled (use_triton_ln=False).
inference_module = None


def _get_inference_module():
    """Build/load the InferenceBuilder extension once and cache it."""
    global inference_module
    if inference_module is None:
        inference_module = InferenceBuilder().load()
    return inference_module


def vector_matmul_func(input, weight, async_op, q_scale, q_int8, transposed_mode):
    """Plain matmul via the triton path; quantized/async/transposed modes unsupported."""
    assert not transposed_mode and not async_op and not q_int8
    return matmul_ext.matmul(input, weight, bias=None, activation="", use_triton=True)


def fused_gemm_gelu(input,
                    weight,
                    weight_scale,
                    bias,
                    weight_out,
                    weight_out_scale,
                    epsilon,
                    pre_layer_norm,
                    q_int8,
                    async_op,
                    transposed_mode,
                    use_triton_ln=True):
    """GEMM + GeLU + GEMM for the MLP; bias of the second GEMM is applied later."""
    assert not transposed_mode

    # intermediate fc in FF, with fused GeLU activation
    intm_out = matmul_ext.matmul(input, weight, bias=bias, activation="gelu", use_triton=True)

    # output fc in FF
    ff_out = matmul_ext.matmul(
        intm_out,
        weight_out,
        bias=None,
        activation="",  # bias added layer with residual_add + bias + layerNorm layer
        use_triton=True)
    return ff_out


def linear_func(input, weight, bias, add_bias, do_flash_attn, num_heads, transposed_mode=False):
    """Linear projection (optionally biased); flash-attn/transposed modes unsupported."""
    assert not transposed_mode and not do_flash_attn
    return matmul_ext.matmul(input, weight, bias=(bias if add_bias else None), activation="", use_triton=True)


def mlp_gemm_func(input,
                  residual,
                  input_bias,
                  weight_interm,
                  weight_out,
                  bias,
                  gamma,
                  beta,
                  epsilon,
                  pre_layer_norm,
                  mlp_after_attn,
                  weight_interm_scale,
                  weight_out_scale,
                  q_int8,
                  mlp_act_func_type,
                  transposed_mode,
                  use_triton_ln=True):
    """Residual-add + layer-norm followed by the two MLP GEMMs.

    Returns (ff_out, mlp_input) where mlp_input is the post-norm activation.
    """
    assert not transposed_mode

    # residual add and layerNorm after attention
    if use_triton_ln:
        mlp_input = layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)
    else:
        mlp_input = _get_inference_module()._layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)

    # Map the activation enum onto the matmul_ext activation string.
    act = deepspeed.utils.types.ActivationFuncType(mlp_act_func_type)
    if act == deepspeed.utils.types.ActivationFuncType.GELU:
        activation = "gelu"
    elif act == deepspeed.utils.types.ActivationFuncType.ReLU:
        activation = "relu"
    else:
        activation = ""

    # intermediate fc in FF
    intm_out = matmul_ext.matmul(mlp_input, weight_interm, bias=bias, activation=activation, use_triton=True)
    # output fc in FF
    ff_out = matmul_ext.matmul(
        intm_out,
        weight_out,
        bias=None,
        activation="",  # bias added layer with residual_add + bias + layerNorm layer
        use_triton=True)

    return ff_out, mlp_input


def qkv_gemm_func(
    input,
    weight,
    q_scale,
    bias,
    gamma,
    beta,
    epsilon,
    add_bias,
    q_int8,
    transposed_mode=False,
    use_triton_ln=True,
):
    """Layer-norm followed by the QKV projection GEMM.

    Returns (qkv_out, qkv_input) where qkv_input is the post-norm activation.
    """
    assert not transposed_mode

    # layerNorm ahead of the QKV projection
    if use_triton_ln:
        qkv_input = layer_norm(input, gamma, beta, epsilon)
    else:
        qkv_input = _get_inference_module().layer_norm(input, gamma, beta, epsilon)

    qkv_out = matmul_ext.matmul(qkv_input, weight, bias=(bias if add_bias else None), activation="", use_triton=True)
    return qkv_out, qkv_input
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import triton +import triton.language as tl +from deepspeed.accelerator import get_accelerator + + +@triton.jit +def residual_add_bias_kernel( + hidden_state_ptr, + residual_ptr, + attn_output_ptr, + hidden_state_size, + attn_bias_ptr, + final_bias_ptr, + bias_size, + output_ptr, + mp_size: tl.constexpr, + mlp_after_attn: tl.constexpr, + pre_attn_norm: tl.constexpr, + add_attn_bias: tl.constexpr, + BLOCK_SIZE: tl.constexpr, +): + pid = tl.program_id(axis=0) + + block_start = pid * BLOCK_SIZE + + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < hidden_state_size + + bias_offsets = offsets % bias_size + bias_mask = bias_offsets < bias_size + + tl_hidden_state = tl.load(hidden_state_ptr + offsets, mask=mask) + tl_residual = tl.load(residual_ptr + offsets, mask=mask) + tl_attn_output = tl.load(attn_output_ptr + offsets, mask=mask) + tl_attn_bias = tl.load(attn_bias_ptr + bias_offsets, mask=bias_mask) + tl_final_bias = tl.load(final_bias_ptr + bias_offsets, mask=bias_mask) + + if mlp_after_attn: + if pre_attn_norm: + output = tl_hidden_state + (tl_residual + tl_final_bias + tl_attn_output + tl_attn_bias) / mp_size + else: + output = tl_hidden_state + tl_residual + tl_final_bias + else: + output = tl_hidden_state + tl_attn_output + (tl_residual + tl_final_bias) / mp_size + if add_attn_bias: + output += tl_attn_bias / mp_size + + tl.store(output_ptr + offsets, output, mask=mask) + + +def residual_add_bias(hidden_state: torch.Tensor, residual: torch.Tensor, attn_output: torch.Tensor, + attn_bias: torch.Tensor, final_bias: torch.Tensor, mp_size: int, mlp_after_attn: bool, + add_attn_bias: bool, pre_attn_norm: bool): + # check that all tensors are on the same device + assert get_accelerator().on_accelerator(hidden_state) \ + and get_accelerator().on_accelerator(residual) \ + and get_accelerator().on_accelerator(attn_output) \ + and get_accelerator().on_accelerator(attn_bias) \ + 
and get_accelerator().on_accelerator(final_bias) + + # check that all tensors have the same dtype + assert hidden_state.dtype == residual.dtype == attn_output.dtype \ + == attn_bias.dtype == final_bias.dtype + + # check that all tensors have the right shape + assert hidden_state.shape == residual.shape == attn_output.shape + assert attn_bias.shape == final_bias.shape + assert attn_bias.shape[0] == hidden_state.shape[2] + + output = torch.empty_like(hidden_state) + + hidden_state_size = output.numel() + bias_size = attn_bias.numel() + + grid = lambda meta: (triton.cdiv(hidden_state_size, meta['BLOCK_SIZE']), ) + + residual_add_bias_kernel[grid](hidden_state, residual, attn_output, hidden_state_size,\ + attn_bias, final_bias, bias_size, output, mp_size, mlp_after_attn, pre_attn_norm, \ + add_attn_bias, \ + BLOCK_SIZE=1024) + + return output diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/triton_matmul_kernel.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/triton_matmul_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..e2128e046df049ddbd846131b3dc6001083e991b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/triton_matmul_kernel.py @@ -0,0 +1,398 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import triton +import triton.language as tl +from .gelu import gelu_functor +import torch + +AUTOTUNE_TOP_K = 10 +SKIP_AUTOTUNE = False + + +def _triton_ops_matmul_early_config_prune(configs, named_args): + device = torch.cuda.current_device() #ignore-cuda + capability = torch.cuda.get_device_capability() #ignore-cuda + # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages + dtsize = named_args['A'].element_size() + dtype = named_args['A'].dtype + + # 1. 
make sure we have enough smem + pruned_configs = [] + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \ + kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], config.num_stages + + max_shared_memory = triton.runtime.driver.utils.get_device_properties(device)["max_shared_mem"] + required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize + if required_shared_memory <= max_shared_memory: + pruned_configs.append(config) + + return pruned_configs + + +def _fp16_matmul_prune_config(configs, named_args, skip_autotune=SKIP_AUTOTUNE): + if skip_autotune: + configs = [configs[0]] + else: + configs = _triton_ops_matmul_early_config_prune(configs, named_args) + return configs + + +""" +fp16 matmul implementation is adapted from triton matmul: +https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/triton/ops/matmul.py +""" + + +@triton.autotune( + configs=[ + # basic configs for compute-bound matmuls + triton.Config({ + 'BLOCK_M': 128, + 'BLOCK_N': 256, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=3, num_warps=8), + triton.Config({ + 'BLOCK_M': 256, + 'BLOCK_N': 128, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=3, num_warps=8), + triton.Config({ + 'BLOCK_M': 256, + 'BLOCK_N': 64, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=4, num_warps=4), + triton.Config({ + 'BLOCK_M': 64, + 'BLOCK_N': 256, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=4, num_warps=4), + triton.Config({ + 'BLOCK_M': 128, + 'BLOCK_N': 128, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=4, num_warps=4), + triton.Config({ + 'BLOCK_M': 128, + 'BLOCK_N': 64, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=4, num_warps=4), + triton.Config({ + 'BLOCK_M': 64, + 'BLOCK_N': 128, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=4, num_warps=4), + triton.Config({ + 'BLOCK_M': 128, + 'BLOCK_N': 32, + 'BLOCK_K': 32, + 'SPLIT_K': 1 + }, num_stages=4, num_warps=4), + triton.Config({ + 'BLOCK_M': 64, + 'BLOCK_N': 32, + 'BLOCK_K': 32, + 
'SPLIT_K': 1 + }, num_stages=5, num_warps=2), + ], + key=['CACHE_M', 'CACHE_N', 'CACHE_K'], + prune_configs_by={ + 'early_config_prune': _fp16_matmul_prune_config, + 'perf_model': None, + 'top_k': AUTOTUNE_TOP_K + }, +) +@triton.heuristics({ + 'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0, +}) +@triton.jit +def _fp_matmul( + A, + B, + C, + M, + N, + K, + bias, + stride_am, + stride_ak, + stride_bk, + stride_bn, + stride_cm, + stride_cn, + CACHE_M, + CACHE_N, + CACHE_K, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_K: tl.constexpr, + GROUP_M: tl.constexpr, + SPLIT_K: tl.constexpr, + EVEN_K: tl.constexpr, + ACC_TYPE: tl.constexpr, + BIAS_ADD: tl.constexpr, + ACTIVATION: tl.constexpr, +): + # matrix multiplication + pid = tl.program_id(0) + pid_z = tl.program_id(1) + grid_m = (M + BLOCK_M - 1) // BLOCK_M + grid_n = (N + BLOCK_N - 1) // BLOCK_N + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + # do matrix multiplication + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + # pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) + for k in range(K, 0, -BLOCK_K * SPLIT_K): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + a = tl.load(A, mask=rk[None, :] < k, other=0.) + b = tl.load(B, mask=rk[:, None] < k, other=0.) 
+ acc += tl.dot(a, b) + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + # bias addition + if BIAS_ADD: + bias_offset = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + bias_ptr = bias + bias_offset + b = tl.load(bias_ptr, mask=bias_offset < N) + acc = acc + b[None, :] + # activation + if ACTIVATION == "relu": + acc = tl.where(acc >= 0, acc, 0) + elif ACTIVATION == "leaky_relu": + acc = tl.where(acc >= 0, acc, 0.01 * acc) + elif ACTIVATION == "gelu": + #acc = tl.sigmoid(1.702 * acc) * acc + acc = gelu_functor(acc) + elif ACTIVATION == "sigmoid": + acc = tl.sigmoid(acc) # sigmoid + acc = acc.to(C.dtype.element_ty) + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + # handles write-back with reduction-splitting + if SPLIT_K == 1: + tl.store(C, acc, mask=mask) + else: + tl.atomic_add(C, acc, mask=mask) + + +def matmul_4d_prune_config(configs, named_args, skip_autotune=SKIP_AUTOTUNE): + if skip_autotune: + configs = [configs[0]] + else: + device = torch.cuda.current_device() #ignore-cuda + capability = torch.cuda.get_device_capability() #ignore-cuda + # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages + dtsize = named_args['a_ptr'].element_size() + dtype = named_args['a_ptr'].dtype + + # make sure we have enough smem + pruned_configs = [] + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \ + kw['BLOCK_SIZE_M'], kw['BLOCK_SIZE_N'], kw['BLOCK_SIZE_K'], config.num_stages + + max_shared_memory = triton.runtime.driver.utils.get_device_properties(device)["max_shared_mem"] + required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize + if required_shared_memory <= max_shared_memory: + pruned_configs.append(config) + configs = pruned_configs + return configs + + +@triton.autotune( + configs=[ + 
triton.Config( + { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 64, + "GROUP_SIZE_M": 8 + }, + num_stages=1, # this is mainly for unit test, to minimize the share memory usage + num_warps=8), + triton.Config( + { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 32, + "GROUP_SIZE_M": 8, + }, + num_stages=4, + num_warps=4, + ), + triton.Config( + { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 32, + "GROUP_SIZE_M": 8, + }, + num_stages=4, + num_warps=4, + ), + triton.Config( + { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 128, + "BLOCK_SIZE_K": 32, + "GROUP_SIZE_M": 8, + }, + num_stages=4, + num_warps=4, + ), + triton.Config( + { + "BLOCK_SIZE_M": 128, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 32, + "GROUP_SIZE_M": 8, + }, + num_stages=4, + num_warps=4, + ), + triton.Config( + { + "BLOCK_SIZE_M": 64, + "BLOCK_SIZE_N": 32, + "BLOCK_SIZE_K": 32, + "GROUP_SIZE_M": 8, + }, + num_stages=5, + num_warps=2, + ), + triton.Config( + { + "BLOCK_SIZE_M": 32, + "BLOCK_SIZE_N": 64, + "BLOCK_SIZE_K": 32, + "GROUP_SIZE_M": 8, + }, + num_stages=5, + num_warps=2, + ), + ], + key=['CACHE_M', 'CACHE_N', 'CACHE_K'], + prune_configs_by={ + 'early_config_prune': matmul_4d_prune_config, + 'perf_model': None, + 'top_k': AUTOTUNE_TOP_K + }, +) +@triton.jit +def matmul_4d_kernel( + # Pointers to matrices + a_ptr, + b_ptr, + c_ptr, + # Matrix dimensions + M, + N, + K, + CACHE_M, + CACHE_N, + CACHE_K, + stride_ab, + stride_ah, + stride_am, + stride_ak, + stride_bb, + stride_bh, + stride_bk, + stride_bn, + stride_cb, + stride_ch, + stride_cm, + stride_cn, + scale, + # Meta-parameters + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + MASK: tl.constexpr, +): + """Kernel for computing the matmul C = A x B. 
+ A has shape (M, K), B has shape (K, N) and C has shape (M, N) + """ + pid = tl.program_id(axis=0) + head = tl.program_id(axis=1) + batch = tl.program_id(axis=2) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) + num_pid_in_group = GROUP_SIZE_M * num_pid_n + group_id = pid // num_pid_in_group + first_pid_m = group_id * GROUP_SIZE_M + group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) + pid_m = first_pid_m + (pid % group_size_m) + pid_n = (pid % num_pid_in_group) // group_size_m + + if MASK: + if (pid_m + 1) * BLOCK_SIZE_M - 1 < pid_n * BLOCK_SIZE_N: + c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=c_ptr.dtype.element_ty) - float("inf") + offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = (c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_cm[:, None] + + stride_cn * offs_cn[None, :]) + tl.store(c_ptrs, c) + return + + offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = (a_ptr + batch * stride_ab + head * stride_ah + + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)) + b_ptrs = (b_ptr + batch * stride_bb + head * stride_bh + + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + for k in range(0, K, BLOCK_SIZE_K): + a_mask = (offs_am[:, None] < M) & (offs_k[None, :] + k < K) + b_mask = (offs_k[:, None] + k < K) & (offs_bn[None, :] < N) + a = tl.load(a_ptrs, mask=a_mask, other=0.) + b = tl.load(b_ptrs, mask=b_mask, other=0.) 
+ accumulator += tl.dot(a, b) + a_ptrs += BLOCK_SIZE_K * stride_ak + b_ptrs += BLOCK_SIZE_K * stride_bk + + c = accumulator.to(c_ptr.dtype.element_ty) + if scale > 0: + c = c * scale.to(c_ptr.dtype.element_ty) + + offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + if MASK: + c += tl.where(offs_cm[:, None] >= offs_cn[None, :], 0, float("-inf")) + c_ptrs = (c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_cm[:, None] + + stride_cn * offs_cn[None, :]) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N) + tl.store(c_ptrs, c, mask=c_mask)