diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..729f61f7ba7303d3e98560c6eee06844a0afcda1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25be8830670ccab65dc8c8396444a37416e10b84 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..85e4b7a0e0a00b1514c18c06e9f0c43fa41dd6c3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional +from deepspeed.pydantic_v1 import Field +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from .ragged import DSStateManagerConfig + + +class DeepSpeedTPConfig(DeepSpeedConfigModel): + """ Configure tensor parallelism settings """ + + tp_size: int = 1 + """ Number of devices to split the model across using tensor parallelism. """ + + +class QuantizationConfig(DeepSpeedConfigModel): + """ Configure tensor parallelism settings """ + + quantization_mode: Optional[str] = None + """ The quantization mode in string format. 
The supported modes are as follows: + - 'wf6af16', weight-only quantization with FP6 weight and FP16 activation. + """ + # TODO: may reuse the constants in deepspeed/compression/constants.py + + +class RaggedInferenceEngineConfig(DeepSpeedConfigModel): + """ Sets parameters for DeepSpeed Inference Engine. """ + + tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp") + """ + Configuration for tensor parallelism used to split the model across several + GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`. + """ + + state_manager: DSStateManagerConfig = Field({}, alias="manager") + """ + Configuration for managing persistent state + """ + + quantization: QuantizationConfig = {} diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..4a358310377f00af00d92b6e5dcd2fd5d600392d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py @@ -0,0 +1,268 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import json +import pickle +from typing import Iterable, Tuple + +import torch + +import deepspeed.comm as dist + +from deepspeed.accelerator import get_accelerator +from deepspeed.comm.comm import init_distributed + +from .model_implementations import InferenceV2Policy +from .logging import inference_logger +from .ragged import DSStateManager, RaggedBatchWrapper, PlaceholderSequenceDescriptor +from .scheduling_utils import SchedulingError, SchedulingResult +from .model_implementations.flat_model_helpers import make_param_filename, make_metadata_filename +from .model_implementations.inference_model_base import DSInferenceModelBase + +from .config_v2 import RaggedInferenceEngineConfig + +INFERENCE_MODEL_TIMER = "model-forward-inference" + + +class InferenceEngineV2: + + _config: RaggedInferenceEngineConfig + """ + Configuration of the inference engine. + """ + + _model: DSInferenceModelBase + """ + Inference model supporting ragged inference. + """ + + _state_manager: DSStateManager + """ + Persistent state manager for sequences and KV-cache. + """ + + @property + def free_blocks(self) -> torch.Tensor: + """ + Number of free KV blocks. This is a tensor of shape [n_kv_cache_groups] where each + element is the number of free blocks in the corresponding KV cache group. + """ + return self._state_manager.free_blocks + + @property + def n_kv_cache_groups(self) -> int: + """ + Number of KV cache groups. + """ + return self._state_manager.n_kv_cache_groups + + def model(self) -> DSInferenceModelBase: + """ + The model implementation. + """ + return self._model + + def __init__(self, policy: InferenceV2Policy, engine_config: RaggedInferenceEngineConfig) -> None: + """ + Create the Inference V2 engine. + + Arguments: + policy (InferenceV2Policy): Policy for the model implementation. This policy object + will be used to build the model and load the checkpoint associated with it. 
+ engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + """ + self._config = engine_config + self._policy = policy + self._base_mp_group = self._initialize_tp_group() + + # Build model from policy + inference_logger().info("Building model...") + self._model = self._policy.build_model(self._config, self._base_mp_group) + inference_logger().info("Model built.") + + # Create state manager + self._batch = RaggedBatchWrapper(self._config.state_manager) + self._state_manager = DSStateManager(self._config.state_manager, + self._model.kv_cache_config(), + base_mp_group=self._base_mp_group) + self._model.set_state_manager(self._state_manager) + + def _initialize_tp_group(self): + """ + Implementation of our TP group initialization. + """ + init_distributed() + local_rank = int(os.getenv("LOCAL_RANK", 0)) + get_accelerator().set_device(local_rank) + + if local_rank >= self._config.tensor_parallel.tp_size: + raise RuntimeError("Local rank is greater than TP size, ensure that the TP config is correct.") + + ranks = list(range(self._config.tensor_parallel.tp_size)) + return dist.new_group(ranks=ranks) + + def put(self, + batch_uids: Iterable[int], + batch_tokens: Iterable[torch.Tensor], + do_checks: bool = True) -> torch.Tensor: + """ + Put a ragged batch onto the inference engine. This will perform one forward and return + a Tensor of the shape [len(batch_uids), *output_shape]. Logits for the non-final tokens + are not calculated. + + Arguments: + batch_uids: Iterable of uids for the batch on the host + batch_tokens: Iterable of token tensors for the batch on the host + do_checks: Check schedulability when it is set to True. You can skip this check for better performance when it has already been completed. 
+ """ + + if do_checks: + token_lens = [len(tokens) for tokens in batch_tokens] + schedule_check = self.can_schedule(batch_uids, token_lens) + if schedule_check != SchedulingResult.Success: + raise SchedulingError(schedule_check) + + self._batch.clear() + for uid, tokens in zip(batch_uids, batch_tokens): + + host_seq_desc = self._state_manager.get_or_create_sequence(uid) + self._model.maybe_allocate_kv(host_seq_desc, tokens.numel()) + host_seq_desc.pre_forward(tokens.numel()) + + # We can disable checks since we already validated schedulability. + self._batch.insert_sequence(host_seq_desc, tokens, do_checks=do_checks) + + # Send all metadata to the device + self._batch.finalize() + + # Prep all data structures for the actual forward (in anticipation of CG in the future) + # and also to amortize some of the costs in a more straightforward way. + self._model.prepare_batch(self._batch) + + # Model implementation will pick up in the forward. + logits = self._model.forward(self._batch) + + # We return one set of logits per sequence in the batch (saves cost on unembedding) + assert logits.shape[0] == self._batch.current_sequences + + for uid in batch_uids: + host_seq_desc = self._state_manager.get_sequence(uid) + host_seq_desc.post_forward() # Updates sequence metadata. + self._model.maybe_free_kv(host_seq_desc) + + return logits + + def query(self, uid: int, max_request_tokens: int, max_request_blocks) -> Tuple[int, torch.Tensor]: + """ + Determine the number of tokens and KV blocks to reserve for a given request. Given a UID + (this UID may not be recognized by the model yet), this will return the number of tokens + and blocks to reserve for the request. + + Arguments: + uid (int): The UID of the sequence (as tracked by the scheduling entity). If + this is a new sequence (with a UID unknown to the inference engine), then + an empty placeholder is created to pass to the occupancy logic. + n_tokens (int): The number of tokens to hypothetically send. 
+ + Returns: + Tuple[int, Optional[int]]: Tuple of free kv blocks and the number of blocks + required to schedule the sequence. + """ + seq_desc = self._state_manager.get_sequence(uid) + if seq_desc is None: + if (self._state_manager.n_tracked_sequences == self._config.state_manager.max_tracked_sequences): + return (0, 0) + seq_desc = PlaceholderSequenceDescriptor() + + req_tokens, req_blocks = self._model.get_kv_requirements(seq_desc, max_request_tokens, max_request_blocks) + + return (req_tokens, req_blocks) + + def can_schedule(self, uids: Iterable[int], lengths: Iterable[int]) -> SchedulingResult: + """ + Dry run a batch to determine if it can be scheduled. Placeholder sequences will be + created for any UIDs that are unknown to the inference engine. + + Arguments: + uids (Iterable[int]): Iterable of UIDs for the batch + lengths (Iterable[int]): Iterable of lengths for each sequence of the batch. This lengths + corresponds to the number of tokens to send in the hypothetical forward; history + tokens will be determined via UID lookup and future tokens are disregarded. + + Returns: + bool: True if the batch can be scheduled, False otherwise. 
+ """ + + cur_seqs = self._state_manager.n_tracked_sequences + free_blocks = self._state_manager.free_blocks + req_blocks = 0 + batch_len = 0 + + if len(uids) > self._config.state_manager.max_ragged_sequence_count: + # Can only compose a batch from a limited number of sequences + return SchedulingResult.BatchSequenceLimitExceeded + + for uid, length in zip(uids, lengths): + seq_desc = self._state_manager.get_sequence(uid) + if seq_desc is None: + cur_seqs += 1 + seq_desc = PlaceholderSequenceDescriptor() + + sched_len, sched_blocks = self._model.get_kv_requirements(seq_desc, length, free_blocks) + + if sched_len != length: + # We ran out of KV cache + return SchedulingResult.KVCacheLimitExceeded + + batch_len += length + free_blocks -= sched_blocks + + if cur_seqs > self._config.state_manager.max_tracked_sequences: + # Would run out of tracking metadata + return SchedulingResult.EngineSequenceLimitExceeded + + if batch_len > self._config.state_manager.max_ragged_batch_size: + # Would exceed the maximum batch size + return SchedulingResult.BatchTokenLimitExceeded + + return SchedulingResult.Success + + def get_remaining_block_capacity(self, uid: int) -> int: + """ + Get the remaining capacity of the last block already allocated. + """ + seq_desc = self._state_manager.get_sequence(uid) + if seq_desc is None: + return 0 + return self._model.get_remaining_block_capacity(seq_desc) + + def flush(self, uid: int) -> None: + """ + Remove all state associated with a sequence from the inference engine. + + Arguments: + uid (int): The UID of the sequence to flush. + """ + self._state_manager.flush_sequence(uid) + + def serialize(self, save_path: str) -> None: + """ + Serialize the model to a file. + + Arguments: + path (str): Path to the file to serialize to. 
+ """ + param_file_name = make_param_filename(save_path, self._model.tp_rank, self._model.tp_size) + metadata_file_name = make_metadata_filename(save_path, self._model.tp_rank, self._model.tp_size) + + # Save the flattened parameters + + torch.save(self._model.flattened_params, param_file_name) + + json.dump(self._model.flattened_param_metadata.json(), open(metadata_file_name, "w")) + + if self._model.tp_rank == 0: + pickle.dump(self._model._config, open(os.path.join(save_path, "ds_model_config.pkl"), "wb")) diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..77afe351cbea127c2f0f5dbc9a249dd71516ca31 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import logging + +from deepspeed.utils.logging import LoggerFactory + +inf_logger = None + + +def inference_logger(level: int = logging.INFO) -> logging.Logger: + """ + Create the inference logger. NOTE: Logging is not cost free. On a 3960X, + there is a cost of about 6 us per call to a no-op logger, so this should + be used during setup only and not during the inference loop. + + Args: + level (int, optional): The logging level. Defaults to logging.INFO. 
+ """ + global inf_logger + if inf_logger is None: + inf_logger = LoggerFactory.create_logger(name="DS-Inference", level=level) + inf_logger.debug("Inference logger created.") + return inf_logger diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14b0654a8c36c63e8666e9392e83c9a82478cce8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .inference_model_base import DSInferenceModelBase +from .inference_transformer_base import DSTransformerModelBase, DSMoETransformerModelBase +from .inference_policy_base import InferenceV2Policy, ContainerMap +from .sharding import * + +# Model Implementations +from .llama_v2 import * +from .opt import * +from .mistral import * +from .mixtral import * +from .falcon import * +from .phi import * +from .qwen import * +from .qwen_v2 import * diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py new file mode 100644 index 0000000000000000000000000000000000000000..894a4137407e9b0c7c38f18d504cebf2977649bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py @@ -0,0 +1,272 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod +from typing import Iterable, Optional, Tuple, Type + +import torch + +import deepspeed.comm as dist +from ..ragged import DSStateManager, RaggedBatchWrapper +from ..ragged.manager_configs import KVCacheConfig +from ..ragged import DSSequenceDescriptor +from ..model_implementations.layer_container_base import LayerContainer +from ..config_v2 import RaggedInferenceEngineConfig +from .flat_model_helpers import ModelMetadata + +try: + from functools import cached_property +except ImportError: + + def cached_property(func): + return property(func) + + +""" +This abstract class defines the interfaces that a model implementation should implement +in order to include anything that may be called by the engine. Most models should be able +to inherit from `DSInferenceTransformerModelBase` to reduce implementation work so it is recommended +to begin there. +""" +""" +Placeholder for typing the model config, which can vary based on model implementation/ +""" +DSModelImplementationConfig = Type['DSModelImplementationConfig'] +""" +Placeholder for typing the distributed comm object. + +TODO(cmikeh2): Replace when we have a more defined API for the inference communication system. +""" +MPType = Type["MPType"] + + +class DSInferenceModelBase(torch.nn.Module, ABC): + """ + Implementation of a model for inference composable with ragged batching. + """ + + _config: DSModelImplementationConfig + """ + Model-specific configuration. No abstraction surrounds this yet. + """ + + _engine_config: RaggedInferenceEngineConfig + """ + Engine configuration. + """ + + _base_mp_group: MPType + """ + Base communication group for Tensor-parallel inference. + """ + + _non_transformer: Optional[LayerContainer] + """ + Abstract container for storing both embedding (pre-transformer) and unembedding (post-transformer) + parameters. 
This attribute should be None at model instantiation until the Policy sets + the model parameters. These parameters are grouped together since many model implementations + will tie the embedding and unembedding parameters together. + """ + + _transformer: Optional[Iterable[LayerContainer]] + """ + List of abstract containers (1 per layer) for storing transformer (transformer) + parameters. This attribute should be None at model instantiation until the Policy + sets the model parameters. + """ + + state_manager: Optional[DSStateManager] + """ + Since the state manager is lazy initialized, by the engine, it is not guaranteed to be present + until full initialization. + """ + + def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig, + base_mp_group: MPType) -> None: + """ + Minimal initialization of the model. + + Arguments: + config (DSModelImplementationConfig): Model-specific configuration. No assumptions + should be made about this config that are not closely tied to the specific + model implementation. + engine_config (RaggedInferenceEngineConfig): Engine configuration. + base_mp_group (MPType): Base communication group for Tensor-parallel inference. + """ + super().__init__() + self._config = config + self._engine_config = engine_config + self._base_mp_group = base_mp_group + + # Set to None until the Policy sets the model parameters + self._non_transformer = None + self._transformer = None + self._flattened_param_buffer = None + self._flattened_param_metadata = None + + @property + def config(self) -> DSModelImplementationConfig: + """ + The model config. + """ + return self._config + + def set_parameters(self, transformer: Iterable[LayerContainer], non_transformer: LayerContainer, + flattened_param_buffer: torch.Tensor, flattened_param_metadata: ModelMetadata): + """ + Set the model parameters for the embedding, transformer, and unembedding containers. 
+ """ + self._transformer = transformer + self._non_transformer = non_transformer + self._flattened_param_buffer = flattened_param_buffer + self._flattened_param_metadata = flattened_param_metadata + + def set_state_manager(self, state_manager: DSStateManager): + """ + Sets the state manager attribute. This is called by the inference engine after + the model is fully initialized. + """ + self.state_manager = state_manager + + @cached_property + def tp_rank(self) -> int: + """ + The rank of the current process. + + # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at + the frequency we need. + """ + return dist.get_rank(group=self._base_mp_group) + + @cached_property + def tp_size(self) -> int: + """ + The total number of processes. + + # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at + the frequency we need. + """ + return dist.get_world_size(group=self._base_mp_group) + + @property + def model_config(self): + """ + The model config. + """ + return self._config + + @property + def engine_config(self): + """ + The engine config. + """ + return self._engine_config + + @property + def flattened_params(self) -> Optional[torch.Tensor]: + """ + The flattened parameter buffer. + """ + return self._flattened_param_buffer + + @property + def flattened_param_metadata(self) -> Optional[ModelMetadata]: + """ + The flattened parameter metadata. + """ + return self._flattened_param_metadata + + @abstractmethod + def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int, + max_new_blocks: Tuple[int, ...]) -> Tuple[int, torch.Tensor]: + """ + Given a sequence and the number of new tokens in the sequence, determine the + number of new KV blocks needed to support the sequence. This method is + used to help the engine provide schedulability APIs and can be used as a helper + for ``maybe_allocate_kv``. + + Args: + sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage. 
+ max_new_tokens (int): Maximum number of tokens to hypothetically schedule. + max_new_blocks (int): Maximum number of blocks to hypothetically allocate. + + Returns: + Tuple[int, torch.Tensor]: The tuple of number of tokens scheduled and number + of blocks allocated (per KV cache). In general, only one of these numbers will + match the corresponding input argument, but this is not guaranteed. + """ + raise NotImplementedError() + + @abstractmethod + def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int: + raise NotImplementedError() + + @abstractmethod + def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None: + """ + Given a sequence and the number of new tokens in the sequence, determine + whether or not additional KV-storage is needed and allocate it if so. + + Args: + sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage. + n_new_tokens (int): The number of new tokens in the sequence. + """ + raise NotImplementedError() + + @abstractmethod + def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]: + """ + Return the KV-cache configuration for this model. This should be a tuple of one or more + KVCacheConfig objects (one for each distinct cache group). + """ + raise NotImplementedError() + + @property + @abstractmethod + def max_sequence_length(self) -> int: + """ + The maximum sequence length supported by the model. + """ + ... + + def maybe_free_kv(self, sequence: DSSequenceDescriptor) -> None: + """ + After completing a forward pass, determine whether or not the there are any KV blocks + that maybe freed since they are no longer in use. + + Consider the following example: + + We have a block size of 4 and a local window size of 8. At the beginning of the forward + pass there 10 tokens had been seen and the new forward has a size of 4. 
This would lend + itself to the following cache structure prior to the forward: + [[0, 1, 2*, 3*] [4*, 5*, 6*, 7*] [8*, 9*, x, x] [x x x x]] + Where x's denote empty cache locations and * denote values that are needed for attention + of the next open slot. After the forward, the cache would look like the following: + [[0, 1, 2, 3] [4, 5, 6*, 7*] [8*, 9*, 10*, 11*] [12* 13* x x]] + In this case, the first block is no longer needed since it is not needed for any future + local attention windows. This function would be responsible for freeing that block. + + Default behavior assumes no local patterns that require freeing and in general should + be sufficient. + """ + pass + + @abstractmethod + def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None: + """ + This will be called before each forward with the intent of building forward-specific metadata + about a batch. The intent here is to build data structures like attention atoms without necessarily + needing to implement graphable kernels to do so. + + Abstract so as to force model implementations to opt out of doing anything here explicitly. + """ + raise NotImplementedError() + + def forward(wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Complete a forward pass of the model. This interface should be graphable, so it + should not rely on the ability to use python control flow. + """ + raise NotImplementedError() diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py new file mode 100644 index 0000000000000000000000000000000000000000..b4c0956f4049bd1515eb9d95cb865468eb6740b8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. + +from deepspeed.inference.v2.model_implementations.common_parameters import * +from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +''' + # HF Mistral model (mistralai/Mistral-7B-v0.1) looks like this: +MistralForCausalLM( + (model): MistralModel( + (embed_tokens): Embedding(32000, 4096) + (layers): ModuleList( + (0-31): 32 x MistralDecoderLayer( + (self_attn): MistralAttention( + (q_proj): Linear(in_features=4096, out_features=4096, bias=False) + (k_proj): Linear(in_features=4096, out_features=1024, bias=False) + (v_proj): Linear(in_features=4096, out_features=1024, bias=False) + (o_proj): Linear(in_features=4096, out_features=4096, bias=False) + (rotary_emb): MistralRotaryEmbedding() + ) + (mlp): MistralMLP( + (gate_proj): Linear(in_features=4096, out_features=14336, bias=False) + (up_proj): Linear(in_features=4096, out_features=14336, bias=False) + (down_proj): Linear(in_features=14336, out_features=4096, bias=False) + (act_fn): SiLUActivation() + ) + (input_layernorm): MistralRMSNorm() + (post_attention_layernorm): MistralRMSNorm() + ) + ) + (norm): MistralRMSNorm() + ) + (lm_head): Linear(in_features=4096, out_features=32000, bias=False) +) +''' + + +class MistralTransformerContainer(LayerContainer): + """ + Transformer layer container for the Mistral model. 
+ """ + qkv_w: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: GatedMLPParameter + mlp_2_w: MLP2Parameter + attn_norm_gamma: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.o_proj.weight": "attn_out_w.params", + "mlp.gate_proj.weight": "mlp_1_w.gate_params", + "mlp.up_proj.weight": "mlp_1_w.up_params", + "mlp.down_proj.weight": "mlp_2_w.params", + "input_layernorm.weight": "attn_norm_gamma.params", + "post_attention_layernorm.weight": "mlp_norm_gamma.params", + } + + +class MistralNonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Mistral model. + """ + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm: NormParameter + + PARAM_MAPPING = { + "model.embed_tokens.weight": "word_emb.params", + "model.norm.weight": "final_norm.params", + "lm_head.weight": "word_unembed.params", + } diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..2dcb63c050a0b4ceea8ce7ece38b6fcd329273a8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py @@ -0,0 +1,255 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import weakref +from abc import abstractmethod +from typing import Type + +import torch + +# Currently have dependency loops for the type hints. +InferenceModel = Type["InferenceModel"] +LayerContainer = Type["LayerContainer"] + +MAPPING_KEY = "PARAM_MAPPING" + + +def make_param_getter(clsname, param): + """ + Normal getter implementation for a property. 
+ """ + + def param_getter(self): + return getattr(self, f"__{clsname}__{param}") + + return param_getter + + +def make_param_setter(clsname, param): + """ + Setter implementation that will call complete component to potentially + finalize the parameter. + """ + + def param_setter(self, value): + setattr(self, f"__{clsname}__{param}", value) + self.complete_component() + + return param_setter + + +def make_readonly_setter(): + """ + Setter implementation that will raise an error if called. + """ + + def paramlist_setter(self, value): + raise ValueError("Cannot set a ParametrizedList directly.") + + return paramlist_setter + + +class ParameterMetaclass(type): + """ + MetaClass for the ParameterBase base class. This class will parse the `src_params` + attribute and create properties for each of the dependencies. A dependency can either + be represented as a string, which is interpreted as a named Tensor, or a `ParametrizedList` + subclass. + """ + + def __new__(cls, clsname, bases, attrs): + + annotations = attrs.get("__annotations__", {}) + dependencies = { + name: annotation + for name, annotation in annotations.items() if issubclass(annotation, (torch.Tensor, ParametrizedList)) + } + n_dependencies = len(dependencies) + + # Create properties for each of our dependencies + for d_name, d_type in dependencies.items(): + if issubclass(d_type, ParametrizedList): + assert hasattr( + d_type, "count_attr" + ), "ParametrizedList must have a count_attr attribute to access on the inference module." 
+ attrs[d_name] = property(make_param_getter(clsname, d_name), make_readonly_setter()) + else: # torch.Tensor + attrs[d_name] = property(make_param_getter(clsname, d_name), make_param_setter(clsname, d_name)) + + new_cls = super().__new__(cls, clsname, bases, attrs) + new_cls.n_dependencies = n_dependencies + + return new_cls + + def __call__(cls, *args, **kwargs): + new_obj = super().__call__(*args, **kwargs) + new_obj.__init__(*args, **kwargs) + + setattr(new_obj, "dest_param", None) + + # Initialize our dependences to None/empty `ParametrizedList`s + for name, annotation in new_obj.__annotations__.items(): + if issubclass(annotation, ParametrizedList): + #TODO(jeff): update assert with this, model implementation attribute does not align or missing wrt the ParametrizedList attributes + assert hasattr( + new_obj.inference_model, annotation.count_attr + ), f"new_obj={new_obj.__class__.__name__}, name={name}, annotation.count_attr={annotation.count_attr}" + param_list = annotation(new_obj, getattr(new_obj.inference_model, annotation.count_attr)) + setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", param_list) + else: # torch.Tensor + setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", None) + + return new_obj + + +class ParameterBase(metaclass=ParameterMetaclass): + """ + A ParameterBase allows us to consolidate tracking the dependencies of loading a parameter from + a checkpoint into a single object. This class should not be used directly, but rather subclassed + and the `src_params` attribute set to a list of strings and/or `ParametrizedList`s. + """ + + # inference_model: InferenceModel + """ + Inference model that will provide context on how to shard and transform the parameter. + """ + + #completed_components: int + """ + How many of the layer dependencies have been met. This is used to determine when the parameter + is ready to be finalized. A ParametrizedList counts as a single dependency for the purposes + of this counter. 
+ """ + + def __init__(self, model: InferenceModel, parent_container: LayerContainer) -> None: + """ + Direct constructor. This should not be called from client code. + + Args: + model (InferenceModel): Inference model that will be used to shard and transform the + parameter in `finalize`. + parent_container (LayerContainer): The parent container that this parameter is a member + of. We will build a weakref to this container to call the finalization callback. + """ + self.inference_model = model + self.completed_components = 0 + self.parent_container = weakref.ref(parent_container) + + @abstractmethod + def finalize(self) -> torch.Tensor: + """ + Finalize the parameter after all of its source parameters have been set. This method + will be automatically called when all inputs have been set. It should return the Tensor + with all transformations performed on it. + """ + pass + + def complete_component(self) -> None: + """ + Mark a component as completed. This should be called by the relevant setter of a direct + property or a ParametrizedList. This method will automatically call `finalize` when all + dependencies have been met and then call the finalization callback on the parent container. + + Once the finalization callback has been called, the parameter will be replaced with the + `dst_param` attribute on the parent container, and this instance will be destroyed. + """ + self.completed_components += 1 + + if self.completed_components != self.n_dependencies: + return + + finalized_param = self.finalize() + self.parent_container().finalization_callback(self, finalized_param) + + +class ParametrizedList: + """ + A ParametrizedList is a list of parameters that are dependencies + of a `ParameterBase` but may vary in length depending on the model + configuration (rather than architecture). For example, a MoE layer + may have different number of experts depending on the size of the model. 
+ + This class is used to manage these lists and provide integer indexing + of a single component rather than accessing names directly. For example, + it tends to be more natural to access the 8th expert with `experts[8]` + rather than a name like `expert_8`, especially as an attribute. + + To inherit from this class, set static variables `name` and `count_attr`. + + ```python + class MyParametrizedList(ParametrizedList): + count_attr: str = "my_list_count" + ``` + + In the above example, `my_list_count` should be an accessible attribute + of the inference model (i.e. via `self.inference_model.my_list_count`). + + NOTE: There are some APIs in which this type cannot be used as if it is + just a list of Tensors. For example, `torch.cat(param_list)` will not work. + However, you can make it compatible with a tuple wrapper: + `torch.cat(tuple(param_list))` + """ + + n_params: int + """ + Number of params this list contains. + """ + + param: ParameterBase + """ + WeakRef to the owning parameter. + """ + + def __init__(self, param: ParameterBase, n_params: int) -> None: + """ + Constructor. Should not be called from client code. + + Args: + param (ParameterBase): The owning parameter. + n_params (int): The number of parameters this list contains. This should be + """ + self.n_params = n_params + self.set_params = 0 + self.param = weakref.ref(param) + self._params = [None] * n_params + + def __getitem__(self, index): + return self._params[index] + + def __setitem__(self, index, value): + if self._params[index] is not None: + raise ValueError("Cannot set a parameter twice.") + + self._params[index] = value + self.set_params += 1 + + if self.set_params != self.n_params: + return + + self.param().complete_component() + + def __iter__(self): + return iter(self._params) + + +def ParamList(attr: str): + """ + Helper to create a subclass of ParametrizedList with the desired `count_attr`. 
+ + In this manner, we can annotate the type of a Parameter dependency with the + following: + + ```python + class CustomParameter(ParameterBase): + dependency_list: ParamList("dependencies_count_name") + ``` + + where "dependencies_count_name" is the name of the attribute on the inference model. + """ + + class ParametrizedListInstance(ParametrizedList): + count_attr: str = attr + + return ParametrizedListInstance diff --git a/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d3818d46675d9b00735454b3db021e770bb4947 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py @@ -0,0 +1,54 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import Enum + + +class SchedulingResult(Enum): + + Success = 0 + """ + The proposed batch is valid and can be scheduled. + """ + + EngineSequenceLimitExceeded = 1 + """ + The proposed batch would would overflow the number of concurrent sequences the engine may support. + """ + + BatchSequenceLimitExceeded = 2 + """ + The proposed batch contains more sequences than the engine was configured + to support in a single forwardp + """ + + BatchTokenLimitExceeded = 3 + """ + The proposed batch contains more tokens than the engine was configured + to support in a single forward. + """ + + KVCacheLimitExceeded = 4 + """ + The proposed batch would require more KV cache to be allocated than the engine + currently has available. + """ + + SequenceTokenLimitExceeded = 5 + """ + The proposed batch contains a sequence that is longer than the engine/model can support. + """ + + +class SchedulingError(RuntimeError): + + result: SchedulingResult + """ + The failed result of the scheduling check. Guaranteed to not be SchedulingResult.Success. 
+ """ + + def __init__(self, result: SchedulingResult) -> None: + self.result = result + super().__init__(f"Batch scheduling failed with result {result}") diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63a03613477a12a2a6efdd77911b28ebd747e8e3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78b51c73d2b876c4042c4d010b729b88f4287cad Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..203bf18fbc8abf2d86ae66efd5985f66f58420ca Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75b798e13041d9c132941527bf991dcb2058a21b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56eccb5835a03ad8ea9659a6d5887f1af2af3b7c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3390e426225b68d4c994516380f1b4de2a75234 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83999a91de2cc77c6462ca9b29b3093a86a2b2af Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3d87fac037f3add60b6e444cedaf3e95effc7aa Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b5a7c43f233189a000fcc7cd26c54074bcaabad Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75385f54f135c535dfcdcfd5acffd4ca9eb9ed0b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b564c6a2824b18a700e848311c7ce7e14f53e96 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8301afb2c8cf80ad099169bd55c6effc42b0edb6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ade0488bb915222196c4cd6e71056eb8f483c90b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41c322709701085712d6a278b17cc668a2aea67d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f1637e244ef849692e094cffb93436990699075 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4a4aa1c47210e5725055101ea9f3164647191e1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35e442a3d43dd838ea70f4f9dac05fc9bcc4ae5d Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..520a1792feab6957cdd7e0f3a44a5e17f2daefc3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49e618e9a7395ac2890457b24dc1b3778adb9b74 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b61378c6a3aa2ec9cd69077886ecb07c3478b5d7 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e512e25a2a29e1b463bdc373722a64c755b89098 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft 
Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' + +from .fused_adam import FusedAdamBuilder +from .async_io import AsyncIOBuilder +from .no_impl import NotImplementedBuilder +from .cpu_adam import CPUAdamBuilder +from .cpu_adagrad import CPUAdagradBuilder +from .cpu_lion import CPULionBuilder +from .inference import InferenceBuilder diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7e6ccedf1a4fc684fa4cee6a76bf5ed4c01aae9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beb338cb0a8b9916bc27588af5a9618c3cfc7cec Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a526f5933ebcb040384449abe9af2a913febf0ab Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f921c83d95196b220c77131abf183dc05fb6902 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db0aa7f9e0a0300c5bd1221911966c4298b2521a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..937d37c55c39ac1237d6729f7f89e4015af82652 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65e87e14bb95f2517a5b378fbd1e7ee63788f76f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..2ea2fd080e824c38b6c070cb328147ade7c1ea9b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e10eb733fdde1b29008932f2346e320e4121146 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py new file mode 100644 index 0000000000000000000000000000000000000000..86560353b1c761f157512613f8f31ab0786fafd7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py @@ -0,0 +1,103 @@ +# Copyright (c) Microsoft Corporation. 
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import distutils.spawn
import subprocess

from .builder import NPUOpBuilder


class AsyncIOBuilder(NPUOpBuilder):
    """Builds the async (libaio-backed) I/O op for the NPU backend."""

    BUILD_VAR = "DS_BUILD_AIO"
    NAME = "async_io"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.aio.{self.NAME}_op'

    def sources(self):
        return [
            'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
            'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
            'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
            'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
            'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
        ]

    def include_paths(self):
        args = super().include_paths()
        args += ['csrc/aio/py_lib', 'csrc/aio/common']
        return args

    def cxx_args(self):
        """Return C++ compiler flags for the aio extension."""
        args = super().cxx_args()
        # -O0 for improved debugging, since performance is bound by I/O
        CPU_ARCH = self.cpu_arch()
        SIMD_WIDTH = self.simd_width()
        import torch  # Keep this import here to avoid errors when building DeepSpeed wheel without torch installed
        TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2])
        # BUGFIX: compare (major, minor) as a tuple. The previous check
        # `MAJOR >= 2 and MINOR >= 1` would wrongly pick c++14 for a future
        # x.0 release (e.g. torch 3.0 has minor == 0).
        if (TORCH_MAJOR, TORCH_MINOR) >= (2, 1):
            CPP_STD = '-std=c++17'
        else:
            CPP_STD = '-std=c++14'
        return args + [
            '-g',
            '-Wall',
            '-O0',
            CPP_STD,
            '-shared',
            '-fPIC',
            '-Wno-reorder',
            CPU_ARCH,
            '-fopenmp',
            SIMD_WIDTH,
            '-laio',
        ]

    def extra_ldflags(self):
        args = super().extra_ldflags()
        return args + ['-laio']

    def check_for_libaio_pkg(self):
        """Probe known package managers for a libaio dev package.

        Returns True if a package manager reports libaio installed; otherwise
        prints an installation suggestion for the first package manager found.
        NOTE(review): `distutils.spawn.find_executable` is deprecated since
        Python 3.10; `shutil.which` is the drop-in replacement when the
        supported interpreter floor allows it.
        """
        libs = dict(
            dpkg=["-l", "libaio-dev", "apt"],
            pacman=["-Q", "libaio", "pacman"],
            rpm=["-q", "libaio-devel", "yum"],
        )

        found = False
        for pkgmgr, data in libs.items():
            flag, lib, tool = data
            path = distutils.spawn.find_executable(pkgmgr)
            if path is not None:
                cmd = f"{pkgmgr} {flag} {lib}"
                result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                if result.wait() == 0:
                    found = True
                else:
                    self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
                break
        return found

    def is_compatible(self, verbose=True):
        # Check for the existence of libaio by using distutils
        # to compile and link a test program that calls io_submit,
        # which is a function provided by libaio that is used in the async_io op.
        # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
        # respectively to specify the directories for libaio.h and libaio.so.
        aio_compatible = self.has_function('io_pgetevents', ('aio', ))
        # BUGFIX: only emit remediation guidance when libaio is actually
        # missing; previously the package check and CFLAGS/LDFLAGS warning
        # ran unconditionally, spamming users with a working libaio.
        if verbose and not aio_compatible:
            self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")

            # Check for the libaio package via known package managers
            # to print suggestions on which package to install.
            self.check_for_libaio_pkg()

            self.warning(
                "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
            )
        return super().is_compatible(verbose) and aio_compatible


# --- file: deepspeed/ops/op_builder/npu/builder.py ---
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import re
import os
try:
    import torch_npu
except ImportError:
    # torch_npu is only present on Ascend NPU machines; builders that need it
    # will fail later with a clearer error.
    pass

try:
    # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
    # if successful this also means we're doing a local install and not JIT compile path
    from op_builder import __deepspeed__  # noqa: F401 # type: ignore
    from op_builder.builder import OpBuilder
except ImportError:
    from deepspeed.ops.op_builder.builder import OpBuilder


class NPUOpBuilder(OpBuilder):
    """Base op builder for Huawei Ascend NPU: locates the CANN toolkit and
    torch_npu install and contributes their include/lib paths and flags."""

    _ascend_path = None
    _torch_npu_path = None
    _cann_version = None

    def __init__(self, name):
        super().__init__(name)
        self._ascend_path = self.installed_cann_path()
        self._torch_npu_path = os.path.join(os.path.dirname(os.path.abspath(torch_npu.__file__)))
        try:
            self._cann_version = self.installed_cann_version(self.name)
        except BaseException:
            print(f"{self.name} ascend_cann is missing, npu ops cannot be compiled!")

    def cann_defs(self):
        """Preprocessor define reflecting whether a CANN install was detected."""
        if self._cann_version:
            return '-D__ENABLE_CANN__'
        return '-D__DISABLE_CANN__'

    def installed_cann_path(self):
        """Return the CANN install root from ASCEND_HOME_PATH, or None.

        BUGFIX: the condition previously used `or`, so when the variable was
        unset the second clause evaluated `os.environ["ASCEND_HOME_PATH"]`
        and raised KeyError instead of returning None.
        """
        if "ASCEND_HOME_PATH" in os.environ and os.path.exists(os.environ["ASCEND_HOME_PATH"]):
            return os.environ["ASCEND_HOME_PATH"]
        return None

    def installed_cann_version(self, name=""):
        """Scan the CANN install tree for an ascend_*_install.info file and
        return the version string recorded there ("" if not found)."""
        ascend_path = self.installed_cann_path()
        assert ascend_path is not None, "CANN_HOME does not exist, unable to compile NPU op(s)"
        cann_version = ""
        for dirpath, _, filenames in os.walk(os.path.realpath(ascend_path)):
            if cann_version:
                break
            install_files = [file for file in filenames if re.match(r"ascend_.*_install\.info", file)]
            if install_files:
                filepath = os.path.join(dirpath, install_files[0])
                with open(filepath, "r") as f:
                    for line in f:
                        if line.find("version") != -1:
                            cann_version = line.strip().split("=")[-1]
                            break
        return cann_version

    def include_paths(self):
        paths = super().include_paths()
        paths += [os.path.join(self._ascend_path, 'include'), os.path.join(self._torch_npu_path, 'include')]
        return paths

    def cxx_args(self):
        args = super().cxx_args()
        args += ['-O3', '-std=c++17', '-g', '-Wno-reorder', '-fopenmp']
        # Hardening flags required by the Ascend toolchain.
        args += ['-fstack-protector-all', '-Wl,-z,relro,-z,now,-z,noexecstack', '-Wl,--disable-new-dtags,--rpath']
        args += [
            self.cann_defs(),
            self.cpu_arch(),
            self.simd_width(), '-L' + os.path.join(self._ascend_path, 'lib64'),
            '-L' + os.path.join(self._torch_npu_path, 'lib')
        ]
        return args

    def extra_ldflags(self):
        flags = super().extra_ldflags()
        flags += [
            '-L' + os.path.join(self._ascend_path, 'lib64'), '-lascendcl',
            '-L' + os.path.join(self._torch_npu_path, 'lib'), '-ltorch_npu'
        ]
        return flags


# --- file: deepspeed/ops/op_builder/npu/cpu_adagrad.py ---
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import NPUOpBuilder


class CPUAdagradBuilder(NPUOpBuilder):
    """Builds the CPU Adagrad optimizer op for the NPU backend."""

    BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
    NAME = "cpu_adagrad"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adagrad.{self.NAME}_op'

    def sources(self):
        return ['csrc/adagrad/cpu_adagrad.cpp']

    def include_paths(self):
        args = super().include_paths()
        args += ['csrc/includes']
        return args


# --- file: deepspeed/ops/op_builder/npu/cpu_adam.py ---
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import NPUOpBuilder


class CPUAdamBuilder(NPUOpBuilder):
    """Builds the CPU Adam optimizer op for the NPU backend."""

    BUILD_VAR = "DS_BUILD_CPU_ADAM"
    NAME = "cpu_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        # Entry point plus the shared CPU Adam implementation.
        return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']

    def include_paths(self):
        return super().include_paths() + ['csrc/includes']


# --- file: deepspeed/ops/op_builder/npu/cpu_lion.py ---
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import NPUOpBuilder


class CPULionBuilder(NPUOpBuilder):
    """Builds the CPU Lion optimizer op for the NPU backend."""

    BUILD_VAR = "DS_BUILD_CPU_LION"
    NAME = "cpu_lion"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.lion.{self.NAME}_op'

    def sources(self):
        # Entry point plus the shared CPU Lion implementation.
        return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp']

    def include_paths(self):
        return super().include_paths() + ['csrc/includes']


# --- file: deepspeed/ops/op_builder/npu/fused_adam.py ---
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import NPUOpBuilder

try:
    import torch_npu
except ImportError:
    # Only available on Ascend NPU machines; multi_tensor_adam will fail
    # with a NameError elsewhere, which is acceptable for a stub backend.
    pass


class NPUFusedAdam:
    """Pure-python stand-in for the fused Adam kernel, delegating the math to
    torch_npu's apply_adam / apply_adam_w operators."""

    @staticmethod
    def multi_tensor_adam(chunk_size, noop_flag_buffer, tensor_lists, lr, beta1, beta2, epsilon, step, adam_w_mode,
                          bias_correction, weight_decay, *args):
        # torch_npu's operators take beta^step ("power") terms directly.
        bias_correction1 = beta1**step
        bias_correction2 = beta2**step

        grads, params, exp_avgs, exp_avg_sqs = tensor_lists[0], tensor_lists[1], tensor_lists[2], tensor_lists[3]

        # Walk the flattened parameter groups one tensor at a time.
        for idx, grad_flat in enumerate(grads):
            param_flat = params[idx]
            m_flat = exp_avgs[idx]
            v_flat = exp_avg_sqs[idx]

            if adam_w_mode:
                # Decoupled weight decay (AdamW).
                param_flat.data, m_flat, v_flat = torch_npu.npu_apply_adam_w(
                    bias_correction1,
                    bias_correction2,
                    lr,
                    weight_decay,
                    beta1,
                    beta2,
                    epsilon,
                    grad_flat,
                    None,  # max_grad_norm
                    False,  # amsgrad
                    False,  # maximize
                    out=(param_flat.data, m_flat, v_flat))
            else:
                # Classic Adam.
                param_flat.data, m_flat, v_flat = torch_npu.npu_apply_adam(
                    bias_correction1,
                    bias_correction2,
                    lr,
                    beta1,
                    beta2,
                    epsilon,
                    grad_flat,
                    False,  # use_locking
                    False,  # use_nesterov
                    out=(param_flat.data, m_flat, v_flat))


class FusedAdamBuilder(NPUOpBuilder):
    """Fused Adam "builder" for NPU: nothing is compiled; `load` hands back
    the python NPUFusedAdam shim instead of a compiled module."""

    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
    NAME = "fused_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        return []

    def include_paths(self):
        return []

    def load(self, verbose=True):
        return NPUFusedAdam


# --- file: deepspeed/ops/op_builder/npu/inference.py ---
# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import IntEnum +from .builder import NPUOpBuilder + +try: + import torch + import torch_npu +except ImportError as e: + pass + + +class ActivationFuncType(IntEnum): + UNKNOWN = 0 + GELU = 1 + ReLU = 2 + GATED_GELU = 3 + GATED_SILU = 4 + + +class InferenceContext: + _workspace = None + + _seed = 42 + _curr_offset = 0 + _stream = 0 + _free_memory_size = 0 + _num_tokens = 1 + _attention_unfused_workspace_offset = 0 + _workSpaceSize = 0 + + workSpaceSize = 0 + kv_caches = None + + @staticmethod + def reset_tokens(initial_tokens=1): + InferenceContext._num_tokens = initial_tokens + + @staticmethod + def current_tokens(): + return InferenceContext._num_tokens + + @staticmethod + def GetWorkSpace(): + return InferenceContext._workspace + + +class NPUInference: + + @staticmethod + def layer_norm(inputs, gamma, beta, epsilon): + return torch.nn.functional.layer_norm(inputs, [inputs.shape[-1]], gamma, beta, eps=epsilon) + + @staticmethod + def _qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + inp_norm = torch.nn.functional.layer_norm(inputs, (inputs.shape[2], ), gamma, beta, eps) + weight = weight.t() if transpose else weight + tmp = torch.matmul(inp_norm, weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + return output + + @staticmethod + def qkv_gemm_fp16(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose) + + @staticmethod + def qkv_gemm_bf16(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose) + + @staticmethod + def qkv_gemm_fp32(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, 
beta, eps, add_bias, q_int8, transpose) + + @staticmethod + def _bias_add_transform_0213(vals, bias, hidden_dim, seq_length, seq_offset, heads, num_kv, rotary_dim, + rotate_half, rotate_every_two, rope_theta): + bsz, _, _ = vals.shape + q = vals[..., :hidden_dim].reshape(bsz, seq_length, heads, -1) + k = vals[..., hidden_dim:hidden_dim + num_kv * (hidden_dim // heads)].reshape(bsz, seq_length, num_kv, -1) + v = vals[..., hidden_dim + num_kv * (hidden_dim // heads):] + + if rotary_dim > 0 and rotate_every_two: + # sin, cos may use cache + seq_id = torch.arange(0, seq_length).to("npu") + inv_freq = torch.arange(0, rotary_dim, 2) / rotary_dim + inv_freq = inv_freq.to("npu") + inv_freq = 1.0 / torch.pow(rope_theta, inv_freq) + inv_freq = torch.outer(seq_id, inv_freq) + sin = inv_freq.sin() + cos = inv_freq.cos() + # shape: [bsz=1, seq_len, heads=1, rotary_dim] + sin = sin.view(-1, seq_length, 1, rotary_dim // 2).repeat_interleave(2, dim=-1) + cos = cos.view(-1, seq_length, 1, rotary_dim // 2).repeat_interleave(2, dim=-1) + + q_pos, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] + k_pos, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] + + q_pos = torch_npu.npu_rotary_mul(q_pos, cos, sin) + q = torch.cat([q_pos, q_pass], dim=-1) + k_pos = torch_npu.npu_rotary_mul(k_pos, cos, sin) + k = torch.cat([k_pos, k_pass], dim=-1) + + output = q.reshape(bsz, seq_length, -1).contiguous() # [b, s, H] + k_cache = k.reshape(bsz, seq_length, heads, -1).transpose(1, 2).contiguous() # [b, n, s, d] + v_cache = v.reshape(bsz, seq_length, heads, -1).transpose(1, 2).contiguous() # [b, n, s, d] + return output, k_cache, v_cache + + @staticmethod + def _softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, + norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id, + num_layers, alibi, rope_theta): + bsz, seq_len, k = query_key_value.size() + k = k // (heads + 2 * (num_kv if num_kv > 0 else heads)) + hidden_dim = 
heads * k + + is_promt = seq_len > 1 + if not InferenceContext.kv_caches: + InferenceContext.kv_caches = [[None, None] for _ in range(num_layers)] + if is_promt: + InferenceContext.reset_tokens(seq_len) + InferenceContext.kv_caches[layer_id] = [None, None] + + soft_len = InferenceContext.current_tokens() + workspace = InferenceContext.GetWorkSpace() + seq_offset = 0 if is_promt else soft_len - 1 + + q, k, v = NPUInference._bias_add_transform_0213(vals=query_key_value, + bias=None, + hidden_dim=hidden_dim, + seq_length=seq_len, + seq_offset=seq_offset, + heads=heads, + num_kv=num_kv if num_kv > 0 else heads, + rotary_dim=rotary_dim, + rotate_half=rotate_half, + rotate_every_two=rotate_every_two, + rope_theta=rope_theta) + + if not is_promt: + k_cache, v_cache = InferenceContext.kv_caches[layer_id] + if k_cache is not None: + k = torch.cat([k_cache, k], dim=2) + v = torch.cat([v_cache, v], dim=2) + InferenceContext.kv_caches[layer_id] = [k, v] + seq_len = k.shape[2] + + layer_scale = max(1, layer_id) if len(alibi.size()) > 1 else 1.0 + alpha = norm_factor * norm_factor / layer_scale + + output = torch_npu.npu_fusion_attention(q, + k.transpose(1, 2).reshape(bsz, seq_len, -1).contiguous(), + v.transpose(1, 2).reshape(bsz, seq_len, -1).contiguous(), + heads, + "BSH", + pse=None, + padding_mask=None, + atten_mask=attn_mask.bool(), + scale=alpha, + pre_tockens=65536, + next_tockens=65536, + keep_prob=1, + inner_precise=0)[0] + + return output, k, v + + @staticmethod + def softmax_context_fp16(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, + norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id, + num_layers, alibi, rope_theta): + return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, + heads, num_kv, norm_factor, triangular_masking, local_attention, + window_size, no_masking, layer_id, num_layers, alibi, rope_theta) + + @staticmethod + def 
softmax_context_bf16(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, + norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id, + num_layers, alibi, rope_theta): + return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, + heads, num_kv, norm_factor, triangular_masking, local_attention, + window_size, no_masking, layer_id, num_layers, alibi, rope_theta) + + @staticmethod + def softmax_context_fp32(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, + norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id, + num_layers, alibi, rope_theta): + return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, + heads, num_kv, norm_factor, triangular_masking, local_attention, + window_size, no_masking, layer_id, num_layers, alibi, rope_theta) + + @staticmethod + def _vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode): + if transposed_mode: + return torch.matmul(input, weight.t()) + return torch.matmul(input, weight) + + @staticmethod + def vector_matmul_fp16(input, weight, async_op, q_scale, q_int8, transposed_mode): + return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode) + + @staticmethod + def vector_matmul_bf16(input, weight, async_op, q_scale, q_int8, transposed_mode): + return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode) + + @staticmethod + def vector_matmul_fp32(input, weight, async_op, q_scale, q_int8, transposed_mode): + return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode) + + @staticmethod + def _mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + if mlp_after_attn: + 
residual_add = torch.nn.functional.layer_norm(input + residual + input_bias, (input.shape[-1], ), gamma, + beta, eps) + else: + residual_add = torch.nn.functional.layer_norm(input, (input.shape[-1], ), gamma, beta, eps) + + weight_interm = weight_interm.t() if transpose else weight_interm + tmp = torch.matmul(residual_add, weight_interm) + if mlp_act_func_type == ActivationFuncType.GELU: + tmp = torch.nn.functional.gelu(tmp + bias) + elif mlp_act_func_type == ActivationFuncType.ReLU: + tmp = torch.nn.functional.relu(tmp + bias) + else: + raise Exception('Unsupported ActivationFuncType {}'.format(mlp_act_func_type)) + output = torch.matmul(tmp, weight_out.t()) + return output, residual_add + + @staticmethod + def mlp_gemm_fp16(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, + mlp_act_func_type, transpose) + + @staticmethod + def mlp_gemm_bf16(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, + mlp_act_func_type, transpose) + + @staticmethod + def mlp_gemm_fp32(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, + mlp_act_func_type, transpose) + + @staticmethod + def 
_residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + if mlp_after_attn: + if pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + else: + if add_bias: + residual += attention_bias.float() + tmp = hidden_state.float() + attention_output.float() + (residual.float() + final_bias.float()) / mp_size + + input_dtype = hidden_state.dtype + residual.set_(tmp.to(input_dtype)) + + @staticmethod + def residual_add_bias_fp16(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + return NPUInference._residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + mp_size, mlp_after_attn, add_bias, pre_layer_norm) + + @staticmethod + def residual_add_bias_bf16(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + return NPUInference._residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + mp_size, mlp_after_attn, add_bias, pre_layer_norm) + + @staticmethod + def residual_add_bias_fp32(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + return NPUInference._residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + mp_size, mlp_after_attn, add_bias, pre_layer_norm) + + +class InferenceBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" + NAME = "transformer_inference" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.transformer.inference.{self.NAME}_op' + + def sources(self): + return [] + + def include_paths(self): + return [] + + 
def load(self, verbose=True): + return NPUInference diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1771fabc22f6ad13161231900a737bba733e68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import NPUOpBuilder + + +class NotImplementedBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED" + NAME = "deepspeed_not_implemented" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.comm.{self.NAME}_op' + + def load(self, verbose=True): + raise ValueError("This op had not been implemented on NPU backend.") + + def sources(self): + return [] + + def cxx_args(self): + return [] + + def extra_ldflags(self): + return [] + + def include_paths(self): + return [] diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2815f164e5f2598b5f6f9a4badfbc0a8969c7a5c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cpu_adam import CPUAdamBuilder +from .cpu_adagrad import CPUAdagradBuilder +from .fused_adam import FusedAdamBuilder +from .async_io import AsyncIOBuilder diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aacece52df7643cfaaa12f116f39f1dc70ebe458 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/async_io.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/async_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4f8e08b70ce1eddc1ca0760e15e44aeef02bb4e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/async_io.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/builder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..284adeb56c4b89c5e7209e4bedab37f3e394c2e2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/builder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adagrad.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4419f1ef7771d8b0d4ced285b89161032a4bfcc7 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c23beb1587d2c08f3c7f6211e9871edff4ff1a5f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/fused_adam.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8fa77030afd69ebb05ed4bae8b7f8d07e232def Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py new file mode 100644 index 0000000000000000000000000000000000000000..0fd43f72e60ea19f4dd167062eb1334571eee0eb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import distutils.spawn +import subprocess + +from .builder import OpBuilder + + +class AsyncIOBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_AIO" + NAME = "async_io" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.aio.{self.NAME}_op' + + def sources(self): + return [ + 'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp', + 'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp', + 'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp', + 'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp', + 'csrc/aio/py_lib/deepspeed_pin_tensor.cpp' + ] + + def include_paths(self): + return ['csrc/aio/py_lib', 'csrc/aio/common'] + + def cxx_args(self): + import torch + # -O0 for improved debugging, since performance is bound by I/O + CPU_ARCH = self.cpu_arch() + SIMD_WIDTH = self.simd_width() + TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2]) + if TORCH_MAJOR >= 2 and TORCH_MINOR >= 1: + CPP_STD = '-std=c++17' + else: + CPP_STD = '-std=c++14' + return [ + '-g', + '-Wall', + '-O0', + CPP_STD, + '-shared', + '-fPIC', + '-Wno-reorder', + CPU_ARCH, + '-fopenmp', + SIMD_WIDTH, + '-laio', + ] + + def extra_ldflags(self): + return ['-laio'] + + def check_for_libaio_pkg(self): + libs = dict( + dpkg=["-l", "libaio-dev", "apt"], + pacman=["-Q", "libaio", "pacman"], + rpm=["-q", "libaio-devel", "yum"], + ) + + found = False + for pkgmgr, data in libs.items(): + flag, lib, tool = data + path = distutils.spawn.find_executable(pkgmgr) + if path is not None: + cmd = f"{pkgmgr} {flag} {lib}" + result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + if result.wait() == 0: + found = True + else: + self.warning(f"{self.NAME}: please install the {lib} package with {tool}") + break + return found + + def 
is_compatible(self, verbose=True): + # Check for the existence of libaio by using distutils + # to compile and link a test program that calls io_submit, + # which is a function provided by libaio that is used in the async_io op. + # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS + # respectively to specify the directories for libaio.h and libaio.so. + aio_compatible = self.has_function('io_pgetevents', ('aio', )) + if verbose and not aio_compatible: + self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.") + + # Check for the libaio package via known package managers + # to print suggestions on which package to install. + self.check_for_libaio_pkg() + + self.warning( + "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found." + ) + return super().is_compatible(verbose) and aio_compatible diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/builder.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..459dcce6bfae1f48d5c049f3e744f02c29870273 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/builder.py @@ -0,0 +1,130 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import time +import importlib + +try: + # is op_builder from deepspeed or a 3p version? 
this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class SYCLOpBuilder(OpBuilder): + + def builder(self): + try: + from intel_extension_for_pytorch.xpu.cpp_extension import DPCPPExtension + except ImportError: + from intel_extension_for_pytorch.xpu.utils import DPCPPExtension + include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())] + print("dpcpp sources = {}".format(self.sources())) + dpcpp_ext = DPCPPExtension(name=self.absolute_name(), + sources=self.strip_empty_entries(self.sources()), + include_dirs=include_dirs, + extra_compile_args={ + 'cxx': self.strip_empty_entries(self.cxx_args()), + }, + extra_link_args=self.strip_empty_entries(self.fixed_aotflags())) + return dpcpp_ext + + def version_dependent_macros(self): + try: + from op_builder.builder import TORCH_MAJOR, TORCH_MINOR + except ImportError: + from deepspeed.ops.op_builder.builder import TORCH_MAJOR, TORCH_MINOR + # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456 + version_ge_1_1 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): + version_ge_1_1 = ['-DVERSION_GE_1_1'] + version_ge_1_3 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): + version_ge_1_3 = ['-DVERSION_GE_1_3'] + version_ge_1_5 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4): + version_ge_1_5 = ['-DVERSION_GE_1_5'] + return version_ge_1_1 + version_ge_1_3 + version_ge_1_5 + + def cxx_args(self): + cxx_flags = [ + '-fsycl', '-fsycl-targets=spir64_gen', '-g', '-gdwarf-4', '-O3', '-std=c++17', '-fPIC', '-DMKL_ILP64', + '-fno-strict-aliasing' + ] + if os.environ.get('USE_MKL_GEMM'): + cxx_flags.append('-DUSE_MKL_GEMM') + 
return cxx_flags + + def extra_ldflags(self): + return [ + '-fPIC', '-fsycl', '-fsycl-targets=spir64_gen', '-fsycl-max-parallel-link-jobs=8', + '-Xs "-options -cl-poison-unsupported-fp64-kernels,cl-intel-enable-auto-large-GRF-mode"', + '-Xs "-device pvc"', '-Wl,-export-dynamic' + ] + + def fixed_aotflags(self): + return [ + '-fsycl', '-fsycl-targets=spir64_gen', '-fsycl-max-parallel-link-jobs=8', '-Xs', + "-options -cl-poison-unsupported-fp64-kernels,cl-intel-enable-auto-large-GRF-mode", '-Xs', "-device pvc" + ] + + def load(self, verbose=True): + from deepspeed.git_version_info import installed_ops, torch_info # noqa: F401 + if installed_ops.get(self.name, False): + return importlib.import_module(self.absolute_name()) + else: + return self.jit_load(verbose) + + def jit_load(self, verbose=True): + if not self.is_compatible(verbose): + raise RuntimeError( + f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. {self.error_log}" + ) + try: + import ninja # noqa: F401 + except ImportError: + raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.") + + self.jit_mode = True + from intel_extension_for_pytorch.xpu.cpp_extension import load + + start_build = time.time() + # Recognize relative paths as absolute paths for jit load + + sources = [self.deepspeed_src_path(path) for path in self.sources()] + extra_include_paths = [self.deepspeed_src_path(path) for path in self.include_paths()] + + # Torch will try and apply whatever CCs are in the arch list at compile time, + # we have already set the intended targets ourselves we know that will be + # needed at runtime. This prevents CC collisions such as multiple __half + # implementations. Stash arch list to reset after build. 
+ ''' + torch_arch_list = None + if "TORCH_CUDA_ARCH_LIST" in os.environ: + torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST") + os.environ["TORCH_CUDA_ARCH_LIST"] = "" + ''' + + op_module = load( + name=self.name, + sources=self.strip_empty_entries(sources), + extra_include_paths=self.strip_empty_entries(extra_include_paths), + extra_cflags=self.strip_empty_entries(self.cxx_args()), + # extra_cuda_cflags=self.strip_empty_entries(self.nvcc_args()), + extra_ldflags=self.strip_empty_entries(self.extra_ldflags()), + verbose=verbose) + + build_duration = time.time() - start_build + if verbose: + print(f"Time to load {self.name} op: {build_duration} seconds") + ''' + # Reset arch list so we are not silently removing it for other possible use cases + if torch_arch_list: + os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list + ''' + return op_module diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adagrad.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..18f80848e1b800de7f8826405bb9ea9f500db263 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adagrad.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import SYCLOpBuilder + + +class CPUAdagradBuilder(SYCLOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAGRAD" + NAME = "cpu_adagrad" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adagrad.{self.NAME}_op' + + def sources(self): + return ['csrc/xpu/adagrad/cpu_adagrad.cpp', 'csrc/xpu/common/custom_cuda_kernel.dp.cpp'] + + def include_paths(self): + return ['csrc/xpu/includes'] diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adam.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..4c7d4d11983987889277f21073e0433bc9440444 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adam.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import SYCLOpBuilder + + +class CPUAdamBuilder(SYCLOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAM" + NAME = "cpu_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + if self.build_for_cpu: + return ['csrc/xpu/adam/cpu_adam.cpp', 'csrc/xpu/adam/cpu_adam_impl.cpp'] + + return [ + 'csrc/xpu/adam/cpu_adam.cpp', 'csrc/xpu/adam/cpu_adam_impl.cpp', + 'csrc/xpu/common/custom_cuda_kernel.dp.cpp' + ] + + def include_paths(self): + return ['csrc/xpu/includes'] diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/fused_adam.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/fused_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..0e0f1a66f8e648305d2ba6fd0ad85a384e6f8bd3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/fused_adam.py @@ -0,0 +1,26 @@ +# Copyright 
(c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +from .builder import SYCLOpBuilder + + +class FusedAdamBuilder(SYCLOpBuilder): + BUILD_VAR = "DS_BUILD_FUSED_ADAM" + NAME = "fused_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return ['csrc/xpu/adam/fused_adam_frontend.cpp', 'csrc/xpu/adam/multi_tensor_adam.dp.cpp'] + + def include_paths(self): + return ['csrc/xpu/includes', 'csrc/xpu/adam'] + + def cxx_args(self): + args = super().cxx_args() + return args + self.version_dependent_macros() diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__init__.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e357257869f794a06d575bfa378769f8e6d3d43c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .sparsity_config import SparsityConfig, DenseSparsityConfig, FixedSparsityConfig, VariableSparsityConfig, BigBirdSparsityConfig, BSLongformerSparsityConfig, LocalSlidingWindowSparsityConfig +from .sparse_self_attention import SparseSelfAttention +from .bert_sparse_self_attention import BertSparseSelfAttention +from .sparse_attention_utils import SparseAttentionUtils diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/bert_sparse_self_attention.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/bert_sparse_self_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec5a403daa5459b0e526c839a6ce385414c727c9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/bert_sparse_self_attention.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/matmul.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/matmul.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fa1cb0483a47df5d1dcc4c10e15a0fe324e4f21 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/matmul.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/softmax.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce088543a0d0db57805121c7475c02c2a86990c9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/softmax.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_attention_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_attention_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78e114f5512795a3b4df0ebebdc61d0ad15f2196 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_attention_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_self_attention.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_self_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1e156ecb5bae9d7da324c66347e20d448cf80c4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_self_attention.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparsity_config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparsity_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4483356e042ec3c8ed351abbe8614ce22771e078 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparsity_config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..e25621bd0977c44d4c1f1d653207fd5c0fd192be --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft 
Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from torch import nn +from deepspeed.ops.sparse_attention import SparseSelfAttention, FixedSparsityConfig + + +class BertSparseSelfAttention(nn.Module): + """Implements Sparse Self Attention layer of Bert model based on https://github.com/microsoft/DeepSpeedExamples/blob/master/bing_bert/nvidia/modelingpreln.py#L373 + + For more information please see, TODO DeepSpeed Sparse Transformer. + + For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial. + """ + + def __init__( + self, + config, + # SparsityConfig parameters needs to be set accordingly + sparsity_config=FixedSparsityConfig(num_heads=4)): + """Initialize the bert sparse self attention layer. + + Note) you can use any of the provided sparsity configs or simply add yours! + + Arguments: + config: required: Bert model config + sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on FixedSparsityConfig class. 
+ """ + + super(BertSparseSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError("The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.sparse_self_attention = SparseSelfAttention(sparsity_config) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask): + """Applies forward phase of bert sparse self attention + + Arguments: + hidden_states: required: hidden_states tensor of the bert model + attn_mask: required: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported + + Return: + context_layer: a dense tensor containing attention context + """ + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + context_layer = self.sparse_self_attention(query_layer, + key_layer, + value_layer, + key_padding_mask=attention_mask) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, ) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer diff 
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import importlib
import torch

import triton
import triton.language as tl
import triton._C.libtriton as libtriton
from deepspeed.accelerator import get_accelerator


@triton.jit
def _kernel(A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc,
            stride_hc, stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta):
    # Single Triton kernel shared by all three block-sparse matmul modes, selected by
    # the compile-time flags meta['SDD'] / meta['DSD'] / meta['DDS']:
    #   SDD: sparse = dense x dense, DSD: dense = sparse x dense, DDS: dense = dense x sparse.
    # `lut` is a mode-specific look-up table built on the host (see _sparse_matmul);
    # `locks` backs the spin-lock accumulation path used when a reduction is split
    # across several program instances.
    TM = meta['TM']
    TN = meta['TN']
    TK = meta['TK']
    TZ = meta['TZ']
    BLOCK = meta['BLOCK']
    #------------#
    #- Prologue -#
    #------------#
    pid0 = tl.program_id(0)
    pid1 = tl.program_id(1)
    pidz = tl.program_id(2)
    if meta['SDD']:
        # SDD: pid1 indexes an output super-block; the LUT holds 4 int32 per block:
        # (head z, block row i, block col j, output block id).
        pid1 = pid1 + SDD_off_width
        blockidm = tl.arange(0, TM) // BLOCK
        blockidn = tl.arange(0, TN) // BLOCK
        offlutm = blockidm * (TN // BLOCK) * 4
        offlutn = blockidn * 4
        header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
        z = tl.load(header + 0)
        i = tl.load(header + 1 + offlutm)
        j = tl.load(header + 2 + offlutn)
        AS1 = SDD_K // TZ
        # TZ > 1 splits the K reduction across programs -> need lock-based accumulation.
        lockid = tl.where(TZ > 1, 1, 0)
        offka = pid0 * AS1
        offkb = pid0 * AS1
        offmc = 0
        offnc = 0
        offpa = 0
        offpb = 0
        maxid = TZ
        offhc = 0
        offha = z
        offhb = z
        ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
        rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
    else:
        # DSD/DDS: per-program 6-int header (offset, reduction size, column, depth,
        # lock id, max id) followed by interleaved pointer-increment pairs.
        header = lut + pid0 * 6
        offset = tl.load(header + 0)
        AS1 = tl.load(header + 1)
        column = tl.load(header + 2)
        depth = tl.load(header + 3)
        lockid = tl.load(header + 4)
        maxid = tl.load(header + 5)
        pinc = lut + offset
        offhc = depth
        if meta['DSD']:
            # output offset
            offnc = pid1 * TN
            offmc = column * TM
            offpc = 0
            # dense input offset
            offnb = pid1 * TN
            offkb = tl.load(pinc)
            offkb = tl.multiple_of(offkb, 8)  # compiler hint
            offpb = 0
            # sparse input offset
            offma = 0
            offka = 0
            offpa = tl.load(pinc + 1)
            offpa = tl.multiple_of(offpa, 8)  # compiler hint
            offpa = offpa * BLOCK * BLOCK
            offha = 0
            offhb = depth
        else:
            # output offset
            offmc = pid1 * TM
            offnc = column * TN
            offpc = 0
            # dense input offset
            offma = pid1 * TM
            offka = tl.load(pinc)
            offka = tl.multiple_of(offka, 8)  # compiler hint
            offpa = 0
            # sparse input offset
            offnb = 0
            offkb = 0
            offpb = tl.load(pinc + 1)
            offpb = tl.multiple_of(offpb, 8)  # compiler hint
            offpb = offpb * BLOCK * BLOCK
            offha = depth
            offhb = 0
        ram = offma + tl.arange(0, TM)
        rbn = offnb + tl.arange(0, TN)

    # initialize a, b pointers
    rka = offka + tl.arange(0, TK)
    rkb = offkb + tl.arange(0, TK)
    pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
    pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
    # Bounds masks: the dense operand is masked against DS0; for the sparse operand
    # the LUT guarantees in-bounds blocks, so the mask is just "reduction non-empty".
    if meta['DDS']:
        checkam = ram[:, None] < DS0
    else:
        checkam = AS1 > 0
    if meta['DSD']:
        checkbn = rbn[None, :] < DS0
    else:
        checkbn = AS1 > 0
    a = tl.load(pa, mask=checkam, other=0.)
    b = tl.load(pb, mask=checkbn, other=0.)

    ## ---------------- ##
    ##    Inner Loop    ##
    ## ---------------- ##
    # Accumulate TK-wide slices of the reduction; pointer increments come from the
    # LUT in DSD/DDS mode (non-contiguous blocks) and are constant in SDD mode.
    acc = tl.zeros((TM, TN), dtype=tl.float32)
    for k in range(AS1, 0, -TK):
        acc += tl.dot(a, b)
        if meta['SDD']:
            inc_a = TK * stride_ka
            inc_b = TK * stride_kb
        else:
            pinc += 2
        if meta['DSD']:
            inc_b = tl.load(pinc)
            inc_a = tl.load(pinc + 1)
            inc_b = tl.multiple_of(inc_b, 8)
            inc_a = tl.multiple_of(inc_a, 8)
            inc_b = inc_b * stride_kb
        if meta['DDS']:
            inc_a = tl.load(pinc)
            inc_b = tl.load(pinc + 1)
            inc_a = tl.multiple_of(inc_a, 8)
            inc_b = tl.multiple_of(inc_b, 8)
            inc_a = inc_a * stride_ka
        pa += inc_a
        pb += inc_b
        # pre-fetch
        checkak = k > TK
        checkbk = k > TK
        checka = checkam & checkak
        checkb = checkbn & checkbk
        a = tl.load(pa, mask=checka)
        b = tl.load(pb, mask=checkb)
    c = acc.to(C.dtype.element_ty)

    if meta['SDD']:
        # Scatter each BLOCK x BLOCK tile of the result to the output block id
        # recorded in the LUT (4th entry of each block record).
        checkc = True
        rr_blockidm = tl.arange(0, TM) // BLOCK
        rr_blockidn = tl.arange(0, TN) // BLOCK
        rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
        rr_offlutn = rr_blockidn * 4
        off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
        bkid = tl.load(header + off_bkid)
        offpc = bkid * BLOCK * BLOCK
        rcm = tl.arange(0, TM) % BLOCK
        rcn = tl.arange(0, TN) % BLOCK
    else:
        rcm = offmc + tl.arange(0, TM)
        rcn = offnc + tl.arange(0, TN)
    if meta['DSD']:
        checkc = rcn[None, :] < DS0
    if meta['DDS']:
        checkc = rcm[:, None] < DS0

    pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
    # write-back directly
    if lockid == 0:
        tl.store(pc, c, mask=checkc)
    # accumulate partial results using spin-locks
    else:
        plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
        # pcount lives in the second half of the locks buffer and counts how many
        # partial results have been merged, wrapping at maxid.
        pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
        while tl.atomic_cas(plock, 0, 1) == 1:
            pass
        count = tl.load(pcount)
        if count == 0:
            tl.store(pc, c, mask=checkc)
        else:
            d = tl.load(pc, mask=checkc)
            tl.store(pc, d + c, mask=checkc)
        tl.atomic_xchg(pcount, (count + 1) % maxid)
        tl.atomic_xchg(plock, 0)
##############
#  MAIN API  #
##############
class _sparse_matmul(torch.autograd.Function):
    """autograd.Function wrapping the block-sparse matmul Triton kernel.

    Hosts the look-up-table builders (make_sdd_lut / make_dxx_lut), the three
    mode-specific launchers (_sdd_matmul / _dsd_matmul / _dds_matmul) and the
    forward/backward dispatch. State below is class-level and shared.
    """

    sdd_cache = dict()
    dsd_cache = dict()
    dds_cache = dict()
    # per-device spin-lock buffers, grown on demand by get_locks()
    locks = dict()

    # Given an array sizes representing reduction size for each
    # column of a block-mode matrix multiplication,
    # performs load-balancing to achieve more smaller reductions
    # between `seg_size` elements
    @staticmethod
    def load_balance(sizes, block):
        """Split each column's reduction into segments of roughly equal size.

        Returns (segments, column, lockid, maxid, offsets) — per-segment length,
        owning column, lock assignment (0 = no lock needed), number of partials
        sharing the lock, and start offset into the flattened nonzero list.
        """
        #global triton
        #if triton is None:
        #    triton = importlib.import_module('triton')
        # segment size
        # heuristics taken from OpenAI blocksparse code
        # https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
        max_size = sizes.max()
        # NOTE(review): sizes[sizes != 0].min() raises on an all-zero layout — presumably
        # callers never pass an empty layout; verify against callers.
        min_size = sizes[sizes != 0].min()
        #if max_size > min_size * 2.0:
        #    seg_max = max(triton.cdiv(max_size, 4), min_size*2)
        #else:
        #    seg_max = max_size
        seg_max = max_size
        seg_min = max(triton.cdiv(seg_max, 4), 4)
        # split reduction into segments
        div = sizes // seg_max
        rem = sizes % seg_max
        packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
        width = packs.sum()
        segments = torch.empty(width, dtype=sizes.dtype)
        column = torch.empty_like(segments)
        lockid = torch.zeros_like(segments)
        maxid = torch.zeros_like(segments)
        nlocks = 0
        current = 0
        col_idx = 0
        for i in range(len(sizes)):
            d, r = div[i], rem[i]
            isempty = sizes[i] < seg_min
            last = current + d + (r >= seg_min) + isempty
            # column id
            column[current:last] = col_idx
            # lock id
            if d > 1 or (d == 1 and r >= seg_min):
                # more than one segment reduces into this column -> needs a lock
                nlocks += 1
                lockid[current:last] = nlocks
                maxid[current:last] = last - current
            # segment size
            segments[current:current + d] = seg_max
            if r < seg_min and not isempty:
                # fold a too-small remainder into the last full segment
                segments[current + d - 1] += r
            if r >= seg_min or isempty:
                segments[current + d] = r
            current = last
            col_idx += 1
        offsets = torch.zeros_like(segments)
        offsets[1:] = torch.cumsum(segments[:-1], dim=0)
        return segments, column, lockid, maxid, offsets

    @staticmethod
    def get_locks(size, dev):
        """Return a zeroed int32 lock buffer of at least `size` on device `dev` (cached)."""
        if dev not in _sparse_matmul.locks or \
                size > _sparse_matmul.locks[dev].size(0):
            _sparse_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
        return _sparse_matmul.locks[dev]

    ##########################
    # SPARSE = DENSE x DENSE #
    ##########################

    @staticmethod
    def make_sdd_lut(layout, block, dtype, device):
        """Build the SDD look-up tables by grouping nonzero blocks into super-blocks.

        Returns (luts, None, widths, packs): one LUT per super-block size, the
        number of super-blocks per LUT, and the super-block edge lengths.
        """
        #_sparse_matmul._load_utils()
        #start_width = 64 // block
        #segmented = _sparse_matmul.sdd_segment(layout.type(torch.int32), start_width)
        start_width = (128 if block > 16 else 32) // block
        layout = layout.type(torch.int32)
        # delegate super-blocking to the native triton helper
        segmented = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2],
                                         start_width)
        luts, widths, packs = [], [], []
        for size, nnz in segmented:
            """ width = nnz.shape[0] // (size * size)
            h = nnz[:, 0]
            i = nnz[:, 1]
            j = nnz[:, 2]
            b = nnz[:, 3]
            lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
            luts.append(lut.type(torch.int32).to(device))
            widths.append(width)
            packs.append(size) """
            # nnz rows are (head, blk_row, blk_col, output_block_id) quadruples
            nnz = nnz.reshape(-1, 4)
            width = nnz.shape[0] // (size * size)
            luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
            widths.append(width)
            packs.append(size)
        # create locks
        return luts, None, widths, packs

    @staticmethod
    def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs, bench, time):
        """Launch the SDD (sparse = dense x dense) kernel; returns the block-sparse result
        of shape (batch, total_nonzero_blocks, block, block)."""
        if trans_c:
            # C^T = (B^T A^T): swap operands instead of transposing the sparse output
            a, b = b, a
            trans_a, trans_b = not trans_b, not trans_a
        AS0 = a.size(0)
        # Shape check
        a_dim = -2 if trans_a else -1
        b_dim = -1 if trans_b else -2
        a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
        if a_inner != b_inner:
            raise ValueError(f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size "
                             f"of tensor B along the {b_dim} dim ({b_inner})")
        if a_inner % 16 != 0:
            raise ValueError('Reduction size for SDD must be a multiple of 16')

        batch_size = a.size(0)
        a_outer = a.size(3 if trans_a else 2)
        dtype = a.dtype
        is_16_multiple = a_inner % 16 == 0
        is_32_multiple = a_inner % 32 == 0
        is_64_multiple = a_inner % 64 == 0
        if not is_16_multiple:
            raise ValueError('Reduction size for SDD must be a multiple of 16')
        device = a.device
        # create kernel
        total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
        c = torch.empty((batch_size, total_width, block, block), dtype=dtype, device=a.device)
        for lut, width, pack in zip(luts, widths, packs):
            # admissible TK tile sizes per dtype (larger tiles require larger K)
            F32TK = [8, 16]
            F16TK = [16]
            F16TK += [32] if is_32_multiple else []
            F16TK += [64] if is_64_multiple else []
            TK = {torch.float32: F32TK, torch.float16: F16TK}[dtype]
            num_lock = 1
            meta = {
                'TM': block * pack,
                'TN': block * pack,
                'BLOCK': block,
                'TK': TK[0],
                'TZ': 1,
                'SDD': True,
                'DSD': False,
                'DDS': False
            }
            # create output
            locks = _sparse_matmul.get_locks(2 * width * AS0 * num_lock, a.device)
            # maximum grid size is 65535
            # so operation might be decomposed into multiple
            # kernel calls
            max_width = 49152
            total = 0 if bench else None
            for off_width in range(0, width, max_width):
                grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
                # NOTE(review): the 3D block-sparse output has no separate head stride, so
                # stride_zc and stride_hc both receive c.stride(0) here.
                _kernel[grid](a,
                              b,
                              c,
                              a.stride(0),
                              a.stride(1),
                              a.stride(3 if trans_a else 2),
                              a.stride(2 if trans_a else 3),
                              b.stride(0),
                              b.stride(1),
                              b.stride(3 if trans_b else 2),
                              b.stride(2 if trans_b else 3),
                              c.stride(0),
                              c.stride(0),
                              c.stride(2),
                              c.stride(3),
                              a_outer,
                              a_outer,
                              a_inner,
                              off_width,
                              lut,
                              locks,
                              num_lock,
                              num_warps=4,
                              **meta)
        # save for backward pass
        return c

    ##########################
    # DENSE = DENSE x SPARSE #
    ##########################

    # Given a binary layout of 0s and 1s,
    # Construct look-up table for efficient execution on GPUs
    @staticmethod
    def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
        """Build the DSD/DDS look-up table: a 6-int header per segment followed by
        interleaved (dense, sparse) pointer-increment pairs consumed by _kernel."""
        # load-balancing
        _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
        segments = _empty.clone()
        column = _empty.clone()
        depth = _empty.clone()
        lockid = _empty.clone()
        maxid = _empty.clone()
        offsets = _empty.clone()
        current_offset = 0
        current_maxid = 0
        for z in range(layout.size(0)):
            if trans:
                sizes = torch.sum(layout[z, :, :], 1)
            else:
                sizes = torch.sum(layout[z, :, :], 0)
            z_segments, z_column, z_lockid, z_maxid, z_offsets = _sparse_matmul.load_balance(sizes, block)
            z_depth = z * torch.ones_like(z_segments)
            # shift lock ids so they stay unique across heads
            z_lockid[z_lockid > 0] += current_maxid
            current_maxid = z_lockid.max()
            # concatenate depth
            segments = torch.cat((segments, z_segments))
            column = torch.cat((column, z_column))
            depth = torch.cat((depth, z_depth))
            maxid = torch.cat((maxid, z_maxid))
            offsets = torch.cat((offsets, current_offset + z_offsets))
            lockid = torch.cat((lockid, z_lockid))
            current_offset += layout[z, :, :].sum()
        segments *= step
        # pointer increments
        if trans:
            nnz = layout.nonzero()
        else:
            nnz = layout.transpose(1, 2).nonzero()
        num_blocks = nnz.size(0)
        offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
        idx = transform(nnz[:, 2] * block)
        # delta-encode the dense-input offsets
        xincs = idx.clone()
        xincs[1:] -= idx[:-1]
        # divide block into multiple steps
        div = block // step
        xincs = xincs.view(-1, 1).repeat(1, div)
        xincs[:, 1:] = step
        xincs[:, 0] -= (div - 1) * step
        # first increment for each reduction is actually the offset
        xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
        xincs = xincs.view(-1)
        # block-mode input increments
        if trans:
            widx = torch.arange(num_blocks)
        else:
            # sparse indices are moved depth-major
            widx = _empty.clone()
            current_offset = 0
            for z in range(layout.size(0)):
                layoutw = layout[z, :, :].clone()
                msum = layoutw.sum()
                layoutw[layoutw > 0] = 1 + torch.arange(msum)
                widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
                current_offset += msum
        widx = widx
        wincs = widx * block * block
        wincs[1:] -= widx[:-1] * block * block
        wincs = wincs.view(-1, 1).repeat(1, div)
        if trans:
            wincs[:, 1:] = step
            wincs[:, 0] -= (div - 1) * step
        else:
            wincs[:, 1:] = step * block
            wincs[:, 0] -= (div - 1) * step * block
        wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
        wincs = wincs.view(-1)
        # adjust offset and segment size
        offsets *= 2 * div
        segments *= div
        # create header
        width = column.size(0)
        offsets += 6 * width
        header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
        incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
        incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
        # create lut
        lut = torch.cat((header, incs))
        lut = lut.type(torch.int32).to(device)
        # create locks
        num_locks = max(1, lockid.max())
        return lut, num_locks, width, None

    @staticmethod
    def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
        """Launch the DDS (dense = dense x sparse) kernel; returns a dense 4D result."""
        # NOTE(review): leftover lazy-import guard — `triton` is imported at module
        # level and is never None here.
        global triton
        if triton is None:
            triton = importlib.import_module('triton')

        # shapes / dtypes
        AS0 = a.size(0)
        AS1 = a.size(1)
        AS2 = a.size(3 if trans_a else 2)
        AS3 = a.size(2 if trans_a else 3)
        BS0 = spdims[0]
        BS1 = block * spdims[2 if trans_b else 1]
        BS2 = block * spdims[1 if trans_b else 2]
        dtype = a.dtype
        # kernel
        meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': False, 'DDS': True}
        # output
        CS0 = AS0
        CS1 = AS1
        CS2 = BS2 if trans_c else AS2
        CS3 = AS2 if trans_c else BS2
        locks = _sparse_matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
        c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
        grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
        _kernel[grid](a,
                      b,
                      c,
                      a.stride(0),
                      a.stride(1),
                      a.stride(3 if trans_a else 2),
                      a.stride(2 if trans_a else 3),
                      b.stride(0),
                      b.stride(1),
                      b.stride(3 if trans_b else 2),
                      b.stride(2 if trans_b else 3),
                      c.stride(0),
                      c.stride(1),
                      c.stride(3 if trans_c else 2),
                      c.stride(2 if trans_c else 3),
                      AS2,
                      BS2,
                      0,
                      0,
                      lut,
                      locks,
                      num_locks,
                      num_warps=4,
                      **meta)
        return c

    @staticmethod
    def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
        """Launch the DSD (dense = sparse x dense) kernel; returns a dense 4D result."""
        # NOTE(review): same leftover lazy-import guard as in _dds_matmul.
        global triton
        if triton is None:
            triton = importlib.import_module('triton')

        # shapes / dtypes
        AS0 = spdims[0]
        AS1 = block * spdims[2 if trans_a else 1]
        AS2 = block * spdims[1 if trans_a else 2]
        BS0 = b.size(0)
        BS1 = b.size(1)
        BS2 = b.size(3 if trans_b else 2)
        BS3 = b.size(2 if trans_b else 3)
        dtype = a.dtype
        # kernel

        meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': True, 'DDS': False}
        # output
        CS0 = BS0
        CS1 = BS1
        CS2 = BS3 if trans_c else AS1
        CS3 = AS1 if trans_c else BS3
        locks = _sparse_matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
        c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
        grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
        _kernel[grid](a,
                      b,
                      c,
                      a.stride(0),
                      a.stride(1),
                      a.stride(3 if trans_a else 2),
                      a.stride(2 if trans_a else 3),
                      b.stride(0),
                      b.stride(1),
                      b.stride(3 if trans_b else 2),
                      b.stride(2 if trans_b else 3),
                      c.stride(0),
                      c.stride(1),
                      c.stride(2),
                      c.stride(3),
                      BS3,
                      AS1,
                      0,
                      0,
                      lut,
                      locks,
                      num_locks,
                      num_warps=4,
                      **meta)
        return c

    # dispatch table: mode string -> launcher (bound as plain functions)
    fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}

    @staticmethod
    def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs,
                c_bench, c_time, da_lut, da_num_locks, da_width, da_packs, da_bench, da_time, db_lut, db_num_locks,
                db_width, db_packs, db_bench, db_time):
        """Run the forward matmul for `mode` and stash the LUTs/flags needed by backward."""
        c = _sparse_matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width,
                                    c_packs, c_bench, c_time)
        # save for backward
        ctx.save_for_backward(a, b)
        ctx.da_num_locks = da_num_locks
        ctx.da_lut = da_lut
        ctx.da_width = da_width
        ctx.da_packs = da_packs
        ctx.da_bench = da_bench
        ctx.da_time = da_time
        ctx.db_lut = db_lut
        ctx.db_num_locks = db_num_locks
        ctx.db_width = db_width
        ctx.db_bench = db_bench
        ctx.db_packs = db_packs
        ctx.db_time = db_time
        ctx.mode = mode
        ctx.spdims = spdims
        ctx.block = block
        ctx.trans_a = trans_a
        ctx.trans_b = trans_b
        return c

    @staticmethod
    def backward(ctx, dc):
        """Compute input gradients; the gradient of each operand is itself a
        block-sparse matmul whose mode is a permutation of the forward mode."""
        # saved for backward
        a, b = ctx.saved_tensors
        mode = ctx.mode
        # gradients w.r.t. a
        if ctx.needs_input_grad[0]:
            mode_da = mode[1] + mode[0] + mode[2]
            da = _sparse_matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,
                                            ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs, ctx.da_bench,
                                            ctx.da_time)
        # gradients w.r.t. b
        if ctx.needs_input_grad[1]:
            mode_db = mode[2] + mode[1] + mode[0]
            db = _sparse_matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,
                                            ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs, ctx.db_bench,
                                            ctx.db_time)
        return da, db, None, None, None,\
            None, None, None, None,\
            None, None, None, None, None, None,\
            None, None, None, None, None, None,\
            None, None, None, None, None, None
class MatMul:
    """Block-Sparse MatMul class; this class handles three types of matrix-multiplication:
    - sparse = dense X dense
    - dense = sparse X dense
    - dense = dense X sparse

    For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
    """

    def make_lut(self, dtype, device):
        """Generates the sparsity layout/s used in block-sparse matmul

        Builds (and caches per (dtype, device)) the forward LUT plus the two
        backward LUTs (dA, dB), each chosen by the matmul mode.
        """
        key = (dtype, device)
        if key in self.lut_cache:
            return self.lut_cache[key]
        # C look-up table
        layout, block = self.layout, self.block
        step = 16
        if self.mode == 'sdd':
            c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
        elif self.mode == 'dsd':
            c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_a,
                                                                              device)
        elif self.mode == 'dds':
            c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_b,
                                                                              device)
        # DA look-up table
        if self.mode == 'sdd':
            da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, True, device)
        elif self.mode == 'dsd':
            da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
        elif self.mode == 'dds':
            da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step,
                                                                                  not self.trans_b, device)
        # DB look-up table
        if self.mode == 'sdd':
            db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, False, device)
        elif self.mode == 'dsd':
            db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_a,
                                                                                  device)
        elif self.mode == 'dds':
            db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
        self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\
                               da_lut, da_num_locks, da_width, da_packs,\
                               db_lut, db_num_locks, db_width, db_packs)
        return self.lut_cache[key]

    def __init__(self, layout, block, mode, trans_a=False, trans_b=False, bench=False):
        """Initialize the Block-Sparse MatMul class.

        Arguments:
            layout: required: sparsity layout tensor
            block: required: an integer determining the block size.
            mode: required: a string determining type of matmul; ('sdd') sparse = dense X dense, ('dsd') dense = sparse X dense, ('dds') dense = dense X sparse
            trans_a: optional: a boolean determining if multiplication needs to be applied on transpose of input a; default is false
            trans_b: optional: a boolean determining if multiplication needs to be applied on transpose of input b; default is false
            bench: optional: set if you want to do benchmarking
        """

        if mode not in ['sdd', 'dsd', 'dds']:
            raise NotImplementedError('Supported modes are: sdd, dsd, dds')
        # look-up table cache
        self.lut_cache = dict()
        # attributes
        self.trans_a = trans_a
        self.trans_b = trans_b
        self.mode = mode
        self.block = block
        self.layout = layout
        layout_dim = layout.ndim
        assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
        if not mode == 'sdd':
            # Dims to be reduced on the 'inside' of the matmul, either -1 or -2
            trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b,
                                                                                                   -2)
            self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
            sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)

            # Inner dim of the dense input should be equal to the inner dim of the sparse input
            self.dense_inner_size = layout.shape[sparse_inner] * block
            # Expected shape for sparse inputs
            self.sparse_shape = (layout.sum().item(), block, block)

        # Support using the same layout across attention heads etc.
        if layout_dim == 2:
            layout = layout.unsqueeze(0)

        layout = layout.long()  # Above code assumes the layout tensor is an integral type

        self.spdims = layout.shape
        # timings
        self.bench = bench
        self.time_c = None
        self.time_da = None
        self.time_db = None

    # pad shapes of a tensor to make it
    # compatible with kernel calls
    @staticmethod
    def _pad_shape(x, is_sparse):
        # sparse tensors are 3D (blocks, block, block); dense tensors are 4D
        max_dim = 3 if is_sparse else 4
        for i in range(max_dim - x.dim()):
            x = x.unsqueeze(0)
        return x

    def __call__(self, a, b):
        """Applies Block-Sparse MatMul.

        For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509

        Arguments:
            a: required: a dense/block-sparse tensor; first input of mat-mul
            b: required: a dense/block-sparse tensor; second input of mat-mul

        Return:
            c: a dense/block-sparse tensor result of a X b
        """

        c_lut, c_num_locks, c_width, c_packs,\
            da_lut, da_num_locks, da_width, da_packs,\
            db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
        # timings
        time_c = [None]
        time_da = [None]
        time_db = [None]

        original_dims = max(a.ndim, b.ndim)
        a, b = self._validate_inputs(a, b)

        # pad shapes with ones
        a = MatMul._pad_shape(a, self.mode == 'dsd')
        b = MatMul._pad_shape(b, self.mode == 'dds')
        # execute

        c = _sparse_matmul.apply(a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut,
                                 c_num_locks, c_width, c_packs, self.bench, time_c, da_lut, da_num_locks, da_width,
                                 da_packs, self.bench, time_da, db_lut, db_num_locks, db_width, db_packs, self.bench,
                                 time_db)

        # This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
        dims_to_trim = c.ndim - original_dims
        for _ in range(dims_to_trim):
            c = c.squeeze(0)

        self.time_c = time_c[0]
        self.time_da = time_da[0]
        self.time_db = time_db[0]
        return c

    def _validate_inputs(self, a, b):
        """Check devices/dtypes/shapes of the operands and pad them to 4D.

        Returns the (possibly autocast-halved and reshaped) pair (a, b); raises
        ValueError on any mismatch.
        """
        if a.device != b.device:
            raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
                             f"and {b.device} for tensor B")
        if not get_accelerator().on_accelerator(a):
            raise ValueError("Only GPU devices are supported for now")

        # When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
        if torch.is_autocast_enabled():
            a, b = a.half(), b.half()
        elif a.dtype != b.dtype:
            raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")

        mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
        if mode != 'sdd':
            # One input is sparse
            dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
            dense_inner = dense.shape[self.dense_inner_dim]
            if dense_inner != self.dense_inner_size:
                raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
                                 f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")

            if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
                raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
                                 f"{sparse_name}, got {sparse.shape}")

        def add_extra_dims(x):
            # Add extra leading singleton dimensions if needed
            dims_needed = 4 - x.ndim
            if dims_needed > 0:
                singletons = [1] * dims_needed
                x = x.view(*singletons, *x.shape)
            elif dims_needed < 0:
                raise ValueError("Tensors with more than 4 dimensions are not currently supported")

            return x

        # Pad shapes with leading singleton dimensions
        a = add_extra_dims(a)
        b = add_extra_dims(b)

        return a, b
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py

import torch

import triton
import triton.language as tl


def next_power_of_2(n):
    """Return the smallest power of two >= n (bit-twiddling, valid for 32-bit positive n)."""
    n -= 1
    n |= n >> 1
    n |= n >> 2
    n |= n >> 4
    n |= n >> 8
    n |= n >> 16
    n += 1
    return n


def num_warps(n):
    """Pick the warp count for a softmax row of n elements (more warps for wider rows)."""
    if n < 512:
        return 4
    if n < 2048:
        return 8
    return 16


# args[6] is `sizemax` (max row length in blocks); TN is padded to a power of two
# so tl.arange gets a static power-of-two extent.
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[6] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[6] * meta['BLOCK'])})
@triton.jit
def _forward(X, scale, LUT, RPE, KP_M, ATTN_M, sizemax, stride_zx, stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm,
             stride_zattnm, **meta):
    # Fused block-sparse softmax forward: scales, adds optional RPE / key-padding /
    # attention masks, then softmaxes each row IN PLACE in X.
    TN = meta['TN']
    BLOCK = meta['BLOCK']
    pidhm = tl.program_id(0)
    pidz = tl.program_id(1)
    # create index ranges
    rxm = pidhm % BLOCK
    rbm = pidhm // BLOCK
    rxn = tl.arange(0, TN) % BLOCK
    rbn = tl.arange(0, TN) // BLOCK
    # extract information from LUT
    header = LUT + rbm * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    check = rbn < size
    # clamp out-of-range lanes to the last valid block
    rbmn = tl.where(check, rbn, size - 1)
    # block id and column id
    blockid = tl.load(LUT + offset + rbmn * 4 + 0)
    columnid = tl.load(LUT + offset + rbmn * 4 + 1)
    rowid = tl.load(LUT + offset + rbmn * 4 + 2)
    headid = tl.load(LUT + offset + rbmn * 4 + 3)
    # pointers to X
    px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
    # -inf padding so masked lanes vanish under softmax
    x = tl.load(px, mask=check, other=-float('inf'))
    x = x.to(tl.float32)
    # apply scale
    if meta['APPLY_SCALE']:
        x = x * scale
    # apply RPE
    if meta['APPLY_RPE']:
        prpe = RPE + pidz * stride_zrpe + headid * stride_hrpe + columnid * BLOCK + rowid * BLOCK * stride_srpe + rxm * stride_srpe + rxn
        rpe = tl.load(prpe, mask=check, other=0)
        x = x + rpe
    # apply key-padding mask
    if meta['APPLY_KP_MASK']:
        pkp_m = KP_M + pidz * stride_zkpm + columnid * BLOCK + rxn
        kp_m = tl.load(pkp_m, mask=check, other=-float('inf'))
        if meta['KP_MASK_MUL']:
            # multiplicative mask: zero entries become -inf, nonzero become no-op
            kp_m = tl.where(kp_m == 0, -float('inf'), 0.)
        x = x + kp_m
    # apply attention mask
    if meta['APPLY_ATTN_MASK']:
        pattn_m = ATTN_M + columnid * BLOCK + rowid * BLOCK * stride_zattnm + rxm * stride_zattnm + rxn
        attn_m = tl.load(pattn_m, mask=check, other=-float('inf'))
        if meta['ATTN_MASK_MUL']:
            attn_m = tl.where(attn_m == 0, -float('inf'), 0.)
        x = x + attn_m
    # computation
    x = tl.softmax(x)
    tl.store(px, x, mask=check)


# NOTE(review): here TN is next_power_of_2(args[4]) * BLOCK, while _forward pads
# next_power_of_2(args[4] * BLOCK) — asymmetric parenthesization; confirm intended.
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[4] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[4]) * meta['BLOCK']})
@triton.jit
def _backward(X, scale, DX, LUT, sizemax, stride_zx, stride_zdx, **meta):
    # Fused softmax backward: dX <- x * (dx - sum(x * dx)) * scale, written over DX.
    pidhm = tl.program_id(0)
    pidz = tl.program_id(1)
    TN = meta['TN']
    BLOCK = meta['BLOCK']
    # create index ranges
    rxm = pidhm % BLOCK
    rbm = pidhm // BLOCK
    rxn = tl.arange(0, TN) % BLOCK
    rbn = tl.arange(0, TN) // BLOCK
    # extract information from look-up table
    header = LUT + rbm * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    # bounds checking on lut
    check = rbn < size
    rbmn = tl.where(check, rbn, size - 1)
    # initialize pointers to block-sparse input
    blockid = tl.load(LUT + offset + rbmn * 4)
    X = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
    DX = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
    # compute fused softmax backward
    x = tl.load(X, mask=check, other=0)
    dx = tl.load(DX, mask=check, other=0)
    x = x.to(tl.float32)
    dx = dx.to(tl.float32)
    y = x * (dx - tl.sum(x * dx, 0)) * scale
    tl.store(DX, y, mask=check)
class _sparse_softmax(torch.autograd.Function):
    """autograd.Function wrapping the fused block-sparse softmax kernels.

    forward() mutates its input in place (marked dirty) and returns it; backward()
    likewise writes the gradient over the incoming dx buffer.
    """

    bwd_kernels = dict()

    @staticmethod
    def make_lut(layout, block, device):
        """Build the softmax LUT: per-row (size, offset) headers followed by
        (block id, column, row, head) quadruples for every nonzero block.

        Returns (lut, max_row_size_in_blocks).
        """
        _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
        sizes = _empty.clone()
        # sizes along rows
        for h in range(layout.shape[0]):
            sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
        # offsets in block format
        offsets = torch.zeros_like(sizes)
        offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
        # block indices
        idx = torch.arange(layout.sum())
        head = layout.nonzero()[:, 0]
        rows = layout.nonzero()[:, 1]
        columns = layout.nonzero()[:, 2]
        core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
        # construct look-up table
        offsets = offsets * 4 + 2 * sizes.numel()
        header = torch.stack((sizes, offsets), dim=1).view(-1)
        lut = torch.cat((header, core)).type(torch.int32).to(device)
        return lut, int(sizes.max())

    @staticmethod
    def forward(ctx, x, scale, rpe, key_padding_mask, attn_mask, kp_mask_mode, attn_mask_mode, spdims, block, lut,
                num_blocks, maxlut, bench, time):
        """Run the fused softmax kernel in place on x; None masks/RPE are replaced by
        empty placeholders with zero strides so the kernel signature stays fixed."""

        apply_scale = False if scale == 1.0 else True

        # handle None rpe
        if rpe is None:
            apply_rpe = False
            stride_zrpe, stride_hrpe, stride_srpe = 0, 0, 0
            rpe = torch.empty(0, dtype=x.dtype, device=x.device)
        else:
            apply_rpe = True
            stride_zrpe, stride_hrpe, stride_srpe = rpe.stride(0), rpe.stride(1), rpe.stride(2)

        # handle None key_padding_mask
        if key_padding_mask is None:
            apply_kp_mask = False
            stride_zkpm = 0
            key_padding_mask = torch.empty(0, dtype=x.dtype, device=x.device)
        else:
            apply_kp_mask = True
            stride_zkpm = key_padding_mask.stride(0)

        # handle None attention_mask
        if attn_mask is None:
            apply_attn_mask = False
            stride_zattnm = 0
            attn_mask = torch.empty(0, dtype=x.dtype, device=x.device)
        else:
            apply_attn_mask = True
            stride_zattnm = attn_mask.stride(0)

        # run kernel
        M = x.shape[0]
        meta = {
            'BLOCK': block,
            'APPLY_SCALE': apply_scale,
            'APPLY_RPE': apply_rpe,
            'APPLY_KP_MASK': apply_kp_mask,
            'APPLY_ATTN_MASK': apply_attn_mask,
            'KP_MASK_MUL': kp_mask_mode == 'mul',
            'ATTN_MASK_MUL': attn_mask_mode == 'mul',
        }
        # one program per (head-row, block-row) x batch element
        grid = lambda opt: [spdims[0] * spdims[1] * block, M]
        _forward[grid](x, scale, lut, rpe, key_padding_mask, attn_mask, maxlut, x.stride(0),\
                       stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm, stride_zattnm, **meta)

        # save to context
        ctx.mark_dirty(x)
        ctx.save_for_backward(x, lut)
        ctx.spdims = spdims
        ctx.block = block
        ctx.maxlut = maxlut
        ctx.scale = scale
        ctx.apply_scale = apply_scale
        ctx.apply_rpe = apply_rpe
        ctx.apply_kp_mask = apply_kp_mask
        ctx.apply_attn_mask = apply_attn_mask
        ctx.kp_mask_mode = kp_mask_mode
        ctx.attn_mask_mode = attn_mask_mode
        return x

    @staticmethod
    def backward(ctx, dx):
        """Run the fused softmax backward kernel; writes the gradient over dx in place."""

        # retrieve from context
        x, lut = ctx.saved_tensors
        # run kernel
        M = x.shape[0]
        grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
        _backward[grid](x, ctx.scale, dx, lut, ctx.maxlut, x.stride(0), dx.stride(0), BLOCK=ctx.block)
        return dx, None, None, None, None, None, None, None, None, None, None, None, None, None, None
+ + Arguments: + layout: required: sparsity layout tensor + block: required: an integer determining the block size. + bench: optional: set if you want to do benchmarking + """ + + self.num_blocks = layout.sum().item() + self.spdims = layout.shape + self.layout = layout + self.block = block + self.bench = bench + self.lut_cache = dict() + + def __call__(self, + x, + scale=1., + rpe=None, + key_padding_mask=None, + attn_mask=None, + key_padding_mask_mode='add', + attn_mask_mode='add'): + """Applies softmax on a Block-Sparse input tensor. + + For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509 + + Arguments: + x: required: a block-sparse tensor that softmax is applied on it; computation will be in place and result will be returned in the same tensor + scale: optional: a float value; x values will be multiplied by this value before normalization. Default value is 1.0. + rpe: optional: a tensor same dimension as x that is used as relative position embedding + key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength) + attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported + key_padding_mask_mode: optional: a boolean determining if key_padding_mask needs to be added or multiplied + attn_mask_mode: optional: a boolean determining if attn_mask needs to be added or multiplied + + Return: + x: a block-sparse tensor contains normalized input x using softmax; and masks applied if given + """ + + time_y = [None] + if rpe is not None and rpe.dtype != x.dtype: + raise ValueError('relative position embedding must be %s' % x.dtype) + if attn_mask is not None and attn_mask.dtype != x.dtype: + raise ValueError('Attention mask must be %s' % x.dtype) + if key_padding_mask is not None and key_padding_mask.dtype != x.dtype: + raise ValueError('Key padding mask must be %s' % x.dtype) + lut, maxlut = self.make_lut(x.device) + x = 
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
from torch.nn import functional as F
from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig
'''
This file contains few utility functions to handle adapting pretrained model with sparse self-attention module.
'''


class SparseAttentionUtils:
    """This class provides some utility functions that are use integrating sparse attention into transformer models.
    Such utilities include extending position embeddings, replacing current self-attention layer with sparse attention, padding sequences to multiple of block size, etc.

    """

    @staticmethod
    def extend_position_embedding(model, max_position):
        """This function extends the position embedding weights of a model loaded from a checkpoint.
        It assumes the new max position is bigger than the original max length.

        Arguments:
            model: required: a transformer model
            max_position: required: an integer determining new position embedding size
        Return:
            model: updated model; in which position embedding weights have been extended based on new size
        """

        if hasattr(model, 'bert'):
            original_max_position = model.bert.embeddings.position_embeddings.weight.size(0)
            assert max_position > original_max_position
            extend_multiples = max(1, max_position // original_max_position)
            # tile the original table to cover the new range
            model.bert.embeddings.position_embeddings.weight.data = model.bert.embeddings.position_embeddings.weight.repeat(
                extend_multiples, 1)
        elif hasattr(model, 'roberta'):
            # RoBERTa has positions 0 & 1 reserved, so embedding size is max position + 2
            original_max_position, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
            original_max_position -= 2
            extend_multiples = max(1, max_position // original_max_position)
            assert max_position > original_max_position
            max_position += 2
            extended_position_embedding = model.roberta.embeddings.position_embeddings.weight.new_empty(
                max_position, embed_size)
            k = 2
            for i in range(extend_multiples):
                extended_position_embedding[k:(
                    k + original_max_position)] = model.roberta.embeddings.position_embeddings.weight[2:]
                k += original_max_position
            model.roberta.embeddings.position_embeddings.weight.data = extended_position_embedding
        else:
            raise ValueError(
                'Please extend \"extend_position_embedding\" function to support your model type. It currently only supports \"bert\" & \"roberta\"!'
            )

        model.config.max_position_embeddings = max_position
        print(f'Extended position embeddings to {original_max_position * extend_multiples}')

        return model

    @staticmethod
    def update_tokenizer_model_max_length(tokenizer, max_position):
        """This function updates the position embedding length of a tokenizer to a new max position.

        Arguments:
            tokenizer: required: a transformer tokenizer
            max_position: required: an integer determining new position embedding size
        Return:
            tokenizer: updated tokenizer; in which model maximum length has been extended based on new size
        """

        tokenizer.model_max_length = max_position
        tokenizer.init_kwargs['model_max_length'] = max_position
        # fixed typo in the original message ("max imum" -> "maximum")
        print(f'updated tokenizer model maximum length to {max_position}')

        return tokenizer

    @staticmethod
    def replace_model_self_attention_with_sparse_self_attention(
            model,
            max_position,
            # SparsityConfig parameters needs to be set accordingly
            sparsity_config=SparsityConfig(num_heads=4)):
        """This function replaces the self attention layers in model encoder with sparse self attention.
        It currently supports bert and roberta model and can be easily extended to any other models following similar steps here.
        For sparsityConfig, refer to the config class.

        Arguments:
            model: required: a transformer model
            max_position: required: an integer determining new position embedding size
            sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class

        Return:
            model: updated model; in which self attention layer has been replaced with DeepSpeed Sparse Self Attention layer.
        """

        # BUGFIX(review): the helper below is a static method of
        # SparseAttentionUtils, not of the passed-in transformer model; the
        # original called model.replace_self_attention_layer_with_sparse_self_attention_layer(...)
        # which raises AttributeError on standard HF models.
        if hasattr(model, 'bert'):
            model.config.max_position_embeddings = max_position
            SparseAttentionUtils.replace_self_attention_layer_with_sparse_self_attention_layer(
                model.config, model.bert.encoder.layer, sparsity_config)
        elif hasattr(model, 'roberta'):
            model.config.max_position_embeddings = max_position + 2
            SparseAttentionUtils.replace_self_attention_layer_with_sparse_self_attention_layer(
                model.config, model.roberta.encoder.layer, sparsity_config)
        else:
            raise ValueError(
                'Please extend \"update_model_self_attention_to_sparse_self_attention\" function to support \
                 your model type. It currently only supports \"bert\" & \"roberta\"!')
        return model

    @staticmethod
    def replace_self_attention_layer_with_sparse_self_attention_layer(
            config,
            layers,
            # SparsityConfig parameters needs to be set accordingly
            sparsity_config=SparsityConfig(num_heads=4)):
        """This function replaces the self attention layers in attention layer with sparse self attention.
        For sparsityConfig, refer to the config class.

        Arguments:
            config: required: transformer model config
            layers: required: transformer model attention layers
            sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class

        Return:
            layers: updated attention layers; in which self attention layers have been replaced with DeepSpeed Sparse Self Attention layer.
        """

        for layer in layers:
            deepspeed_sparse_self_attn = BertSparseSelfAttention(config, sparsity_config)
            # reuse the pretrained projection weights of the dense layer
            deepspeed_sparse_self_attn.query = layer.attention.self.query
            deepspeed_sparse_self_attn.key = layer.attention.self.key
            deepspeed_sparse_self_attn.value = layer.attention.self.value

            layer.attention.self = deepspeed_sparse_self_attn

        return layers

    @staticmethod
    def pad_to_block_size(block_size, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds,
                          pad_token_id, model_embeddings):
        """This function pads input tokens and attention mask on sequence length dimension to be multiple of block size.
        This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
        It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs.
        Note)
        1- instead of passing your embedding layer to this function, you can simply add this function to your model. It can be more simplified if given attention_mask and/or token_type_ids are none.
        2- you need to call unpad function before returning your model output to unpad the encoder sequence output.

        Arguments:
            block_size: required: an integer determining the block size of sparsity config.
            input_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary
            attention_mask: a torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences.
            token_type_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
            position_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the indices of positions of each input sequence tokens in the position embeddings.
            inputs_embeds: an optional torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] that contains embedded representation and can be passed instead of input_ids directly.
            pad_token_id: required: an integer determining the pad token from the model config; such as bert.config.pad_token_id.
            model_embeddings: an optional object. If inputs_embeds are not none, this will be your model embeddings such as BertEmbeddings from your model such as BertModel. You can move this function inside your model and use self.embeddings instead of passing this parameter.

        Return:
            pad_len: an integer determining how much inputs have been padded to transfer sequence length dimension to multiple of block size.
            input_ids: if input_ids are not none padded input_ids otherwise none.
            attention_mask: if attention_mask is not none padded attention_mask otherwise none.
            token_type_ids: if token_type_ids are not none padded token_type_ids otherwise none.
            position_ids: if position_ids are not none padded position_ids otherwise none.
            inputs_embeds: if inputs_embeds are not none padded inputs_embeds otherwise none.
        """

        batch_size, seq_len = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]

        pad_len = (block_size - seq_len % block_size) % block_size
        if pad_len > 0:
            if inputs_embeds is not None:
                # embed a row of pad tokens and append it along the sequence axis
                pad_input_ids = inputs_embeds.new_full((batch_size, pad_len), pad_token_id, dtype=torch.long)
                pad_inputs_embeds = model_embeddings(pad_input_ids)
                inputs_embeds = torch.cat([inputs_embeds, pad_inputs_embeds], dim=-2)
            # may not be needed as input_ids are not used if inputs_embeds are given
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id)
            if position_ids is not None:
                # pad position_id with pad_token_id
                position_ids = F.pad(position_ids, (0, pad_len), value=pad_token_id)
            # pad attention mask without attention on the padding tokens
            attention_mask = F.pad(attention_mask, (0, pad_len), value=False)
            # pad token_type_ids with token_type_id = 0
            token_type_ids = F.pad(token_type_ids, (0, pad_len), value=0)

        return pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds

    @staticmethod
    def unpad_sequence_output(pad_len, sequence_output):
        """This function unpads sequence output if inputs of the model were padded.
        This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
        It needs to be called in your model, such as BertModel, right before you return the model outputs.

        Arguments:
            pad_len: required: an integer determining how much model inputs have been padded to transfer sequence length dimension to multiple of block size.
            sequence_output: required: sequence output of the encoder layer.

        Return:
            sequence_output: unpadded sequence output of the encoder layer.
        """

        if (pad_len > 0):
            sequence_output = sequence_output[:, :-pad_len]
        return sequence_output
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch.nn as nn
import torch
from torch import distributed as dist
from deepspeed.ops.sparse_attention import SparsityConfig


class SparseSelfAttention(nn.Module):
    """Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509

    For more information please see, TODO DeepSpeed Sparse Transformer.

    For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
    """

    def __init__(
            self,
            # SparsityConfig parameters needs to be set accordingly
            sparsity_config=SparsityConfig(num_heads=4),
            key_padding_mask_mode='add',
            attn_mask_mode='mul',
            max_seq_length=2048):
        """Initialize the sparse self attention layer.
        Arguments:
            sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class.
            key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`.
            attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`.
            max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
        """
        super().__init__()

        # sparsity information
        self.sparsity_config = sparsity_config

        # initialize sparse layout and register as buffer so it moves/saves with the module
        master_layout = self.sparsity_config.make_layout(max_seq_length)
        self.register_buffer("master_layout", master_layout)
        self._need_layout_synchronization = True

        # mask modes
        self.key_padding_mask_mode = key_padding_mask_mode
        self.attn_mask_mode = attn_mask_mode

    # class-level cache of (sdd matmul, dsd matmul, softmax) op triples,
    # keyed by sequence length and shared across all instances
    ops = dict()

    def get_layout(self, L):
        """Return the (CPU) sparsity layout cropped to L tokens; broadcasts the master layout once."""
        # if layout is never synchronized across GPUs, broadcast the layout from global rank 0
        if self._need_layout_synchronization and dist.is_initialized():
            dist.broadcast(self.master_layout, src=0)
            self._need_layout_synchronization = False

        if (L % self.sparsity_config.block != 0):
            raise ValueError(
                f'Sequence Length, {L}, needs to be dividable by Block size {self.sparsity_config.block}!')

        num_blocks = L // self.sparsity_config.block
        return self.master_layout[..., :num_blocks, :num_blocks].cpu()  # layout needs to be a CPU tensor

    # add to cache
    def get_ops(self, H, L):
        """Build (or fetch from the class-level cache) the sparse ops for sequence length L.

        NOTE(review): H (num heads) is accepted but unused; the cache is keyed
        on L only — verify this is intentional if per-head layouts are enabled.
        """
        from deepspeed.ops.sparse_attention.matmul import MatMul
        from deepspeed.ops.sparse_attention.softmax import Softmax
        if L not in SparseSelfAttention.ops:
            sparsity_layout = self.get_layout(L)
            # Q*K^T on the sparse pattern (sdd = sparse = dense x dense)
            sparse_dot_sdd_nt = MatMul(sparsity_layout, self.sparsity_config.block, 'sdd', trans_a=False, trans_b=True)

            # probs*V back to dense (dsd = dense = sparse x dense)
            sparse_dot_dsd_nn = MatMul(sparsity_layout,
                                       self.sparsity_config.block,
                                       'dsd',
                                       trans_a=False,
                                       trans_b=False)

            sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)

            SparseSelfAttention.ops[L] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax)
        return SparseSelfAttention.ops[L]

    def transpose_key_for_scores(self, x, L):
        """Transpose the last two dims of x when its sequence dim is not L.

        NOTE(review): a mismatch between seq_len and L is taken to mean the key
        was already transposed by the caller — confirm against callers.
        """
        bsz, num_heads, seq_len, head_dim = x.size()
        if seq_len != L:
            return x.permute(0, 1, 3, 2)
        return x

    def transpose_mask_for_sparse(self, qtype, x, is_key_padding_mask=False):
        """Cast a mask to the query dtype and squeeze it to the rank the sparse kernels expect."""
        x = x.type(qtype)
        if is_key_padding_mask:
            # squeeze every dim except the batch dim
            xdim = x.dim()
            for d in range(xdim - 1, 0, -1):
                x = x.squeeze(dim=d)
            return x
        return x.squeeze()

    # forward pass
    def forward(self, query, key, value, rpe=None, key_padding_mask=None, attn_mask=None):
        """Applies forward phase of sparse self attention

        Arguments:
            query: required: query tensor
            key: required: key tensor
            value: required: value tensor
            rpe: optional: a tensor same dimension as x that is used as relative position embedding
            key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
            attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported

        The mask application modes are not parameters here; they are configured
        at construction time via key_padding_mask_mode / attn_mask_mode.

        Return:
            attn_output: a dense tensor containing attention context
        """
        assert query.dtype == torch.half, "sparse attention only supports training in fp16 currently, please file a github issue if you need fp32 support"
        bsz, num_heads, tgt_len, head_dim = query.size()

        # transpose back key if it is already transposed
        key = self.transpose_key_for_scores(key, tgt_len)

        # check that operation is supported
        if query.shape != key.shape or key.shape != value.shape:
            raise NotImplementedError('only self-attention is supported for now')

        # squeeze key_padding_mask if it is given
        if key_padding_mask is not None:
            key_padding_mask = self.transpose_mask_for_sparse(query.dtype, key_padding_mask, is_key_padding_mask=True)

        # squeeze attn_mask if it is given
        if attn_mask is not None:
            attn_mask = self.transpose_mask_for_sparse(query.dtype, attn_mask)

        # cache look-up table computations etc
        sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(num_heads, tgt_len)

        scaling = float(head_dim)**-0.5

        # attention scores
        attn_output_weights = sparse_dot_sdd_nt(query, key)
        attn_output_weights = sparse_softmax(attn_output_weights,
                                             scale=scaling,
                                             rpe=rpe,
                                             key_padding_mask=key_padding_mask,
                                             attn_mask=attn_mask,
                                             key_padding_mask_mode=self.key_padding_mask_mode,
                                             attn_mask_mode=self.attn_mask_mode)

        # outputs
        attn_output = sparse_dot_dsd_nn(attn_output_weights, value)
        return attn_output
+ """ + + self.num_heads = num_heads + self.block = block + self.different_layout_per_head = different_layout_per_head + self.num_layout_heads = num_heads if different_layout_per_head else 1 + + def setup_layout(self, seq_len): + """Create layout tensor for the given sequence length + + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) for sparsity layout of all head; initialized with zero + """ + + if (seq_len % self.block != 0): + raise ValueError(f'Sequence Length, {seq_len}, needs to be dividable by Block size {self.block}!') + num_blocks = seq_len // self.block + # TODO Currently we allocate layout per head; needs to be updated if heads share a single layout. + layout = torch.zeros((self.num_heads, num_blocks, num_blocks), dtype=torch.int64) + return layout + + def check_and_propagate_first_head_layout(self, layout): + """If all heads require same sparsity layout, it propagate first head layout to all heads + + Arguments: + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head + """ + + if not self.different_layout_per_head: + layout[1:self.num_heads, :, :] = layout[0, :, :] + return layout + + +class DenseSparsityConfig(SparsityConfig): + """Configuration class to store `Dense` configuration. + In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension. + """ + + def __init__(self, num_heads, block=16, different_layout_per_head=False): + """Initialize the Dense Sparsity Pattern Config. + In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension. 
+ + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + seq_len: required: an integer determining number of attention heads of the layer. + different_layout_per_head: optional: this is just for the sake of consistency with other sparsity formats; can ignore it for DenseSparsityConfig + """ + + super().__init__(num_heads, block, different_layout_per_head) + + def make_layout(self, seq_len): + """Set 1 to all blocks of the layout meaning the pattern is dense; not sparse. + + Arguments: + seq_len: required: an integer determining the underling sequence length; must be <= max sequence length + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; for dense everything is 1 + """ + + layout = self.setup_layout(seq_len) + layout[:, :, :] = 1 + return layout + + +class FixedSparsityConfig(SparsityConfig): + """Configuration class to store `Fixed` sparsity configuration. + For more details about this sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized. + This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity. + """ + + def __init__(self, + num_heads, + block=16, + different_layout_per_head=False, + num_local_blocks=4, + num_global_blocks=1, + attention='bidirectional', + horizontal_global_attention=False, + num_different_global_patterns=1): + """Initialize `Fixed` Sparsity Pattern Config. + + For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial + + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`. 
+ different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. + num_local_blocks: optional: an integer determining the number of blocks in local attention window. + num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. + horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks. + num_different_global_patterns: optional: an integer determining number of different global attentions layouts. While global attention can be fixed by which block/s are representative of any local window, since there are multi-heads, each head can use a different global representative. For example, with 4 blocks local window and global attention size of 1 block, we can have 4 different versions in which the first, Second, third, or forth block of each local window can be global representative of that window. This parameter determines how many of such patterns we want. Of course, there is a limitation based on num_local_blocks and num_global_blocks. 
+ """ + + super().__init__(num_heads, block, different_layout_per_head) + + self.num_local_blocks = num_local_blocks + + if (num_local_blocks % num_global_blocks != 0): + raise ValueError( + f'Number of blocks in a local window, {num_local_blocks}, must be dividable by number of global blocks, {num_global_blocks}!' + ) + self.num_global_blocks = num_global_blocks + + if (attention != 'unidirectional' and attention != 'bidirectional'): + raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!') + self.attention = attention + + if (attention != 'bidirectional' and horizontal_global_attention): + raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!') + self.horizontal_global_attention = horizontal_global_attention + + if (num_different_global_patterns > 1 and not different_layout_per_head): + raise ValueError( + f'Number of different layouts cannot be more than one when you have set a single layout for all heads! Set different_layout_per_head to True.' + ) + if (num_different_global_patterns > (num_local_blocks // num_global_blocks)): + raise ValueError( + f'Number of layout versions (num_different_global_patterns), {num_different_global_patterns}, cannot be larger than number of local window blocks divided by number of global blocks, {num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!' + ) + self.num_different_global_patterns = num_different_global_patterns + + def set_local_layout(self, h, layout): + """Sets local attention layout used by the given head in the sparse attention. 
+ + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set + """ + + num_blocks = layout.shape[1] + for i in range(0, num_blocks, self.num_local_blocks): + end = min(i + self.num_local_blocks, num_blocks) + for row in range(i, end): + for col in range(i, (row + 1 if self.attention == 'unidirectional' else end)): + layout[h, row, col] = 1 + return layout + + def set_global_layout(self, h, layout): + """Sets global attention layout used by the given head in the sparse attention. + + Currently we set global blocks starting from the last block of a local window to the first one. That means if a local window consists of 4 blocks and global attention size is one block, we use block #4 in each local window as global. If we have different layout per head, then other heads will get #3, #2, and #1. And if we have more heads (and different layout has set) than num of global attentions, multiple head may have same global attentions. + Note) if horizontal_global_attention is set, global blocks will be set both horizontally and vertically. 
+ + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set + """ + + num_blocks = layout.shape[1] + first_global_block_idx = self.num_local_blocks - ( + 1 + h % self.num_different_global_patterns) * self.num_global_blocks + + # set all global blocks except the last one if (in last local window) + end = num_blocks - (num_blocks % self.num_local_blocks) + for i in range(first_global_block_idx, end, self.num_local_blocks): + + # vertical global attention + first_row = 0 if self.attention == 'bidirectional' else i + #(((i // self.num_local_blocks) + 1) * self.num_local_blocks) + #if (first_row < num_blocks): + layout[h, first_row:, i:i + self.num_global_blocks] = 1 + + # horizontal global attention; only in bidirectional attention + if (self.horizontal_global_attention): + layout[h, i:i + self.num_global_blocks, :] = 1 + + # set last global blocks; handle possible short last local window + if (end < num_blocks): + start = min(end + first_global_block_idx, num_blocks - self.num_global_blocks) + end = start + self.num_global_blocks + + # vertical global attention + first_row = 0 if self.attention == 'bidirectional' else start + #(((start // self.num_local_blocks) + 1) * self.num_local_blocks) + #if (first_row < num_blocks): + layout[h, first_row:, start:end] = 1 + + # horizontal global attention + if (self.horizontal_global_attention): + layout[h, start:end, :] = 1 + return layout + + def make_layout(self, seq_len): + """Generates `Fixed` sparsity layout used by each head in the sparse attention. + + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. 
+ + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Fixed` sparsity layout of all head + """ + + layout = self.setup_layout(seq_len) + for h in range(0, self.num_layout_heads): + layout = self.set_local_layout(h, layout) + layout = self.set_global_layout(h, layout) + + layout = self.check_and_propagate_first_head_layout(layout) + return layout + + +class VariableSparsityConfig(SparsityConfig): + """Configuration class to store `Variable` sparsity configuration. + This layout is an extension of FixedSparsityConfig in which: + - user can set random layout; default value is zero means no random block + - user can provide a list of local block sizes + - user can provide a list of global block indices. + + For more details about `Fixed` sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized. + This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity. + """ + + def __init__(self, + num_heads, + block=16, + different_layout_per_head=False, + num_random_blocks=0, + local_window_blocks=[4], + global_block_indices=[0], + global_block_end_indices=None, + attention='bidirectional', + horizontal_global_attention=False): + """Initialize `Variable` Sparsity Pattern Config. + + For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial + + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`. + different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. 
Currently this sparsity config can only assign single layout to all heads; needs to be extended for different layout per head. + num_random_blocks: optional: an integer determining the number of random blocks in each block row. + local_window_blocks: optional: a list of integers determining the number of blocks in each local attention window. It assumes first number determines # of blocks in the first local window, second the second window, ..., and the last number determines the number of blocks in the remaining local windows. + global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window. + global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size of global_block_indices parameter, and combining this two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention. + num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. 
+ horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks. + """ + + super().__init__(num_heads, block, different_layout_per_head) + + self.num_random_blocks = num_random_blocks + self.local_window_blocks = local_window_blocks + self.global_block_indices = global_block_indices + + if (global_block_end_indices is not None): + if (len(global_block_indices) != len(global_block_end_indices)): + raise ValueError( + f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!' + ) + for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)): + if start_idx >= end_idx: + raise ValueError( + f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!' + ) + self.global_block_end_indices = global_block_end_indices + + if (attention != 'unidirectional' and attention != 'bidirectional'): + raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!') + self.attention = attention + + if (attention != 'bidirectional' and horizontal_global_attention): + raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!') + self.horizontal_global_attention = horizontal_global_attention + + def set_random_layout(self, h, layout): + """Sets random attention layout used by the given head in the sparse attention. + Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout. 
+ + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_random_blocks): + raise ValueError( + f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!' + ) + for row in range(0, num_blocks): + rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks) + layout[h, row, rnd_cols] = 1 + return layout + + def set_local_layout(self, h, layout): + """Sets local attention layout used by the given head in the sparse attention. + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set + """ + + num_blocks = layout.shape[1] + start_block_idx = 0 + end_block_idx = 0 + for block_size in self.local_window_blocks: + end_block_idx += block_size + end_block_idx = min(end_block_idx, num_blocks) + for row in range(start_block_idx, end_block_idx): + for col in range(start_block_idx, (row + 1 if self.attention == 'unidirectional' else end_block_idx)): + layout[h, row, col] = 1 + start_block_idx += block_size + + # if there is any remaining not attended part, use the lats local window block size as local window for the remaining applicable local windows + for i in range(start_block_idx, num_blocks, block_size): + end_block_idx = min(i + block_size, num_blocks) + for row in range(i, end_block_idx): + for col in range(i, (row + 1 if 
self.attention == 'unidirectional' else end_block_idx)): + layout[h, row, col] = 1 + return layout + + def set_global_layout(self, h, layout): + """Sets global attention layout used by the given head in the sparse attention. + + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set + """ + + num_blocks = layout.shape[1] + if (self.global_block_end_indices is None): + for idx in self.global_block_indices: + # if global block idx is in the range of the sequence blocks + if (idx < num_blocks): + #global rows + if (self.horizontal_global_attention): + layout[h, idx, :] = 1 + + #global columns + first_row = 0 if self.attention == 'bidirectional' else idx + layout[h, first_row:, idx] = 1 + else: + for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)): + # if global block idx is in the range of the sequence blocks + if (start_idx < num_blocks): + end_idx = min(end_idx, num_blocks) + #global rows + if (self.horizontal_global_attention): + layout[h, start_idx:end_idx, :] = 1 + + #global columns + first_row = 0 if self.attention == 'bidirectional' else start_idx + layout[h, first_row:, start_idx:end_idx] = 1 + return layout + + def make_layout(self, seq_len): + """Generates `Variable` sparsity layout used by each head in the sparse attention. + + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. 
+ + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Variable` sparsity layout of all head + """ + + layout = self.setup_layout(seq_len) + for h in range(0, self.num_layout_heads): + layout = self.set_random_layout(h, layout) + layout = self.set_local_layout(h, layout) + layout = self.set_global_layout(h, layout) + + layout = self.check_and_propagate_first_head_layout(layout) + return layout + + +class BigBirdSparsityConfig(SparsityConfig): + """Configuration class to store `BigBird` sparsity configuration. + For more details about this sparsity config, please see `Big Bird: Transformers for Longer Sequences`: https://arxiv.org/pdf/2007.14062.pdf + This class extends parent class of `SparsityConfig` and customizes it for `BigBird` sparsity. + """ + + def __init__(self, + num_heads, + block=16, + different_layout_per_head=False, + num_random_blocks=1, + num_sliding_window_blocks=3, + num_global_blocks=1, + attention='bidirectional'): + """Initialize the BigBird Sparsity Pattern Config. + + For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial + + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`. + different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. + num_random_blocks: optional: an integer determining the number of random blocks in each block row. + num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window. + num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. 
Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. + """ + + super().__init__(num_heads, block, different_layout_per_head) + + self.num_random_blocks = num_random_blocks + self.num_sliding_window_blocks = num_sliding_window_blocks + self.num_global_blocks = num_global_blocks + + if (attention != 'unidirectional' and attention != 'bidirectional'): + raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!') + self.attention = attention + + def set_random_layout(self, h, layout): + """Sets random attention layout used by the given head in the sparse attention. + Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout. + + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_random_blocks): + raise ValueError( + f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!' 
+ ) + + for row in range(0, num_blocks): + sample_range = range(0, num_blocks) if self.attention == 'bidirectional' else range(0, row + 1) + rnd_cols = random.sample(sample_range, self.num_random_blocks) + layout[h, row, rnd_cols] = 1 + return layout + + def set_sliding_window_layout(self, h, layout): + """Sets sliding local attention layout used by the given head in the sparse attention. + + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_sliding_window_blocks): + raise ValueError( + f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!' + ) + + w = self.num_sliding_window_blocks // 2 + for row in range(0, num_blocks): + start = max(0, row - w) + end = min(row + w + 1, num_blocks) + layout[h, row, start:end] = 1 + return layout + + def set_global_layout_itc(self, h, layout): + """Sets global attention layout used by the given head in the sparse attention. + + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_global_blocks): + raise ValueError( + f'Number of global blocks, {self.num_global_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!' 
+ ) + + #global rows + layout[h, 0:self.num_global_blocks, :] = 1 + + #global columns + layout[h, :, 0:self.num_global_blocks] = 1 + + if self.attention == 'unidirectional': + # zero out anything attending to the future + layout = torch.tril(layout) + + return layout + + def make_layout(self, seq_len): + """Generates `BigBird` sparsity layout used by each head in the sparse attention. + + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BigBird` sparsity layout of all head + """ + + layout = self.setup_layout(seq_len) + for h in range(0, self.num_layout_heads): + layout = self.set_random_layout(h, layout) + layout = self.set_sliding_window_layout(h, layout) + layout = self.set_global_layout_itc(h, layout) + + layout = self.check_and_propagate_first_head_layout(layout) + return layout + + +class BSLongformerSparsityConfig(SparsityConfig): + """Configuration class to store edited `Longformer` sparsity configuration. + + Note) this is a block-sparse version of the Longformer which is slightly different than original Longformer; which is element-wise sparsity. + + For more details about this sparsity config, please see `Longformer: The Long-Document Transformer`: https://arxiv.org/pdf/2004.05150.pdf + This class extends parent class of `SparsityConfig` and customizes it for `Longformer` sparsity. + """ + + def __init__(self, + num_heads, + block=16, + different_layout_per_head=False, + num_sliding_window_blocks=3, + global_block_indices=[0], + global_block_end_indices=None, + attention='bidirectional'): + """Initialize the edited `Longformer` Sparsity Pattern Config. + + For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial + + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + block: optional: an integer determining the block size. 
Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`. + different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. + + num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window. + global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window. + global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size of global_block_indices parameter, and combining this two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. 
+ """ + + super().__init__(num_heads, block, different_layout_per_head) + + self.num_sliding_window_blocks = num_sliding_window_blocks + self.global_block_indices = global_block_indices + self.attention = attention + + if (global_block_end_indices is not None): + if (len(global_block_indices) != len(global_block_end_indices)): + raise ValueError( + f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!' + ) + for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)): + if start_idx >= end_idx: + raise ValueError( + f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!' + ) + self.global_block_end_indices = global_block_end_indices + + def set_sliding_window_layout(self, h, layout): + """Sets sliding local attention layout used by the given head in the sparse attention. + + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_sliding_window_blocks): + raise ValueError( + f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!' + ) + + w = self.num_sliding_window_blocks // 2 + for row in range(0, num_blocks): + start = max(0, row - w) + end = min(row + w + 1, num_blocks) + layout[h, row, start:end] = 1 + return layout + + def set_global_layout(self, h, layout): + """Sets global attention layout used by the given head in the sparse attention. 
+ + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set + """ + + num_blocks = layout.shape[1] + if (self.global_block_end_indices is None): + for idx in self.global_block_indices: + # if global block idx is in the range of the sequence blocks + if (idx < num_blocks): + #global rows + layout[h, idx, :] = 1 + + #global columns + layout[h, :, idx] = 1 + else: + for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)): + # if global block idx is in the range of the sequence blocks + if (start_idx < num_blocks): + end_idx = min(end_idx, num_blocks) + #global rows + layout[h, start_idx:end_idx, :] = 1 + + #global columns + layout[h, :, start_idx:end_idx] = 1 + if self.attention == 'unidirectional': + layout = torch.tril(layout) + return layout + + def make_layout(self, seq_len): + """Generates edited `Longformer` sparsity layout used by each head in the sparse attention. + + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. + + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BSLongformer` sparsity layout of all head + """ + + layout = self.setup_layout(seq_len) + for h in range(0, self.num_layout_heads): + layout = self.set_sliding_window_layout(h, layout) + layout = self.set_global_layout(h, layout) + + layout = self.check_and_propagate_first_head_layout(layout) + return layout + + +class LocalSlidingWindowSparsityConfig(SparsityConfig): + """Configuration class to store `Local Sliding Window` sparsity configuration - a purely-local sliding window attention. 
+ This class extends parent class of `SparsityConfig` and customizes it for `Local` sparsity. + """ + + def __init__(self, num_heads, block=16, num_sliding_window_blocks=3, attention='unidirectional'): + """Initialize the Local Sliding Window Sparsity Pattern Config. + For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`. + num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. + """ + + super().__init__(num_heads, block) + self.num_sliding_window_blocks = num_sliding_window_blocks + self.attention = attention + + def set_sliding_window_layout(self, h, layout): + """Sets sliding local attention layout used by the given head in the sparse attention. 
+ Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_sliding_window_blocks): + raise ValueError( + f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!' + ) + + w = self.num_sliding_window_blocks // 2 + for row in range(0, num_blocks): + start = max(0, row - w) + end = min(row + w + 1, num_blocks) if self.attention == "bidirectional" else row + 1 + layout[h, row, start:end] = 1 + return layout + + def make_layout(self, seq_len): + """Generates `Local Sliding Window` sparsity layout used by each head in the sparse attention. + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BigBird` sparsity layout of all head + """ + + layout = self.setup_layout(seq_len) + for h in range(0, self.num_layout_heads): + layout = self.set_sliding_window_layout(h, layout) + layout = self.check_and_propagate_first_head_layout(layout) + return layout diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/matmul.tr b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/matmul.tr new file mode 100644 index 0000000000000000000000000000000000000000..9be6aa2aed7ccfb0d438d2e4d766d30b21d8f744 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/matmul.tr @@ -0,0 +1,208 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a + https:github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py +*/ + +__global__ void NAME (TYPE* A __readonly __noalias __aligned(16), + TYPE* B __readonly __noalias __aligned(16), + TYPE* C __noalias __aligned(16), + int lda __multipleof(8), + int ldb __multipleof(8), + int ldc __multipleof(8), + long stride_za __multipleof(8), + long stride_zb __multipleof(8), + long stride_zc __multipleof(8), + long stride_ha __multipleof(8), + long stride_hb __multipleof(8), + long stride_hc __multipleof(8), + int DS0, int DS1, + int SDD_K __multipleof(16), + int SDD_off_width, + int* lut, int* locks, int nlocks) { + /* ---------------- */ + /* Prologue */ + /* ---------------- */ + // program ids + int pid0 = get_program_id(0); + int pid1 = get_program_id(1); + int pidz = get_program_id(2); +#ifdef SDD + // load LUT header + pid1 = pid1 + SDD_off_width; + int blockidm[TM] = (0 ... TM) / BLOCK; + int blockidn[TN] = (0 ... TN) / BLOCK; + int offlutm[TM] = blockidm*(TN/BLOCK)*4; + int offlutn[TN] = blockidn*4; + int *header = lut + pid1 * (TM/BLOCK) * (TN/BLOCK) * 4; + int z = *(header + 0); + int i[TM] = *(header + 1 + offlutm); + int j[TN] = *(header + 2 + offlutn); + int AS1 = SDD_K / TZ; + int lockid = select(TZ > 1, 1, 0); + int offka = pid0 * AS1; + int offkb = pid0 * AS1; + int offmc = 0; + int offnc = 0; + int offpa = 0; + int offpb = 0; + int maxid = TZ; + int offhc = 0; + int offha = z; + int offhb = z; + int ram[TM] = i*BLOCK + ((0 ... TM) % BLOCK); + int rbn[TN] = j*BLOCK + ((0 ... 
TN) % BLOCK); +#else + // load LUT header + int *header = lut + pid0 * 6; + int offset = *(header + 0); + int AS1 = *(header + 1); + int column = *(header + 2); + int depth = *(header + 3); + int lockid = *(header + 4); + int maxid = *(header + 5); + int *pinc = lut + offset; + int offhc = depth; +#ifdef DSD + // output offset + int offnc = pid1 * TN; + int offmc = column * TM; + int offpc = 0; + // dense input offset + int offnb = pid1 * TN; + int offkb __multipleof(8) = *pinc; + int offpb = 0; + // sparse input offset + int offma = 0; + int offka = 0; + long offpa __multipleof(8) = *(pinc + 1); + offpa = offpa * BLOCK * BLOCK; + int offha = 0; + int offhb = depth; +#endif +#ifdef DDS + // output offset + int offmc = pid1 * TM; + int offnc = column * TN; + int offpc = 0; + // dense input offset + int offma = pid1 * TM; + int offka __multipleof(8) = *pinc; + int offpa = 0; + // sparse input offset + int offnb = 0; + int offkb = 0; + long offpb __multipleof(8) = *(pinc + 1); + offpb = offpb * BLOCK * BLOCK; + int offha = depth; + int offhb = 0; +#endif + int ram[TM] = offma + 0 ... TM; + int rbn[TN] = offnb + 0 ... TN; +#endif + // initialize a, b pointers + int rka[TK] = offka + 0 ... TK; + int rkb[TK] = offkb + 0 ... TK; + TYPE* pa[TM, TK] = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, newaxis] * STRIDE_AM + rka[newaxis, :] * STRIDE_AK; + TYPE* pb[TK, TN] = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[newaxis, :] * STRIDE_BN + rkb[:, newaxis] * STRIDE_BK; + // pre-fetch +#ifdef DDS + bool checkam[TM, TK] = ram[:, newaxis] < DS0; +#else + bool checkam[TM, TK] = AS1 > 0; +#endif +#ifdef DSD + bool checkbn[TK, TN] = rbn[newaxis, :] < DS0; +#else + bool checkbn[TK, TN] = AS1 > 0; +#endif + TYPE a[TM, TK] = checkam ? *pa : 0; + TYPE b[TK, TN] = checkbn ? 
*pb : 0; + + /* ---------------- */ + /* Inner Loop */ + /* ---------------- */ + // create result tile + float acc[TM, TN] = 0; + int step = TK; + for(int k = AS1; k > 0; k -= step) { + acc += a @ b; + // update pointers +#ifdef SDD + int inc_a = TK * STRIDE_AK; + int inc_b = TK * STRIDE_BK; +#else + pinc += 2; +#ifdef DSD + int inc_b __multipleof(8) = *pinc; + int inc_a __multipleof(8) = *(pinc + 1); + inc_b = inc_b * STRIDE_BK; +#endif +#ifdef DDS + int inc_a __multipleof(8) = *pinc; + int inc_b __multipleof(8) = *(pinc + 1); + inc_a = inc_a * STRIDE_AK; +#endif +#endif + pa += inc_a; + pb += inc_b; + // pre-fetch + bool checkak[TM, TK] = k > TK; + bool checkbk[TK, TN] = k > TK; + bool checka[TM, TK] = checkam && checkak; + bool checkb[TK, TN] = checkbk && checkbn; + a = *?(checka)pa; + b = *?(checkb)pb; + } + TYPE c[TM, TN] = acc; + + /* ---------------- */ + /* Epilogue */ + /* ---------------- */ + // initialize c pointers +#ifdef SDD + bool checkc[TM, TN] = 1; + // rematerialize + int rr_blockidm[TM] = (0 ... TM) / BLOCK; + int rr_blockidn[TN] = (0 ... TN) / BLOCK; + int rr_offlutm[TM] = rr_blockidm*(TN/BLOCK)*4; + int rr_offlutn[TN] = rr_blockidn*4; + int off_bkid[TM, TN] = 3 + rr_offlutm[:, newaxis] + rr_offlutn[newaxis, :]; + int bkid[TM, TN] = *(header + off_bkid); + long offpc[TM, TN] = bkid * BLOCK * BLOCK; + // range within blocks + int rcm[TM] = (0 ... TM) % BLOCK; + int rcn[TN] = (0 ... TN) % BLOCK; +#else + int rcm[TM] = offmc + 0 ... TM; + int rcn[TN] = offnc + 0 ... 
TN; +#ifdef DSD + bool checkc[TM, TN] = rcn[newaxis, :] < DS0; +#endif +#ifdef DDS + bool checkc[TM, TN] = rcm[:, newaxis] < DS0; +#endif +#endif + TYPE* pc[TM, TN] = C + offpc + offhc*stride_hc + pidz*stride_zc + rcm[:, newaxis]*STRIDE_CM + rcn[newaxis, :]*STRIDE_CN; + // write-back directly + if(lockid == 0) { + *?(checkc) pc = c; + } + // accumulate partial result using spin-locks + else { + int *plock = locks + get_program_id(2)*nlocks*get_num_programs(1) + get_program_id(1)*nlocks + lockid - 1; + int *pcount = plock + get_num_programs(2)*get_num_programs(1)*nlocks; + for(int repeat = 1; repeat == 1; repeat = atomic_cas(plock, 0, 1)); + int count = *pcount; + if(count == 0) + *?(checkc) pc = c; + else + *?(checkc) pc = c + *?(checkc)pc; + atomic_xchg(pcount, (count + 1) % maxid); + atomic_xchg(plock, 0); + } + } diff --git a/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_bwd.tr b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_bwd.tr new file mode 100644 index 0000000000000000000000000000000000000000..1a90f41d94945e1d6d6f52e6beaea94fa52cdda8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_bwd.tr @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a + https:github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/softmax.py +*/ + +__global__ void softmax_bwd(TYPE * X __readonly __noalias __aligned(16), + float scale, + TYPE* DX __readonly __noalias __aligned(16), + int* LUT, + int sizemax, + long stride_zx __multipleof(BLOCK), + long stride_zdx __multipleof(BLOCK)) { + int pidhm = get_program_id(0); + int pidz = get_program_id(1); + + // create index ranges + int rxm = pidhm % BLOCK; + int rbm = pidhm / BLOCK; + int rxn[TN] = (0 ... TN) % BLOCK; + int rbn[TN] = (0 ... 
TN) / BLOCK; + + // extract information from look-up table + int* header = LUT + rbm * 2; + int size = *(header + 0); + int offset = *(header + 1); + + // bounds checking on lut + bool check[TN] = rbn < size; + int rbmn[TN] = check ? rbn : size - 1; + + // initialize pointers to block-sparse input + long blockid[TN] = *(LUT + offset + rbmn*4); + + TYPE* px[TN] = X + pidz * stride_zx + + blockid * BLOCK * BLOCK + + rxm * BLOCK + + rxn; + + TYPE* pdx[TN] = DX + pidz * stride_zdx + + blockid * BLOCK * BLOCK + + rxm * BLOCK + + rxn; + + // compute fused softmax backward + TYPE x[TN] = check ? *px : 0; + TYPE dx[TN] = check ? *pdx : 0; + float Fdx[TN] = dx; + float Fx[TN] = x; + float Fxdx[TN] = Fdx*Fx; + float Fxdxsum = Fxdx[+]; + float Fy[TN] = Fx * (Fdx - Fxdxsum) * scale; + TYPE y[TN] = Fy; + + // write-back + *? (check)pdx = y; +}