Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/lib/libsqlite3.so +3 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/__init__.py +7 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/config.py +304 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/engine.py +636 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__init__.py +4 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/layers.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization_context.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/layers.py +114 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization.py +111 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization_context.py +13 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/utils.py +288 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/__init__.py +7 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/allocator.py +42 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_factory.py +129 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/inference_parameter.py +89 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/inference_utils.py +105 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py +13 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/attn_output_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/embedding_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/invfreq_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/mlp_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/norm_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/qkv_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/unembed_parameters.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py +29 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py +26 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py +19 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py +81 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py +78 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py +22 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py +115 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py +26 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py +282 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py +220 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py +617 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py +355 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py +6 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py +207 -0
.gitattributes
CHANGED
|
@@ -181,3 +181,4 @@ parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs mer
|
|
| 181 |
parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
|
| 182 |
parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
|
| 183 |
parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 181 |
parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
|
| 182 |
parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
|
| 183 |
parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
|
| 184 |
+
parrot/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/libsqlite3.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
|
| 3 |
+
size 1543808
|
parrot/lib/python3.10/site-packages/deepspeed/inference/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
from .v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig
|
| 6 |
+
from .v2.engine_v2 import InferenceEngineV2
|
| 7 |
+
from .v2 import build_hf_engine, build_engine_from_ds_checkpoint
|
parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc
ADDED
|
Binary file (19.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/config.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import deepspeed
|
| 8 |
+
from deepspeed.pydantic_v1 import Field, validator
|
| 9 |
+
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
|
| 10 |
+
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
|
| 11 |
+
from typing import Dict, Union
|
| 12 |
+
from enum import Enum
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class DtypeEnum(Enum):
|
| 16 |
+
# The torch dtype must always be the first value (so we return torch.dtype)
|
| 17 |
+
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
|
| 18 |
+
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
|
| 19 |
+
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat"
|
| 20 |
+
int8 = torch.int8, "torch.int8", "int8"
|
| 21 |
+
|
| 22 |
+
# Copied from https://stackoverflow.com/a/43210118
|
| 23 |
+
# Allows us to use multiple values for each Enum index and returns first
|
| 24 |
+
# listed value when Enum is called
|
| 25 |
+
def __new__(cls, *values):
|
| 26 |
+
obj = object.__new__(cls)
|
| 27 |
+
# first value is canonical value
|
| 28 |
+
obj._value_ = values[0]
|
| 29 |
+
for other_value in values[1:]:
|
| 30 |
+
cls._value2member_map_[other_value] = obj
|
| 31 |
+
obj._all_values = values
|
| 32 |
+
return obj
|
| 33 |
+
|
| 34 |
+
def __repr__(self):
|
| 35 |
+
return "<%s.%s: %s>" % (
|
| 36 |
+
self.__class__.__name__,
|
| 37 |
+
self._name_,
|
| 38 |
+
", ".join([repr(v) for v in self._all_values]),
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class MoETypeEnum(str, Enum):
|
| 43 |
+
residual = "residual"
|
| 44 |
+
standard = "standard"
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class DeepSpeedTPConfig(DeepSpeedConfigModel):
|
| 48 |
+
""" Configure tensor parallelism settings """
|
| 49 |
+
|
| 50 |
+
enabled: bool = True
|
| 51 |
+
""" Turn tensor parallelism on/off. """
|
| 52 |
+
|
| 53 |
+
tp_size: int = 1
|
| 54 |
+
""" Number of devices to split the model across using tensor parallelism. """
|
| 55 |
+
|
| 56 |
+
mpu: object = None
|
| 57 |
+
"""
|
| 58 |
+
A model parallelism unit object that implements
|
| 59 |
+
``get_{model,data}_parallel_{rank,group,world_size}()``.
|
| 60 |
+
"""
|
| 61 |
+
|
| 62 |
+
tp_group: object = None
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class DeepSpeedMoEConfig(DeepSpeedConfigModel):
|
| 66 |
+
""" Sets parameters for MoE """
|
| 67 |
+
|
| 68 |
+
enabled: bool = True
|
| 69 |
+
ep_size: int = 1
|
| 70 |
+
"""
|
| 71 |
+
The expert-parallelism size which is used for partitioning the experts
|
| 72 |
+
across the GPUs in the expert-parallel group.
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
moe_experts: list = Field([1], alias="num_experts")
|
| 76 |
+
""" The global number of experts used in an MoE layer. """
|
| 77 |
+
|
| 78 |
+
type: MoETypeEnum = MoETypeEnum.standard
|
| 79 |
+
"""
|
| 80 |
+
Specify the type of MoE layer. We have two types of MoE layer: 'Standard'
|
| 81 |
+
and 'Residual'.
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
ep_mp_group: object = None
|
| 85 |
+
ep_group: object = Field(None, alias="expert_group")
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class QuantTypeEnum(str, Enum):
|
| 89 |
+
asym = "asymmetric"
|
| 90 |
+
sym = "symmetric"
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class BaseQuantConfig(DeepSpeedConfigModel):
|
| 94 |
+
enabled = True
|
| 95 |
+
num_bits = 8
|
| 96 |
+
q_type: QuantTypeEnum = QuantTypeEnum.sym
|
| 97 |
+
q_groups: int = 1
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class WeightQuantConfig(BaseQuantConfig):
|
| 101 |
+
enabled = True
|
| 102 |
+
quantized_initialization: Dict = {}
|
| 103 |
+
post_init_quant: Dict = {}
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class ActivationQuantConfig(BaseQuantConfig):
|
| 107 |
+
enabled = True
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class QKVQuantConfig(DeepSpeedConfigModel):
|
| 111 |
+
enabled = True
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class QuantizationConfig(DeepSpeedConfigModel):
|
| 115 |
+
enabled: bool = True
|
| 116 |
+
activation: ActivationQuantConfig = ActivationQuantConfig()
|
| 117 |
+
weight: WeightQuantConfig = WeightQuantConfig()
|
| 118 |
+
qkv: QKVQuantConfig = QKVQuantConfig()
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
# todo: brainstorm on how to do ckpt loading for DS inference
|
| 122 |
+
class InferenceCheckpointConfig(DeepSpeedConfigModel):
|
| 123 |
+
checkpoint_dir: str = None
|
| 124 |
+
save_mp_checkpoint_path: str = None
|
| 125 |
+
base_dir: str = None
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class DeepSpeedInferenceConfig(DeepSpeedConfigModel):
|
| 129 |
+
""" Sets parameters for DeepSpeed Inference Engine. """
|
| 130 |
+
|
| 131 |
+
replace_with_kernel_inject: bool = Field(False, alias="kernel_inject")
|
| 132 |
+
"""
|
| 133 |
+
Set to true to inject inference kernels for models such as, Bert, GPT2,
|
| 134 |
+
GPT-Neo and GPT-J. Otherwise, the injection_dict provides the names of two
|
| 135 |
+
linear layers as a tuple:
|
| 136 |
+
`(attention_output projection, transformer output projection)`
|
| 137 |
+
"""
|
| 138 |
+
|
| 139 |
+
dtype: DtypeEnum = torch.float16
|
| 140 |
+
"""
|
| 141 |
+
Desired model data type, will convert model to this type.
|
| 142 |
+
Supported target types: `torch.half`, `torch.int8`, `torch.float`
|
| 143 |
+
"""
|
| 144 |
+
|
| 145 |
+
tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp")
|
| 146 |
+
"""
|
| 147 |
+
Configuration for tensor parallelism used to split the model across several
|
| 148 |
+
GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`.
|
| 149 |
+
"""
|
| 150 |
+
|
| 151 |
+
enable_cuda_graph: bool = False
|
| 152 |
+
"""
|
| 153 |
+
Use this flag for capturing the CUDA-Graph of the inference ops, so that it
|
| 154 |
+
can run faster using the graph replay method.
|
| 155 |
+
"""
|
| 156 |
+
|
| 157 |
+
use_triton: bool = False
|
| 158 |
+
"""
|
| 159 |
+
Use this flag to use triton kernels for inference ops.
|
| 160 |
+
"""
|
| 161 |
+
|
| 162 |
+
triton_autotune: bool = False
|
| 163 |
+
"""
|
| 164 |
+
Use this flag to enable triton autotuning.
|
| 165 |
+
Turning it on is better for performance but increase the 1st runtime for
|
| 166 |
+
autotuning.
|
| 167 |
+
"""
|
| 168 |
+
|
| 169 |
+
zero: DeepSpeedZeroConfig = {}
|
| 170 |
+
"""
|
| 171 |
+
ZeRO configuration to use with the Inference Engine. Expects a dictionary
|
| 172 |
+
containing values for :any:`DeepSpeedZeroConfig`.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
triangular_masking: bool = Field(True, alias="tm")
|
| 176 |
+
"""
|
| 177 |
+
Controls the type of masking for attention scores in transformer layer.
|
| 178 |
+
Note that the masking is application specific.
|
| 179 |
+
"""
|
| 180 |
+
|
| 181 |
+
moe: Union[bool, DeepSpeedMoEConfig] = {}
|
| 182 |
+
"""
|
| 183 |
+
Specify if the type of Transformer is MoE. Expects a dictionary containing
|
| 184 |
+
values for :any:`DeepSpeedMoEConfig`.
|
| 185 |
+
"""
|
| 186 |
+
|
| 187 |
+
quant: QuantizationConfig = {}
|
| 188 |
+
"""
|
| 189 |
+
NOTE: only works for int8 dtype.
|
| 190 |
+
Quantization settings used for quantizing your model using the MoQ. The
|
| 191 |
+
setting can be one element or a tuple. If one value is passed in, we
|
| 192 |
+
consider it as the number of groups used in quantization. A tuple is passed
|
| 193 |
+
in if we want to mention that there is extra-grouping for the MLP part of a
|
| 194 |
+
Transformer layer (e.g. (True, 8) shows we quantize the model using 8
|
| 195 |
+
groups for all the network except the MLP part that we use 8 extra
|
| 196 |
+
grouping). Expects a dictionary containing values for
|
| 197 |
+
:any:`QuantizationConfig`.
|
| 198 |
+
"""
|
| 199 |
+
|
| 200 |
+
#todo: refactor the following 3 into the new checkpoint_config
|
| 201 |
+
checkpoint: Union[str, Dict] = None
|
| 202 |
+
"""
|
| 203 |
+
Path to deepspeed compatible checkpoint or path to JSON with load policy.
|
| 204 |
+
"""
|
| 205 |
+
|
| 206 |
+
base_dir: str = ""
|
| 207 |
+
"""
|
| 208 |
+
This shows the root directory under which all the checkpoint files exists.
|
| 209 |
+
This can be passed through the json config too.
|
| 210 |
+
"""
|
| 211 |
+
|
| 212 |
+
set_empty_params: bool = False
|
| 213 |
+
"""
|
| 214 |
+
specifying whether the inference-module is created with empty or real Tensor
|
| 215 |
+
"""
|
| 216 |
+
|
| 217 |
+
save_mp_checkpoint_path: str = None
|
| 218 |
+
"""
|
| 219 |
+
The path for which we want to save the loaded model with a checkpoint. This
|
| 220 |
+
feature is used for adjusting the parallelism degree to help alleviate the
|
| 221 |
+
model loading overhead. It does not save any new checkpoint if no path is
|
| 222 |
+
passed.
|
| 223 |
+
"""
|
| 224 |
+
|
| 225 |
+
checkpoint_config: InferenceCheckpointConfig = Field({}, alias="ckpt_config")
|
| 226 |
+
"""
|
| 227 |
+
TODO: Add docs. Expects a dictionary containing values for
|
| 228 |
+
:any:`InferenceCheckpointConfig`.
|
| 229 |
+
"""
|
| 230 |
+
|
| 231 |
+
return_tuple: bool = True
|
| 232 |
+
"""
|
| 233 |
+
Specify whether or not the transformer layers need to return a tuple or a
|
| 234 |
+
Tensor.
|
| 235 |
+
"""
|
| 236 |
+
|
| 237 |
+
training_mp_size: int = 1
|
| 238 |
+
"""
|
| 239 |
+
If loading a checkpoint this is the mp size that it was trained with, it
|
| 240 |
+
may be different than what the mp size that you want to use during
|
| 241 |
+
inference.
|
| 242 |
+
"""
|
| 243 |
+
|
| 244 |
+
replace_method: str = Field(
|
| 245 |
+
"auto",
|
| 246 |
+
deprecated=True,
|
| 247 |
+
deprecated_msg="This parameter is no longer needed, please remove from your call to DeepSpeed-inference")
|
| 248 |
+
|
| 249 |
+
injection_policy: Dict = Field(None, alias="injection_dict")
|
| 250 |
+
"""
|
| 251 |
+
Dictionary mapping a client nn.Module to its corresponding injection
|
| 252 |
+
policy. e.g., `{BertLayer : deepspeed.inference.HFBertLayerPolicy}`
|
| 253 |
+
"""
|
| 254 |
+
|
| 255 |
+
injection_policy_tuple: tuple = None
|
| 256 |
+
""" TODO: Add docs """
|
| 257 |
+
|
| 258 |
+
config: Dict = Field(None, alias="args") # todo: really no need for this field if we can refactor
|
| 259 |
+
|
| 260 |
+
max_out_tokens: int = Field(1024, alias="max_tokens")
|
| 261 |
+
"""
|
| 262 |
+
This argument shows the maximum number of tokens inference-engine can work
|
| 263 |
+
with, including the input and output tokens. Please consider increasing it
|
| 264 |
+
to the required token-length required for your use-case.
|
| 265 |
+
"""
|
| 266 |
+
|
| 267 |
+
min_out_tokens: int = Field(1, alias="min_tokens")
|
| 268 |
+
"""
|
| 269 |
+
This argument communicates to the runtime the minimum number of tokens you
|
| 270 |
+
expect you will need to generate. This will cause the runtime to error
|
| 271 |
+
if it unable to provide this and provide context on the memory pressure
|
| 272 |
+
rather than seg-faulting or providing corrupted output.
|
| 273 |
+
"""
|
| 274 |
+
|
| 275 |
+
transposed_mode: bool = Field(False, alias="transposed_mode")
|
| 276 |
+
|
| 277 |
+
mp_size: int = Field(1, deprecated=True, new_param="tensor_parallel.tp_size")
|
| 278 |
+
"""
|
| 279 |
+
Desired model parallel size, default is 1 meaning no model parallelism.
|
| 280 |
+
Deprecated, please use the ``tensor_parallel` config to control model
|
| 281 |
+
parallelism.
|
| 282 |
+
"""
|
| 283 |
+
mpu: object = Field(None, deprecated=True, new_param="tensor_parallel.mpu")
|
| 284 |
+
ep_size: int = Field(1, deprecated=True, new_param="moe.ep_size")
|
| 285 |
+
ep_group: object = Field(None, alias="expert_group", deprecated=True, new_param="moe.ep_group")
|
| 286 |
+
ep_mp_group: object = Field(None, alias="expert_mp_group", deprecated=True, new_param="moe.ep_mp_group")
|
| 287 |
+
moe_experts: list = Field([1], deprecated=True, new_param="moe.moe_experts")
|
| 288 |
+
moe_type: MoETypeEnum = Field(MoETypeEnum.standard, deprecated=True, new_param="moe.type")
|
| 289 |
+
|
| 290 |
+
@validator("moe")
|
| 291 |
+
def moe_backward_compat(cls, field_value, values):
|
| 292 |
+
if isinstance(field_value, bool):
|
| 293 |
+
return DeepSpeedMoEConfig(moe=field_value)
|
| 294 |
+
return field_value
|
| 295 |
+
|
| 296 |
+
@validator("use_triton")
|
| 297 |
+
def has_triton(cls, field_value, values):
|
| 298 |
+
if field_value and not deepspeed.HAS_TRITON:
|
| 299 |
+
raise ValueError('Triton needs to be installed to use deepspeed with triton kernels')
|
| 300 |
+
return field_value
|
| 301 |
+
|
| 302 |
+
class Config:
|
| 303 |
+
# Get the str representation of the datatype for serialization
|
| 304 |
+
json_encoders = {torch.dtype: lambda x: str(x)}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/engine.py
ADDED
|
@@ -0,0 +1,636 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import time
|
| 8 |
+
import os
|
| 9 |
+
from deepspeed import comm as dist
|
| 10 |
+
from deepspeed.utils.logging import log_dist
|
| 11 |
+
|
| 12 |
+
from torch.nn.modules import Module
|
| 13 |
+
from packaging import version as pkg_version
|
| 14 |
+
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
|
| 15 |
+
from deepspeed.utils.timer import SynchronizedWallClockTimer
|
| 16 |
+
|
| 17 |
+
from ..runtime.state_dict_factory import SDLoaderFactory
|
| 18 |
+
from ..runtime.weight_quantizer import WeightQuantization
|
| 19 |
+
from ..module_inject import replace_transformer_layer, generic_injection
|
| 20 |
+
from ..comm.comm import init_distributed
|
| 21 |
+
from ..pipe import PipelineModule
|
| 22 |
+
from ..moe.utils import has_moe_layers
|
| 23 |
+
from ..module_inject import LinearAllreduce, LinearLayer, Normalize, ReplaceWithTensorSlicing
|
| 24 |
+
from deepspeed.accelerator import get_accelerator
|
| 25 |
+
from ..module_inject.policy import TransformerPolicy
|
| 26 |
+
from ..module_inject.auto_tp import AutoTP
|
| 27 |
+
|
| 28 |
+
from ..module_inject.replace_policy import generic_policies
|
| 29 |
+
from ..module_inject.auto_tp_model_utils import build_bloom_alibi_tensor, build_mpt_atten_bias_tensor, build_mpt_alibi_tensor, get_alibi_mask
|
| 30 |
+
from ..ops.transformer.inference.ds_attention import DeepSpeedSelfAttention
|
| 31 |
+
from ..model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
|
| 32 |
+
|
| 33 |
+
DS_INFERENCE_ENABLED = False
|
| 34 |
+
from torch import nn
|
| 35 |
+
|
| 36 |
+
INFERENCE_MODEL_TIMER = "model-forward-inference"
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class InferenceEngine(Module):
|
| 40 |
+
inference_mp_group = None
|
| 41 |
+
inference_ep_group = None
|
| 42 |
+
expert_mp_group = None
|
| 43 |
+
|
| 44 |
+
def __init__(self, model, config):
|
| 45 |
+
"""
|
| 46 |
+
Args:
|
| 47 |
+
model: torch.nn.Module
|
| 48 |
+
config: DeepSpeedInferenceConfig
|
| 49 |
+
"""
|
| 50 |
+
global DS_INFERENCE_ENABLED
|
| 51 |
+
DS_INFERENCE_ENABLED = True
|
| 52 |
+
|
| 53 |
+
super().__init__()
|
| 54 |
+
|
| 55 |
+
# Have to import here because inference_module is a global, but python
|
| 56 |
+
# globals only work at the module level and will not be updated unless
|
| 57 |
+
# we import it each time we init a new inference engine.
|
| 58 |
+
from ..model_implementations.transformers.ds_transformer import inference_module
|
| 59 |
+
if inference_module is not None:
|
| 60 |
+
self.destroy()
|
| 61 |
+
|
| 62 |
+
self.module = model
|
| 63 |
+
self._config = config
|
| 64 |
+
|
| 65 |
+
self._get_model_config_generate(config) # keep for weird backward compatibility
|
| 66 |
+
|
| 67 |
+
# patch model generate with ours if model uses it
|
| 68 |
+
if hasattr(self.module, "generate"):
|
| 69 |
+
self.generate = self._generate
|
| 70 |
+
|
| 71 |
+
if hasattr(self.module, "config"):
|
| 72 |
+
TransformerPolicy.hf_model_config = self.module.config
|
| 73 |
+
|
| 74 |
+
if config.dtype == torch.half and not get_accelerator().is_fp16_supported():
|
| 75 |
+
raise ValueError("Type fp16 is not supported.")
|
| 76 |
+
|
| 77 |
+
# todo: keep this self.injection_dict because we don't use to change config.injection_policy API
|
| 78 |
+
# todo: this will get changed when Molly's PR on auto injection dict is merged
|
| 79 |
+
self.injection_dict = config.injection_policy
|
| 80 |
+
|
| 81 |
+
# todo: refactor the mp_group and mp_size related in the next refactor
|
| 82 |
+
self.mp_group = config.tensor_parallel.tp_group
|
| 83 |
+
self.mpu = config.tensor_parallel.mpu
|
| 84 |
+
|
| 85 |
+
#self._validate_args(self.mpu, config.replace_with_kernel_inject)
|
| 86 |
+
self.quantize_merge_count = 1
|
| 87 |
+
self.quantization_scales = None
|
| 88 |
+
|
| 89 |
+
# these are not needed in the config as we are creating them ourselves in the inference engine
|
| 90 |
+
self.ep_group = None # config.moe.ep_group
|
| 91 |
+
self.expert_mp_group = None # config.moe.ep_mp_group
|
| 92 |
+
|
| 93 |
+
self.cuda_graph_created = False
|
| 94 |
+
self.checkpoint_engine = TorchCheckpointEngine()
|
| 95 |
+
quantization_setting = None
|
| 96 |
+
self._init_quantization_setting(
|
| 97 |
+
quantization_setting) # todo: update with the new quant config for weight quant
|
| 98 |
+
self.model_profile_enabled = False
|
| 99 |
+
self._model_times = []
|
| 100 |
+
|
| 101 |
+
if not self.injection_dict and config.replace_with_kernel_inject:
|
| 102 |
+
# This is a hack to remove the prepare_mask function on HF side for BLOOM architecture
|
| 103 |
+
self.remove_mask_prepare_for_bloom()
|
| 104 |
+
|
| 105 |
+
if self.injection_dict or not config.replace_with_kernel_inject:
|
| 106 |
+
# This is a hack to redefine the alibi func due to TP
|
| 107 |
+
if config.tensor_parallel.tp_size > 1:
|
| 108 |
+
self.build_alibi_tensor()
|
| 109 |
+
self.build_attn_bias()
|
| 110 |
+
|
| 111 |
+
if get_accelerator().device_name() == 'cuda' and config.enable_cuda_graph:
|
| 112 |
+
assert pkg_version.parse(torch.__version__) >= pkg_version.parse("1.10"), \
|
| 113 |
+
"If you want to use cuda graph, please upgrade torch to at least v1.10"
|
| 114 |
+
|
| 115 |
+
# convert model to intended dtype
|
| 116 |
+
if config.dtype:
|
| 117 |
+
self._convert_to_dtype(config)
|
| 118 |
+
|
| 119 |
+
if self.mpu:
|
| 120 |
+
config.tensor_parallel.tp_size = dist.get_world_size(group=self.mpu.get_model_parallel_group())
|
| 121 |
+
self.mp_group = self.mpu.get_model_parallel_group()
|
| 122 |
+
elif config.tensor_parallel.tp_size > 1:
|
| 123 |
+
self._create_model_parallel_group(config)
|
| 124 |
+
config.tensor_parallel.tp_group = self.mp_group
|
| 125 |
+
|
| 126 |
+
if isinstance(self.module, torch.nn.Module):
|
| 127 |
+
moe, _ = has_moe_layers(self.module)
|
| 128 |
+
else:
|
| 129 |
+
moe = False
|
| 130 |
+
|
| 131 |
+
if moe and dist.get_world_size() > 1:
|
| 132 |
+
self._create_ep_parallel_group(config.moe.moe_experts)
|
| 133 |
+
|
| 134 |
+
# We only support three modes: 1) user specified policy for tensor-parallelism, 2) kernel injection (replace_with_kernel_inject), and 3) automatic tensor parallelism if tp_size > 1.
|
| 135 |
+
if self.injection_dict:
|
| 136 |
+
# 1. User specified Tensor Parallelism
|
| 137 |
+
assert not config.replace_with_kernel_inject, "Cannot use both user specified injection policy and kernel injection"
|
| 138 |
+
for client_module, injection_policy in self.injection_dict.items():
|
| 139 |
+
|
| 140 |
+
assert issubclass(client_module,
|
| 141 |
+
torch.nn.Module), f"{client_module} is not a subclass of torch.nn.Module"
|
| 142 |
+
|
| 143 |
+
# construct the tuple and pass that instead of a string or dict.
|
| 144 |
+
if isinstance(injection_policy, str):
|
| 145 |
+
config.injection_policy_tuple = (injection_policy, )
|
| 146 |
+
else:
|
| 147 |
+
config.injection_policy_tuple = injection_policy
|
| 148 |
+
|
| 149 |
+
layer_names = [name for name, _ in self.module.named_modules()]
|
| 150 |
+
for policy in config.injection_policy_tuple:
|
| 151 |
+
if not any(name.endswith(policy) for name in layer_names):
|
| 152 |
+
raise ValueError(f"Injection policy layer'{policy}' not valid.")
|
| 153 |
+
|
| 154 |
+
self._apply_injection_policy(config, client_module)
|
| 155 |
+
else:
|
| 156 |
+
if config.replace_with_kernel_inject:
|
| 157 |
+
# 2. DeepSpeed Kernel Injection
|
| 158 |
+
self._apply_injection_policy(config)
|
| 159 |
+
elif config.tensor_parallel.tp_size > 1:
|
| 160 |
+
# 3. Automatic Tensor Parallelism
|
| 161 |
+
parser_dict = AutoTP.tp_parser(model)
|
| 162 |
+
print("AutoTP: ", parser_dict)
|
| 163 |
+
for client_module, injection_policy in parser_dict:
|
| 164 |
+
if isinstance(injection_policy, str):
|
| 165 |
+
config.injection_policy_tuple = (injection_policy, )
|
| 166 |
+
else:
|
| 167 |
+
config.injection_policy_tuple = injection_policy
|
| 168 |
+
self._apply_injection_policy(config, client_module)
|
| 169 |
+
|
| 170 |
+
device = get_accelerator().current_device_name()
|
| 171 |
+
# NOTE: This check assumes a Hugging Face hierarchy for the device type i.e. module.device.type
|
| 172 |
+
is_meta_device = hasattr(self.module, "device") and self.module.device.type == 'meta'
|
| 173 |
+
if is_meta_device:
|
| 174 |
+
self.module.to_empty(device=device)
|
| 175 |
+
else:
|
| 176 |
+
self.module.to(device)
|
| 177 |
+
|
| 178 |
+
if config.tensor_parallel.tp_size > 1:
|
| 179 |
+
_rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name())
|
| 180 |
+
dist.broadcast(_rng_state, 0)
|
| 181 |
+
get_accelerator().set_rng_state(_rng_state.cpu())
|
| 182 |
+
|
| 183 |
+
if config.tensor_parallel.tp_size > 1:
|
| 184 |
+
assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism"
|
| 185 |
+
|
| 186 |
+
# Check if local CUDA graphs can be created in replacement modules
|
| 187 |
+
self.local_cuda_graph = self._local_cuda_graph_used(self.module)
|
| 188 |
+
|
| 189 |
+
def destroy(self):
|
| 190 |
+
# Have to import here because inference_module is a global, but python
|
| 191 |
+
# globals only work at the module level and will not be updated unless
|
| 192 |
+
# we import it each time we init a new inference engine.
|
| 193 |
+
from ..model_implementations.transformers.ds_transformer import inference_module
|
| 194 |
+
DeepSpeedTransformerInference.layer_id = 0
|
| 195 |
+
DeepSpeedSelfAttention.num_layers = 0
|
| 196 |
+
if inference_module is not None:
|
| 197 |
+
inference_module.release_workspace()
|
| 198 |
+
inference_module = None
|
| 199 |
+
|
| 200 |
+
def profile_model_time(self, use_cuda_events=True):
|
| 201 |
+
if not self.model_profile_enabled and not self._config.enable_cuda_graph:
|
| 202 |
+
self.module.register_forward_pre_hook(self._pre_forward_hook)
|
| 203 |
+
self.module.register_forward_hook(self._post_forward_hook)
|
| 204 |
+
self.model_profile_enabled = True
|
| 205 |
+
self.use_cuda_events = use_cuda_events
|
| 206 |
+
if self.use_cuda_events:
|
| 207 |
+
self.timers = SynchronizedWallClockTimer()
|
| 208 |
+
|
| 209 |
+
# todo: remove this once all the config dicts are centralized from top level pydantic config
|
| 210 |
+
def _get_model_config_generate(self, config):
|
| 211 |
+
# this is being passed to replace_transformer_layer(config=self.user_model_config_dict)
|
| 212 |
+
self.config = getattr(self.module, 'config', None) if config.config is None else config.config
|
| 213 |
+
|
| 214 |
+
def remove_mask_prepare_for_bloom(self):
|
| 215 |
+
if hasattr(self.module, 'transformer'):
|
| 216 |
+
if hasattr(self.module.transformer, '_prepare_attn_mask'):
|
| 217 |
+
self.module.transformer._prepare_attn_mask = lambda attention_mask, *args, **kwargs: attention_mask
|
| 218 |
+
|
| 219 |
+
def build_alibi_tensor(self):
|
| 220 |
+
if hasattr(self.module, 'transformer'):
|
| 221 |
+
if hasattr(self.module.transformer, 'build_alibi_tensor'):
|
| 222 |
+
self.module.transformer.build_alibi_tensor = build_bloom_alibi_tensor
|
| 223 |
+
if hasattr(self.module.transformer, 'build_mpt_alibi_tensor'):
|
| 224 |
+
self.module.transformer.build_mpt_alibi_tensor_orig = self.module.transformer.build_mpt_alibi_tensor
|
| 225 |
+
self.module.transformer.__class__.build_mpt_alibi_tensor = build_mpt_alibi_tensor
|
| 226 |
+
if hasattr(self.module, 'model'):
|
| 227 |
+
if hasattr(self.module.model, 'get_alibi_mask'):
|
| 228 |
+
self.module.model.get_alibi_mask_orig = self.module.model.get_alibi_mask
|
| 229 |
+
self.module.model.__class__.get_alibi_mask = get_alibi_mask
|
| 230 |
+
|
| 231 |
+
def build_attn_bias(self):
|
| 232 |
+
if hasattr(self.module, 'transformer'):
|
| 233 |
+
if hasattr(self.module.transformer, '_attn_bias'):
|
| 234 |
+
self.module.transformer._attn_bias_orig = self.module.transformer._attn_bias
|
| 235 |
+
self.module.transformer.__class__._attn_bias = build_mpt_atten_bias_tensor
|
| 236 |
+
|
| 237 |
+
def _pre_forward_hook(self, module, *inputs, **kwargs):
|
| 238 |
+
if self.use_cuda_events:
|
| 239 |
+
self.timers(INFERENCE_MODEL_TIMER).start()
|
| 240 |
+
else:
|
| 241 |
+
get_accelerator().synchronize()
|
| 242 |
+
self._start = time.time()
|
| 243 |
+
|
| 244 |
+
def _post_forward_hook(self, module, input, output):
|
| 245 |
+
if self.use_cuda_events:
|
| 246 |
+
self.timers(INFERENCE_MODEL_TIMER).stop()
|
| 247 |
+
elapsed_time = self.timers(INFERENCE_MODEL_TIMER).elapsed(reset=True)
|
| 248 |
+
else:
|
| 249 |
+
get_accelerator().synchronize()
|
| 250 |
+
self._end = time.time()
|
| 251 |
+
elapsed_time = (self._end - self._start) * 1e3 # convert seconds to ms
|
| 252 |
+
self._model_times.append(elapsed_time)
|
| 253 |
+
|
| 254 |
+
def _create_model_parallel_group(self, config):
|
| 255 |
+
# Call the init process
|
| 256 |
+
if InferenceEngine.inference_mp_group is None:
|
| 257 |
+
init_distributed()
|
| 258 |
+
local_rank = int(os.getenv('LOCAL_RANK', '0'))
|
| 259 |
+
get_accelerator().set_device(local_rank)
|
| 260 |
+
|
| 261 |
+
ranks = [i for i in range(config.tensor_parallel.tp_size)]
|
| 262 |
+
self.mp_group = dist.new_group(ranks)
|
| 263 |
+
InferenceEngine.inference_mp_group = self.mp_group
|
| 264 |
+
else:
|
| 265 |
+
self.mp_group = InferenceEngine.inference_mp_group
|
| 266 |
+
|
| 267 |
+
def _create_ep_parallel_group(self, moe_experts):
|
| 268 |
+
# Call the init process
|
| 269 |
+
self.ep_group = {}
|
| 270 |
+
self.expert_mp_group = {}
|
| 271 |
+
moe_experts = moe_experts if type(moe_experts) is list else [moe_experts]
|
| 272 |
+
for e in moe_experts:
|
| 273 |
+
self.ep_group.update({e: None})
|
| 274 |
+
self.expert_mp_group.update({e: None})
|
| 275 |
+
for moe_ep_size in self.ep_group.keys():
|
| 276 |
+
num_ep_groups = dist.get_world_size() // moe_ep_size
|
| 277 |
+
for i in range(num_ep_groups):
|
| 278 |
+
ep_cnt = i * moe_ep_size
|
| 279 |
+
size = dist.get_world_size() if moe_ep_size > dist.get_world_size() else moe_ep_size
|
| 280 |
+
ranks = list(range(ep_cnt, ep_cnt + size))
|
| 281 |
+
_ep_group = dist.new_group(ranks)
|
| 282 |
+
if dist.get_rank() in ranks:
|
| 283 |
+
self.ep_group.update({moe_ep_size: _ep_group})
|
| 284 |
+
|
| 285 |
+
if dist.get_world_size() > moe_ep_size:
|
| 286 |
+
num_expert_mp_groups = dist.get_world_size() // num_ep_groups
|
| 287 |
+
expert_mp_size = dist.get_world_size() // moe_ep_size
|
| 288 |
+
for i in range(num_expert_mp_groups):
|
| 289 |
+
expert_mp_comm_ranks = [i + nr * moe_ep_size for nr in range(expert_mp_size)]
|
| 290 |
+
_expert_mp_group = dist.new_group(expert_mp_comm_ranks)
|
| 291 |
+
if dist.get_rank() in expert_mp_comm_ranks:
|
| 292 |
+
self.expert_mp_group.update({moe_ep_size: _expert_mp_group})
|
| 293 |
+
|
| 294 |
+
def _init_quantization_setting(self, quantization_setting):
|
| 295 |
+
self.quantize_bits = 8
|
| 296 |
+
self.mlp_extra_grouping = False
|
| 297 |
+
self.quantize_groups = 1
|
| 298 |
+
if type(quantization_setting) is tuple:
|
| 299 |
+
self.mlp_extra_grouping, \
|
| 300 |
+
self.quantize_groups = quantization_setting
|
| 301 |
+
elif quantization_setting is not None:
|
| 302 |
+
self.quantize_groups = quantization_setting
|
| 303 |
+
log_dist(
|
| 304 |
+
f"quantize_bits = {self.quantize_bits} "
|
| 305 |
+
f"mlp_extra_grouping = {self.mlp_extra_grouping}, "
|
| 306 |
+
f"quantize_groups = {self.quantize_groups}", [0])
|
| 307 |
+
|
| 308 |
+
# TODO: remove this function and add this functionality to pydantic config checking
|
| 309 |
+
def _validate_args(self, mpu, replace_with_kernel_inject):
|
| 310 |
+
# TODO: to support SD pipeline we need to avoid this check for now
|
| 311 |
+
if replace_with_kernel_inject and not isinstance(self.module, Module):
|
| 312 |
+
raise ValueError(f"model must be a torch.nn.Module, got {type(self.module)}")
|
| 313 |
+
if not isinstance(self._config.tensor_parallel.tp_size, int) or self._config.tensor_parallel.tp_size < 1:
|
| 314 |
+
raise ValueError(f"mp_size must be an int >= 1, got {self._config.tensor_parallel.tp_size}")
|
| 315 |
+
|
| 316 |
+
if mpu:
|
| 317 |
+
methods = ["get_model_parallel_group", "get_data_parallel_group"]
|
| 318 |
+
for method in methods:
|
| 319 |
+
if not hasattr(mpu, method):
|
| 320 |
+
raise ValueError(f"mpu is missing {method}")
|
| 321 |
+
if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)):
|
| 322 |
+
raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}")
|
| 323 |
+
|
| 324 |
+
supported_dtypes = [None, torch.half, torch.int8, torch.float]
|
| 325 |
+
if self._config.dtype not in supported_dtypes:
|
| 326 |
+
raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}")
|
| 327 |
+
|
| 328 |
+
if self.injection_dict is not None and not isinstance(self.injection_dict, dict):
|
| 329 |
+
raise ValueError(f"injection_dict must be None or a dict, got: {self.injection_dict}")
|
| 330 |
+
|
| 331 |
+
def load_model_with_checkpoint(self, r_module):
|
| 332 |
+
self.mp_replace = ReplaceWithTensorSlicing(
|
| 333 |
+
mp_group=self.mp_group, mp_size=self._config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
|
| 334 |
+
error_msgs = []
|
| 335 |
+
|
| 336 |
+
def load(module, state_dict, prefix):
|
| 337 |
+
args = (state_dict, prefix, {}, True, [], [], error_msgs)
|
| 338 |
+
if hasattr(module, 'weight'):
|
| 339 |
+
if module.weight.data.is_meta:
|
| 340 |
+
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
|
| 341 |
+
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data,
|
| 342 |
+
device="cpu"),
|
| 343 |
+
requires_grad=module.weight.data.requires_grad)
|
| 344 |
+
if 'query_key_value' in prefix:
|
| 345 |
+
module.weight = self.mp_replace.strided_copy(module.weight.data,
|
| 346 |
+
state_dict[prefix + 'weight'],
|
| 347 |
+
num_splits=3)
|
| 348 |
+
else:
|
| 349 |
+
module.weight = self.mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
|
| 350 |
+
else:
|
| 351 |
+
if module.norm.weight.data.is_meta:
|
| 352 |
+
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
|
| 353 |
+
module.norm.weight = torch.nn.parameter.Parameter(
|
| 354 |
+
data=torch.empty_like(module.norm.weight.data, device="cpu"),
|
| 355 |
+
requires_grad=module.norm.weight.data.requires_grad)
|
| 356 |
+
module.norm.weight = self.mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
|
| 357 |
+
if prefix + 'bias' in self.key_list:
|
| 358 |
+
if hasattr(module, 'norm'):
|
| 359 |
+
if module.norm.bias.data.is_meta:
|
| 360 |
+
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
|
| 361 |
+
module.norm.bias = torch.nn.parameter.Parameter(
|
| 362 |
+
data=torch.empty_like(module.norm.bias.data, device="cpu"),
|
| 363 |
+
requires_grad=module.norm.bias.data.requires_grad)
|
| 364 |
+
module.norm.bias = self.mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
|
| 365 |
+
else:
|
| 366 |
+
if module.bias.data.is_meta:
|
| 367 |
+
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
|
| 368 |
+
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data,
|
| 369 |
+
device="cpu"),
|
| 370 |
+
requires_grad=module.bias.data.requires_grad)
|
| 371 |
+
data = state_dict[prefix + 'bias']
|
| 372 |
+
data = data.to(get_accelerator().current_device_name())
|
| 373 |
+
module.bias = self.mp_replace.copy(module.bias, data)
|
| 374 |
+
|
| 375 |
+
layer_policies = {
|
| 376 |
+
nn.Linear: load,
|
| 377 |
+
nn.Embedding: load,
|
| 378 |
+
nn.LayerNorm: load,
|
| 379 |
+
LinearLayer: load,
|
| 380 |
+
LinearAllreduce: load
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
def load_module_recursive(module, prefix='', level=0):
|
| 384 |
+
for name, child in module.named_children():
|
| 385 |
+
if child.__class__ in layer_policies:
|
| 386 |
+
checking_key = prefix + name + '.'
|
| 387 |
+
if not any(checking_key in item for item in self.key_list):
|
| 388 |
+
continue
|
| 389 |
+
if len(list(child.parameters())) > 0 and list(child.parameters())[0].numel() == 0:
|
| 390 |
+
if len(child.weight.ds_shape) == 1:
|
| 391 |
+
child = Normalize(dim=child.weight.ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
|
| 392 |
+
setattr(module, name, child)
|
| 393 |
+
load(child, self.sd, prefix + name + '.')
|
| 394 |
+
else:
|
| 395 |
+
load_module_recursive(child, prefix if level == 0 else prefix + name + '.', level + 1)
|
| 396 |
+
|
| 397 |
+
load_module_recursive(r_module)
|
| 398 |
+
|
| 399 |
+
embedding_weight = None
|
| 400 |
+
|
| 401 |
+
for n, p in r_module.named_parameters():
|
| 402 |
+
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
|
| 403 |
+
embedding_weight = p
|
| 404 |
+
if embedding_weight is not None and hasattr(r_module, "lm_head") and hasattr(
|
| 405 |
+
r_module.lm_head, "weight") and r_module.lm_head.weight.is_meta:
|
| 406 |
+
r_module.lm_head.weight = embedding_weight
|
| 407 |
+
|
| 408 |
+
def _apply_injection_policy(self, config, client_module=None):
|
| 409 |
+
# client_module is only passed when using the injection_dict method.
|
| 410 |
+
checkpoint_dir = config.checkpoint
|
| 411 |
+
checkpoint = SDLoaderFactory.get_sd_loader_json(checkpoint_dir,
|
| 412 |
+
self.checkpoint_engine) if checkpoint_dir is not None else None
|
| 413 |
+
|
| 414 |
+
generic_injection(self.module, dtype=config.dtype, enable_cuda_graph=config.enable_cuda_graph)
|
| 415 |
+
|
| 416 |
+
if isinstance(self.module, torch.nn.Module):
|
| 417 |
+
# config is our DeepSpeedInferenceConfig and self.config is the HF model config
|
| 418 |
+
replace_transformer_layer(client_module, self.module, checkpoint, config, self.config)
|
| 419 |
+
|
| 420 |
+
def _get_all_ckpt_names(self, checkpoints_path, tag):
|
| 421 |
+
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*")
|
| 422 |
+
import glob
|
| 423 |
+
|
| 424 |
+
ckpt_files = glob.glob(ckpt_file_pattern)
|
| 425 |
+
ckpt_files.sort()
|
| 426 |
+
return ckpt_files
|
| 427 |
+
|
| 428 |
+
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
|
| 429 |
+
if mp_placeholder is not None:
|
| 430 |
+
mp_rank_str = mp_placeholder
|
| 431 |
+
else:
|
| 432 |
+
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
|
| 433 |
+
mp_rank_str = "{:02d}".format(mp_rank)
|
| 434 |
+
|
| 435 |
+
ckpt_name = os.path.join(
|
| 436 |
+
checkpoints_path,
|
| 437 |
+
"mp_rank_" + mp_rank_str + "_model_states.pt",
|
| 438 |
+
)
|
| 439 |
+
return ckpt_name
|
| 440 |
+
|
| 441 |
+
def _load_checkpoint(self, load_dir, load_module_strict=True, tag=None):
|
| 442 |
+
is_pipe_parallel = isinstance(self.module, PipelineModule)
|
| 443 |
+
if is_pipe_parallel:
|
| 444 |
+
raise RuntimeError('pipeline parallelism is currently not supported in inference.')
|
| 445 |
+
if not isinstance(load_dir, dict) and os.path.isdir(load_dir):
|
| 446 |
+
if tag is None:
|
| 447 |
+
latest_path = os.path.join(load_dir, "latest")
|
| 448 |
+
if os.path.isfile(latest_path):
|
| 449 |
+
with open(latest_path, "r") as fd:
|
| 450 |
+
tag = fd.read().strip()
|
| 451 |
+
|
| 452 |
+
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
|
| 453 |
+
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, self.checkpoint_engine)
|
| 454 |
+
else:
|
| 455 |
+
sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir, self.checkpoint_engine)
|
| 456 |
+
|
| 457 |
+
checkpoint = sd_loader['checkpoints']
|
| 458 |
+
|
| 459 |
+
if type(checkpoint) is list:
|
| 460 |
+
self.sd = torch.load(checkpoint[0], map_location='cpu')
|
| 461 |
+
self.key_list = list(self.sd.keys())
|
| 462 |
+
|
| 463 |
+
self.load_model_with_checkpoint(self.module)
|
| 464 |
+
|
| 465 |
+
for i in range(1, len(checkpoint)):
|
| 466 |
+
if not dist.is_initialized() or dist.get_rank() == 0:
|
| 467 |
+
print(f"loading checkpoint ({i})")
|
| 468 |
+
self.sd = torch.load(checkpoint[i], map_location=get_accelerator().device_name())
|
| 469 |
+
self.key_list = list(self.sd.keys())
|
| 470 |
+
self.load_model_with_checkpoint(self.module)
|
| 471 |
+
else:
|
| 472 |
+
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
|
| 473 |
+
|
| 474 |
+
load_path, checkpoint, quantize_config = sd_loader.load(self._config.tensor_parallel.tp_size,
|
| 475 |
+
mp_rank,
|
| 476 |
+
is_pipe_parallel=is_pipe_parallel,
|
| 477 |
+
quantize=(self._config.dtype is torch.int8),
|
| 478 |
+
quantize_groups=self.quantize_groups,
|
| 479 |
+
mlp_extra_grouping=self.mlp_extra_grouping)
|
| 480 |
+
|
| 481 |
+
self.quantization_scales, self.quantize_merge_count = quantize_config
|
| 482 |
+
|
| 483 |
+
moe, _ = has_moe_layers(self.module)
|
| 484 |
+
if moe:
|
| 485 |
+
from deepspeed.runtime.engine import DeepSpeedEngine
|
| 486 |
+
old_moe_load = False
|
| 487 |
+
if not isinstance(checkpoint['num_experts'], list):
|
| 488 |
+
old_moe_load = True
|
| 489 |
+
DeepSpeedEngine.load_moe_state_dict(load_dir,
|
| 490 |
+
tag,
|
| 491 |
+
state_dict=checkpoint[self._choose_module_key(checkpoint)],
|
| 492 |
+
old_moe_load=old_moe_load,
|
| 493 |
+
model=self.module,
|
| 494 |
+
mpu=self.mpu,
|
| 495 |
+
checkpoint_engine=self.checkpoint_engine)
|
| 496 |
+
|
| 497 |
+
self.module.load_state_dict(state_dict=checkpoint[self._choose_module_key(checkpoint)],
|
| 498 |
+
strict=load_module_strict)
|
| 499 |
+
|
| 500 |
+
def _choose_module_key(self, sd):
|
| 501 |
+
assert not ('module' in sd
|
| 502 |
+
and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed"
|
| 503 |
+
assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' or 'module' keys, not sure how to proceed"
|
| 504 |
+
if 'module' in sd:
|
| 505 |
+
return 'module'
|
| 506 |
+
elif 'model' in sd:
|
| 507 |
+
return 'model'
|
| 508 |
+
|
| 509 |
+
def _convert_to_dtype(self, config):
|
| 510 |
+
if not isinstance(self.module, torch.nn.Module):
|
| 511 |
+
return
|
| 512 |
+
|
| 513 |
+
if False: #config.dtype is torch.int8 and self.quantization_scales is None:
|
| 514 |
+
quantizer = WeightQuantization(mlp_extra_grouping=self.mlp_extra_grouping)
|
| 515 |
+
model, self.quantization_scales = quantizer.model_quantize(self.module, self.injection_dict,
|
| 516 |
+
self.quantize_bits, self.quantize_groups)
|
| 517 |
+
elif config.dtype == torch.half:
|
| 518 |
+
self.module.half()
|
| 519 |
+
elif config.dtype == torch.bfloat16:
|
| 520 |
+
self.module.bfloat16()
|
| 521 |
+
elif config.dtype == torch.float:
|
| 522 |
+
self.module.float()
|
| 523 |
+
|
| 524 |
+
def _create_cuda_graph(self, *inputs, **kwargs):
|
| 525 |
+
# warmup to create the workspace and cublas handle
|
| 526 |
+
cuda_stream = get_accelerator().Stream()
|
| 527 |
+
cuda_stream.wait_stream(get_accelerator().current_stream())
|
| 528 |
+
with get_accelerator().stream(cuda_stream):
|
| 529 |
+
for i in range(3):
|
| 530 |
+
ret = self.module(*inputs, **kwargs)
|
| 531 |
+
get_accelerator().current_stream().wait_stream(cuda_stream)
|
| 532 |
+
|
| 533 |
+
# create cuda_graph and assign static_inputs and static_outputs
|
| 534 |
+
self._cuda_graphs = get_accelerator().create_graph()
|
| 535 |
+
self.static_inputs = inputs
|
| 536 |
+
self.static_kwargs = kwargs
|
| 537 |
+
|
| 538 |
+
with get_accelerator().capture_to_graph(self._cuda_graphs):
|
| 539 |
+
self.static_output = self.module(*self.static_inputs, **self.static_kwargs)
|
| 540 |
+
|
| 541 |
+
self.cuda_graph_created = True
|
| 542 |
+
|
| 543 |
+
def _graph_replay(self, *inputs, **kwargs):
|
| 544 |
+
for i in range(len(inputs)):
|
| 545 |
+
if torch.is_tensor(inputs[i]):
|
| 546 |
+
self.static_inputs[i].copy_(inputs[i])
|
| 547 |
+
for k in kwargs:
|
| 548 |
+
if torch.is_tensor(kwargs[k]):
|
| 549 |
+
self.static_kwargs[k].copy_(kwargs[k])
|
| 550 |
+
get_accelerator().replay_graph(self._cuda_graphs)
|
| 551 |
+
return self.static_output
|
| 552 |
+
|
| 553 |
+
def model_times(self):
|
| 554 |
+
assert self.model_profile_enabled, "model profiling is not enabled"
|
| 555 |
+
model_times = self._model_times
|
| 556 |
+
if self._config.enable_cuda_graph and len(self._model_times) == 0:
|
| 557 |
+
raise ValueError("Model times are empty and cuda graph is enabled. If "
|
| 558 |
+
"this is a GPT-style model this combo is not supported. If this is a "
|
| 559 |
+
"BERT-style model this is a bug, please report it. "
|
| 560 |
+
f"Model type is: {type(self.module)}")
|
| 561 |
+
self._model_times = []
|
| 562 |
+
return model_times
|
| 563 |
+
|
| 564 |
+
def _module_match(self, module):
|
| 565 |
+
for policy in generic_policies:
|
| 566 |
+
policy = policy()
|
| 567 |
+
if policy.match_replaced(module):
|
| 568 |
+
return True
|
| 569 |
+
return False
|
| 570 |
+
|
| 571 |
+
def _local_cuda_graph_used(self, module):
|
| 572 |
+
if isinstance(module, torch.nn.Module):
|
| 573 |
+
return False
|
| 574 |
+
else:
|
| 575 |
+
sub_module_cuda_graph = False
|
| 576 |
+
for name in module.__dict__.keys():
|
| 577 |
+
sub_module = getattr(module, name)
|
| 578 |
+
|
| 579 |
+
if self._module_match(sub_module) and hasattr(sub_module, "enable_cuda_graph"):
|
| 580 |
+
sub_module_cuda_graph = True
|
| 581 |
+
|
| 582 |
+
return sub_module_cuda_graph
|
| 583 |
+
|
| 584 |
+
def forward(self, *inputs, **kwargs):
|
| 585 |
+
"""Execute forward propagation
|
| 586 |
+
|
| 587 |
+
Arguments:
|
| 588 |
+
*inputs: Variable length input list
|
| 589 |
+
**kwargs: variable length keyword arguments
|
| 590 |
+
"""
|
| 591 |
+
start = None
|
| 592 |
+
if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph:
|
| 593 |
+
get_accelerator().synchronize()
|
| 594 |
+
start = time.time()
|
| 595 |
+
|
| 596 |
+
if get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph and not self.local_cuda_graph:
|
| 597 |
+
if self.cuda_graph_created:
|
| 598 |
+
outputs = self._graph_replay(*inputs, **kwargs)
|
| 599 |
+
else:
|
| 600 |
+
self._create_cuda_graph(*inputs, **kwargs)
|
| 601 |
+
outputs = self._graph_replay(*inputs, **kwargs)
|
| 602 |
+
|
| 603 |
+
else:
|
| 604 |
+
outputs = self.module(*inputs, **kwargs)
|
| 605 |
+
|
| 606 |
+
if self.model_profile_enabled and self._config.enable_cuda_graph:
|
| 607 |
+
get_accelerator().synchronize()
|
| 608 |
+
duration = (time.time() - start) * 1e3 # convert seconds to ms
|
| 609 |
+
self._model_times.append(duration)
|
| 610 |
+
|
| 611 |
+
return outputs
|
| 612 |
+
|
| 613 |
+
def _generate(self, *inputs, **kwargs):
|
| 614 |
+
# Reset KV-cache at the beginning of generate
|
| 615 |
+
if hasattr(self.module, 'reset_cache'):
|
| 616 |
+
self.module.reset_cache()
|
| 617 |
+
num_beams = 1
|
| 618 |
+
if "generation_config" in kwargs:
|
| 619 |
+
gen_config = kwargs["generation_config"]
|
| 620 |
+
num_beams = getattr(gen_config, "num_beams", 1)
|
| 621 |
+
if "num_beams" in kwargs:
|
| 622 |
+
num_beams = kwargs["num_beams"]
|
| 623 |
+
|
| 624 |
+
if num_beams > 1:
|
| 625 |
+
raise NotImplementedError("DeepSpeed does not support `num_beams` > 1, if this is important to you please "
|
| 626 |
+
"add your request to: https://github.com/microsoft/DeepSpeed/issues/2506")
|
| 627 |
+
|
| 628 |
+
if ("input_ids" in kwargs) and (kwargs["input_ids"].dim() == 2):
|
| 629 |
+
for input_tensor in kwargs["input_ids"]:
|
| 630 |
+
tensor_length = input_tensor.shape[-1]
|
| 631 |
+
if tensor_length > self._config.max_out_tokens:
|
| 632 |
+
raise RuntimeError(
|
| 633 |
+
f"Input with size {tensor_length} exceeds maximum length of {self._config.max_out_tokens}. Please increase `max_tokens` in the DeepSpeed Inference Config."
|
| 634 |
+
)
|
| 635 |
+
|
| 636 |
+
return self.module.generate(*inputs, **kwargs)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (184 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/layers.cpython-310.pyc
ADDED
|
Binary file (3.59 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization.cpython-310.pyc
ADDED
|
Binary file (3.14 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization_context.cpython-310.pyc
ADDED
|
Binary file (819 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (9.17 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/layers.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from torch import nn
|
| 9 |
+
from torch import Tensor
|
| 10 |
+
from torch.nn import functional as F
|
| 11 |
+
from .utils import Quantizer, DeQuantizer, concat_to_compat_param
|
| 12 |
+
from typing import Tuple, Callable, Dict
|
| 13 |
+
from deepspeed.runtime.zero import register_external_parameter
|
| 14 |
+
|
| 15 |
+
quantized_weight_registry = {}
|
| 16 |
+
is_zero3_enabled = False
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# deal with weight sharing
|
| 20 |
+
def get_quantized_weight_wrapper(model, pre_quant_weight: nn.Parameter, quantize_weight_fn: Callable) -> nn.Parameter:
|
| 21 |
+
if id(pre_quant_weight) in quantized_weight_registry:
|
| 22 |
+
compat_tensor = quantized_weight_registry[id(pre_quant_weight)]
|
| 23 |
+
if is_zero3_enabled:
|
| 24 |
+
register_external_parameter(model, compat_tensor)
|
| 25 |
+
|
| 26 |
+
return quantized_weight_registry[id(pre_quant_weight)]
|
| 27 |
+
else:
|
| 28 |
+
quantized_weights, quant_scale, quant_min = quantize_weight_fn()
|
| 29 |
+
quantized_weight_registry[id(pre_quant_weight)] = concat_to_compat_param(quantized_weights, quant_scale,
|
| 30 |
+
quant_min)
|
| 31 |
+
return quantized_weight_registry[id(pre_quant_weight)]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_quantize_weight_fn(quantizer: Quantizer, pre_quant_weight: nn.Parameter) -> Callable:
|
| 35 |
+
|
| 36 |
+
def func() -> Tuple[nn.Parameter, Tensor, Tensor]:
|
| 37 |
+
quantized_weights, quant_scale, quant_min = quantizer.quantize(pre_quant_weight.data)
|
| 38 |
+
# A temporary hack as zero Zero3 assume all model weights has the same type. in all_gather_coalesced.get_only_unique_item
|
| 39 |
+
quantized_weights = quantized_weights.view(pre_quant_weight.dtype)
|
| 40 |
+
quant_scale = quant_scale.type(pre_quant_weight.dtype)
|
| 41 |
+
quant_min = quant_min.type(pre_quant_weight.dtype)
|
| 42 |
+
return quantized_weights, quant_scale, quant_min
|
| 43 |
+
|
| 44 |
+
return func
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class QuantizedLinear(nn.Linear):
    """Drop-in ``nn.Linear`` replacement that stores its weight quantized and
    dequantizes it on the fly in ``forward``."""

    def __init__(self, config: Dict, pre_quant_layer: nn.Linear) -> None:
        # Mirror the original layer's geometry/device/dtype so this module is a
        # transparent replacement for existing callers.
        super(QuantizedLinear, self).__init__(in_features=pre_quant_layer.in_features,
                                              out_features=pre_quant_layer.out_features,
                                              bias=pre_quant_layer.bias is not None,
                                              device=pre_quant_layer.weight.device,
                                              dtype=pre_quant_layer.weight.dtype)
        self.config = config

        self.quantizer = Quantizer(config=config)
        # Reuse the original bias object (aliased, not copied).
        self.bias = pre_quant_layer.bias
        # Shared weights are quantized only once via the module-level registry.
        self.weight = get_quantized_weight_wrapper(self, pre_quant_layer.weight,
                                                   get_quantize_weight_fn(self.quantizer, pre_quant_layer.weight))

        # Dequantizer is attached to the parameter so forward() and functional
        # wrappers can find it without a module reference.
        self.weight.dequantizer = DeQuantizer(config, pre_quant_layer.weight.dtype)

    def forward(self, input: Tensor) -> Tensor:
        # Split the packed compat parameter back into (weight, scale, min) views.
        quantized_weight, quant_scale, quant_min = self.weight.deconcat(self.weight)
        temp_dequantized_weight = self.weight.dequantizer.dequantize(quantized_weight.view(torch.uint8), quant_scale,
                                                                     quant_min)

        # !!! Do not use torch.functional.linear(input, temp_dequantized_weight, self.bias) here as in zero3 torch.functional.linear is
        # replaced by LinearFunctionForZeroStage3. Which assume weight is non-temporary.
        # If weight is temp buffer there will be memory leak.
        return torch._C._nn.linear(input, temp_dequantized_weight, self.bias)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class QuantizedEmbedding(nn.Embedding):
    """Drop-in ``nn.Embedding`` replacement that stores its weight quantized and
    dequantizes it on the fly at lookup time."""

    def __init__(self, config: Dict, pre_quant_layer: nn.Embedding) -> None:
        # Mirror all of the original layer's construction arguments, including
        # its weight (_weight), so the module is a transparent replacement.
        super(QuantizedEmbedding, self).__init__(num_embeddings=pre_quant_layer.num_embeddings,
                                                 embedding_dim=pre_quant_layer.embedding_dim,
                                                 padding_idx=pre_quant_layer.padding_idx,
                                                 max_norm=pre_quant_layer.max_norm,
                                                 norm_type=pre_quant_layer.norm_type,
                                                 scale_grad_by_freq=pre_quant_layer.scale_grad_by_freq,
                                                 sparse=pre_quant_layer.sparse,
                                                 _weight=pre_quant_layer.weight,
                                                 device=pre_quant_layer.weight.device,
                                                 dtype=pre_quant_layer.weight.dtype)

        # Features that would need to touch/renormalize the full-precision
        # weight at lookup time are rejected on the quantized path.
        assert pre_quant_layer.max_norm is None, 'Not supported'
        assert pre_quant_layer.norm_type == 2, 'Not supported'
        assert pre_quant_layer.scale_grad_by_freq == False, 'Not supported'
        assert pre_quant_layer.sparse == False, 'Not supported'

        self.config = config
        quantizer = Quantizer(config=config)

        # Shared weights are quantized only once via the module-level registry.
        self.weight = get_quantized_weight_wrapper(self, pre_quant_layer.weight,
                                                   get_quantize_weight_fn(quantizer, pre_quant_layer.weight))

        self.weight.dequantizer = DeQuantizer(config, pre_quant_layer.weight.dtype)

    def forward(self, input: Tensor) -> Tensor:
        # Split the packed compat parameter back into (weight, scale, min) views.
        quantized_weight, quant_scale, quant_min = self.weight.deconcat(self.weight)
        temp_dequantized_weight = self.weight.dequantizer.dequantize(quantized_weight.view(torch.uint8), quant_scale,
                                                                     quant_min)

        return F.embedding(input, temp_dequantized_weight, self.padding_idx, self.max_norm, self.norm_type,
                           self.scale_grad_by_freq, self.sparse)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# Maps a stock torch module type to its quantized drop-in replacement class.
# Consumed by quantization._init_group_wise_weight_quantization when swapping modules.
QUANTIZATION_LAYER_MAPPINGS = {
    nn.Linear: QuantizedLinear,
    nn.Embedding: QuantizedEmbedding,
}
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
from typing import Dict
|
| 9 |
+
import gc
|
| 10 |
+
from deepspeed.inference.quantization import layers
|
| 11 |
+
from .layers import QUANTIZATION_LAYER_MAPPINGS
|
| 12 |
+
from .utils import get_AsyncPartitionedParameterSwapper, recursive_setattr
|
| 13 |
+
from deepspeed.utils.logging import logger
|
| 14 |
+
from collections import deque
|
| 15 |
+
from transformers.utils.generic import ContextManagers
|
| 16 |
+
from .quantization_context import QuantizationContext
|
| 17 |
+
import contextlib
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _init_group_wise_weight_quantization(model: nn.Module, ds_config: Dict) -> nn.Module:
    """[Experimental] Apply group-wise weight quantization to model. Replace layers module according to config_list

    Args:
        model (nn.Module): A nn.Module
        ds_config (Dict, optional): The ds_config dictionary. use None for non-deepspeed managed model.

    Returns:
        nn.Module: Quantized nn.Module (the same object, modified in place).
    """

    # global quantized_weight_registry

    matched_module_list_by_key = {}
    matched_module_count = 0

    assert 'weight_quantization' in ds_config, 'Please provide quantization config in ds_config'
    quantization_config = ds_config['weight_quantization']['post_init_quant']

    # Return nvme swapper if exists, else return None.
    # For nvme offloading we must use the same swapper here as model initialized.
    nvme_swapper = get_AsyncPartitionedParameterSwapper(model)
    is_zero3_enabled = 'zero_optimization' in ds_config and \
        'stage' in ds_config['zero_optimization'] and \
        ds_config['zero_optimization']['stage'] == 3
    is_offloading_enabled = 'zero_optimization' in ds_config and \
        'offload_param' in ds_config['zero_optimization']

    # Propagate the ZeRO-3 flag to the layers module so shared-weight wrapping
    # can register external parameters on aliasing modules.
    layers.is_zero3_enabled = is_zero3_enabled

    # (fix) Use contextlib.nullcontext() as the explicit no-op context manager.
    # The original used contextlib.suppress() with no arguments, which behaves
    # the same but obscures the intent (it suppresses nothing).
    context_mgr = ContextManagers([QuantizationContext(config_dict_or_path=ds_config, param_swapper=nvme_swapper)]) \
        if is_zero3_enabled else contextlib.nullcontext()
    with context_mgr:
        module_list = list(
            filter(lambda named_module: type(named_module[1]) in QUANTIZATION_LAYER_MAPPINGS, model.named_modules()))

        # Quantize small weight first then large.
        if not is_offloading_enabled:
            module_list.sort(key=lambda named_module: named_module[1].weight.ds_tensor.numel()
                             if is_zero3_enabled else named_module[1].weight.numel())
        module_list = deque(module_list)

        while len(module_list) > 0:
            # Use popleft to timely release module's memory of replaced module after each loop iteration
            module_name, module = module_list.popleft()

            matched_key = None
            matched_quantization_config = None

            # A module matches a config entry when the key is a substring of its
            # dotted name; ambiguous matches are a configuration error.
            for key, config in quantization_config.items():
                if key in module_name:
                    assert matched_key is None, f'{module_name} matched multiple quantization key word {matched_key} and {key}'
                    matched_key = key
                    matched_quantization_config = config

            if matched_key is None:
                continue

            if is_zero3_enabled:
                # Materialize the full (unpartitioned) weight before quantizing.
                module.weight.all_gather()

            assert module.weight.dtype == torch.float16, 'Model weight is expected in half.'

            new_module = QUANTIZATION_LAYER_MAPPINGS[type(module)](matched_quantization_config, module)

            if is_zero3_enabled:
                module.weight.partition()

            recursive_setattr(model, module_name, new_module)

            if matched_key not in matched_module_list_by_key:
                matched_module_list_by_key[matched_key] = []
            matched_module_list_by_key[matched_key].append(module_name)
            matched_module_count += 1

            # Timely recycle memory to prevent OOM on large models
            gc.collect()

    # Clear registry after model construction.
    layers.quantized_weight_registry.clear()

    logger.info(
        f'Group-wise weight quantization summary: convert {matched_module_count} node(s) to quantized implementation')
    summary_str = '\n'

    for key, module_list in matched_module_list_by_key.items():
        summary_str += f'Key: {key}, matched modules:\n'
        for module_name in module_list:
            summary_str += f'\t{module_name}\n'
    logger.info(summary_str)

    return model
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization_context.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from deepspeed.runtime.zero import partition_parameters
|
| 7 |
+
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class QuantizationContext(partition_parameters.Init):
    """Thin wrapper over ZeRO-3's ``partition_parameters.Init`` context.

    Used while constructing quantized replacement modules so their new
    parameters are partitioned with the same NVMe swapper (if any) that the
    original model was initialized with.
    """

    def __init__(self, config_dict_or_path, param_swapper: AsyncPartitionedParameterSwapper = None) -> None:
        # Forward everything to the ZeRO Init context; no extra behavior.
        super().__init__(config_dict_or_path=config_dict_or_path, param_swapper=param_swapper)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/quantization/utils.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import deepspeed
|
| 8 |
+
from torch import Tensor
|
| 9 |
+
from typing import Tuple
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
from typing import Dict, Callable, Union
|
| 12 |
+
from deepspeed.accelerator import get_accelerator
|
| 13 |
+
import functools
|
| 14 |
+
|
| 15 |
+
# Accelerator device name resolved once at import time; 'cpu' when no
# accelerator is available. Consulted by DeQuantizer to pick the CUDA kernel path.
device = get_accelerator().device_name() if get_accelerator().is_available() else 'cpu'

# Lazily-built handle to the compiled quantizer CUDA op module (see below).
quantizer_cuda_module = None


def get_quantizer_cuda_module():
    """Build (on first call) and return DeepSpeed's quantizer CUDA op module."""
    global quantizer_cuda_module
    if quantizer_cuda_module is None:
        # JIT-loads/compiles the extension on first use; cached afterwards.
        quantizer_cuda_module = deepspeed.ops.op_builder.QuantizerBuilder().load()
    return quantizer_cuda_module
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def tensor_clamp(tensor: Tensor, min, max) -> Tensor:
    """Clamp ``tensor`` into ``[min, max]``.

    Clamps in place and returns the same tensor, except for FP16 tensors on
    CPU, where a float32 round-trip copy is returned instead.
    """
    needs_fp32_fallback = tensor.device.type == 'cpu' and tensor.dtype == torch.float16
    if not needs_fp32_fallback:
        return tensor.clamp_(min, max)
    # CPU does not support FP16 clamp
    return tensor.to(dtype=torch.float32).clamp_(min, max).to(dtype=torch.float16)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def tensor_round(tensor: Tensor) -> Tensor:
    """Round ``tensor`` to the nearest integer (half-to-even, torch semantics).

    Rounds in place and returns the same tensor, except for FP16 tensors on
    CPU, where a float32 round-trip copy is returned instead.
    """
    needs_fp32_fallback = tensor.device.type == 'cpu' and tensor.dtype == torch.float16
    if not needs_fp32_fallback:
        return tensor.round_()
    # CPU does not support FP16 round
    return tensor.to(dtype=torch.float32).round_().to(dtype=torch.float16)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class Quantizer:
    """Group-wise asymmetric (affine) weight quantizer for INT4/INT8.

    Config keys used: ``num_bits`` (4 or 8), ``symmetric`` (must be False),
    ``group_dim`` (dimension that is split into groups), ``group_size``
    (elements per group along ``group_dim``).
    """

    def __init__(self, config: Dict) -> None:
        self.config = config
        assert self.config['num_bits'] == 4 or self.config[
            'num_bits'] == 8, 'Only INT4 and INT8 quantization is supported.'
        assert self.config['symmetric'] == False, 'Only asymmetric quantization is supported at this moment.'

    def quantize(self, tensor: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        """Quantize ``tensor`` group-wise.

        Returns ``(quantized, scale, min)`` where ``quantized`` is uint8 (for
        INT4 two values are packed per byte, halving the last dimension) and
        ``scale``/``min`` are per-group parameters of the affine transform.
        """
        assert tensor.shape[self.config['group_dim']] % self.config['group_size'] == 0 \
            , f'Tensor shape: {tensor.shape} quantization config {self.config}'

        # Work on a copy: _quantize_int8 mutates its input in place.
        tensor = torch.clone(tensor)

        # Reshape so each quantization group sits on its own axis.
        shape = tensor.shape
        num_groups = shape[self.config['group_dim']] // self.config['group_size']
        new_shape = (shape[:self.config['group_dim']] + (num_groups, self.config['group_size']) +
                     shape[self.config['group_dim'] + 1:])
        tensor = tensor.view(new_shape)

        quantized_tensor, scale, min_value = self._quantize_int8(tensor)
        quantized_tensor = quantized_tensor.view(shape)

        if self.config['num_bits'] == 4:
            return self._compress_uint8_to_uint4(quantized_tensor), scale, min_value
        if self.config['num_bits'] == 8:
            return quantized_tensor, scale, min_value

        assert False, 'Unsupported quantization bits {}'.format(self.config['num_bits'])

    def _quantize_int8(self, tensor: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        # Affine quantization per group: q = round((x - min) * q_range / (max - min)).
        q_range = 2**self.config['num_bits'] - 1
        min_value = tensor.amin(dim=self.config['group_dim'] + 1, keepdim=True)
        max_value = tensor.amax(dim=self.config['group_dim'] + 1, keepdim=True)

        # NOTE(review): a constant group (max == min) yields an inf scale here —
        # confirm upstream guarantees non-constant groups or clamp the range.
        scale = q_range / (max_value - min_value)

        tensor = tensor.sub_(min_value).mul_(scale)
        tensor = tensor_round(tensor_clamp(tensor, 0, q_range)).to(torch.uint8)
        return tensor, scale, min_value

    def _compress_uint8_to_uint4(self, tensor: Tensor) -> Tensor:
        # Pack two uint4 values per byte: even index -> high nibble, odd -> low.
        assert tensor.shape[-1] % 2 == 0

        # (fix) The original allocated a throwaway ``torch.empty`` buffer here
        # that was immediately overwritten; bitwise_or allocates its own result.
        return torch.bitwise_or(tensor[..., 0::2].bitwise_left_shift(4), tensor[..., 1::2])
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class DeQuantizer:
    """Group-wise asymmetric dequantizer matching ``Quantizer``'s packing.

    ``dtype`` is the floating dtype the dequantized tensor is produced in.
    """

    def __init__(self, config: Dict, dtype: torch.dtype) -> None:
        self.config = config
        self.dtype = dtype
        assert self.config['num_bits'] == 4 or self.config[
            'num_bits'] == 8, 'Only INT4 and INT8 quantization is supported.'
        assert self.config['symmetric'] == False, 'Only asymmetric quantization is supported at this moment.'

    def dequantize(self, tensor: Tensor, quant_scale: Tensor, quant_min: Tensor) -> Tensor:
        """Reconstruct a floating tensor from packed uint8 data plus per-group
        scale/min. Falls back to the pure-torch path off-CUDA."""
        # Use customized CUDA quantization kernel if possible.
        if self.config['group_size'] % 8 == 0 and \
                (self.config['num_bits'] == 4 or self.config['num_bits'] == 8) and \
                self.config['group_dim'] == len(tensor.shape) - 1 and \
                self.dtype == torch.float16 and device == 'cuda':

            last_dimension_size = self.config['group_size']
            if self.config['num_bits'] == 4:
                # Packed int4: each byte holds two values, so stored rows are half-width.
                last_dimension_size = last_dimension_size // 2
                quantized_tensor = get_quantizer_cuda_module().dequantize_int4_to_half_experimental(
                    tensor.reshape(-1, last_dimension_size), quant_scale, quant_min,
                    tensor.numel() // last_dimension_size, self.config['group_size'])
                shape = list(tensor.shape)
                # Unpacking doubles the last dimension back to its logical size.
                shape[-1] = shape[-1] * 2
            elif self.config['num_bits'] == 8:
                # last_dimension_size = last_dimension_size // 2
                quantized_tensor = get_quantizer_cuda_module().dequantize_int8_to_half_experimental(
                    tensor.reshape(-1, last_dimension_size), quant_scale, quant_min,
                    tensor.numel() // last_dimension_size, self.config['group_size'])
                shape = list(tensor.shape)

            return quantized_tensor.reshape(shape)

        if self.config['num_bits'] == 4:
            tensor = self._decompress_uint4_to_uint8(tensor)
        elif self.config['num_bits'] != 8:
            assert False, 'Unsupported quantization bits {}'.format(self.config['num_bits'])

        # Reshape so each quantization group sits on its own axis (mirrors Quantizer).
        shape = tensor.shape
        num_groups = shape[self.config['group_dim']] // self.config['group_size']
        new_shape = (shape[:self.config['group_dim']] + (num_groups, self.config['group_size']) +
                     shape[self.config['group_dim'] + 1:])
        tensor = tensor.view(new_shape)

        dequantized_tensor = self._dequantize_int8(tensor, quant_scale, quant_min).view(shape)
        return dequantized_tensor

    def _dequantize_int8(self, tensor: Tensor, quant_scale: Tensor, quant_min: Tensor) -> Tensor:
        # Inverse affine transform: x = q / scale + min, broadcast per group.
        assert tensor.dtype == torch.uint8
        data = torch.zeros_like(tensor, dtype=self.dtype, device=tensor.device)
        data = data.copy_(tensor)
        data = data.div_(quant_scale).add_(quant_min)

        return data

    def _decompress_uint4_to_uint8(self, tensor: Tensor) -> Tensor:
        # Unpack two uint4 values per byte: high nibble -> even index, low -> odd.
        new_data_shape = list(tensor.shape)
        new_data_shape[-1] = new_data_shape[-1] * 2
        data = torch.empty(new_data_shape, dtype=torch.uint8, device=tensor.device)
        data[..., 0::2] = tensor.bitwise_right_shift(4)
        data[..., 1::2] = tensor.bitwise_and(0xF)

        return data
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def get_AsyncPartitionedParameterSwapper(model: nn.Module):
    """Return the NVMe swapper attached to any parameter of ``model``, or None.

    For NVMe offloading the same swapper the model was initialized with must be
    reused, so we probe the existing parameters for one.
    """
    return next((param.nvme_swapper for param in model.parameters()
                 if getattr(param, 'nvme_swapper', None) is not None), None)
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def recursive_setattr(model, module_name, module):
    """
    Recursively set the attribute of a module.

    Args:
        model (`torch.nn.Module`)
            The model to set the attribute in.
        module_name (`str`)
            Dotted path of the attribute to set (e.g. ``"encoder.layer.0"``).
        module (`torch.nn.Module`)
            The module to set the attribute to.
    """
    *parent_names, leaf_name = module_name.split('.')
    target = model
    for attr_name in parent_names:
        target = getattr(target, attr_name)
    setattr(target, leaf_name, module)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def concat_to_compat_param(quantized_weight: Tensor,
                           quant_scale: Tensor,
                           quant_min: Tensor,
                           return_param: bool = True) -> Union[nn.Parameter, Tensor]:
    """Flatten and concatenate (weight, scale, min) into a single 1-D tensor.

    The returned tensor/parameter carries a ``deconcat`` callable that splits a
    packed tensor back into views with the original three shapes.
    """
    weight_shape = quantized_weight.shape
    scale_shape = quant_scale.shape
    min_shape = quant_min.shape

    def _make_deconcat(w_shape: torch.Size, s_shape: torch.Size, m_shape: torch.Size) -> Callable:

        def fn(compat_tensor: nn.Parameter) -> Tuple[Tensor, Tensor, Tensor]:
            w_numel = w_shape.numel()
            s_numel = s_shape.numel()
            weight = torch.narrow(compat_tensor, 0, 0, w_numel).view(w_shape)
            scale = torch.narrow(compat_tensor, 0, w_numel, s_numel).view(s_shape)
            min_val = torch.narrow(compat_tensor, 0, w_numel + s_numel, m_shape.numel()).view(m_shape)
            return weight, scale, min_val

        return fn

    compat_tensor = torch.concat(
        [torch.flatten(quantized_weight), torch.flatten(quant_scale), torch.flatten(quant_min)])
    if return_param:
        compat_tensor = nn.Parameter(compat_tensor, requires_grad=False)
    compat_tensor.deconcat = _make_deconcat(weight_shape, scale_shape, min_shape)

    return compat_tensor
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def _quantize_param(param: nn.Parameter, quant_config: Dict):
    """Quantize ``param`` in place.

    After this call ``param.data`` is the packed compat tensor and the
    quantizer/dequantizer handles plus a ``weight_quantized`` marker are
    attached to the parameter for later use by the functional wrappers.
    """
    assert not hasattr(param, 'weight_quantized'), 'Parameter has already been quantized.'
    quantizer = Quantizer(quant_config)
    dequantizer = DeQuantizer(quant_config, param.dtype)

    quantized_weight, quant_scale, quant_min = quantizer.quantize(param.data)

    # Bitcast all three pieces to the parameter's original dtype so ZeRO sees a
    # uniform dtype. NOTE(review): scale/min use .view (bitcast) here, while
    # get_quantize_weight_fn uses .type (value cast) for them — confirm intended.
    quantized_weight = quantized_weight.view(param.dtype)
    quant_scale = quant_scale.view(param.dtype)
    quant_min = quant_min.view(param.dtype)

    quantized_compat_tensor = concat_to_compat_param(quantized_weight, quant_scale, quant_min)
    param.data = quantized_compat_tensor
    param.deconcat = quantized_compat_tensor.deconcat

    param.quantizer = quantizer
    param.dequantizer = dequantizer
    # Marker checked by wrap_quantized_functional / wrap_load_from_state_dict.
    setattr(param, 'weight_quantized', True)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def wrap_quantized_functional(f):
    """Decorator for functional ops of the form ``f(input, weight, ...)``.

    When ``weight`` is a packed quantized parameter (``weight_quantized`` flag
    set), it is dequantized on the fly before calling ``f``; otherwise ``f`` is
    invoked unchanged.
    """

    @functools.wraps(f)
    def wrapper(input: Tensor, weight: nn.Parameter, *args, **kwargs) -> Tensor:
        if not getattr(weight, 'weight_quantized', False):
            return f(input, weight, *args, **kwargs)
        quantized_weight, quant_scale, quant_min = weight.deconcat(weight)
        temp_dequantized_weight = weight.dequantizer.dequantize(quantized_weight.view(torch.uint8), quant_scale,
                                                                quant_min)
        return f(input, temp_dequantized_weight, *args, **kwargs)

    return wrapper
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def wrap_load_from_state_dict(f):
    """Decorator for ``Module._load_from_state_dict``.

    Temporarily replaces the incoming full-precision ``weight`` entry in the
    state dict with its packed quantized form so it matches the module's
    quantized parameter layout, then restores the original entry afterwards.
    """

    @functools.wraps(f)
    def wrapper(model, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        replaced_old_value = None
        key = None
        # We may have nested wrappers if we launch multiple initialization context.
        # Use state_dict_quantized flag to quantize state_dict only once
        if hasattr(model.weight, 'weight_quantized') and getattr(
                model.weight, 'weight_quantized') and not hasattr(model.weight, 'state_dict_quantized'):
            setattr(model.weight, 'state_dict_quantized', True)
            key = prefix + 'weight'
            if key in state_dict:
                quantized_weight, quant_scale, quant_min = model.weight.quantizer.quantize(state_dict[key])
                # Bitcast to the module's dtype so ZeRO sees a uniform dtype.
                quantized_weight = quantized_weight.view(model.weight.dtype)
                quant_scale = quant_scale.view(model.weight.dtype)
                quant_min = quant_min.view(model.weight.dtype)

                # Remember the original entry so the caller's state_dict is
                # restored untouched after loading.
                replaced_old_value = state_dict[key]

                state_dict[key] = concat_to_compat_param(quantized_weight, quant_scale, quant_min)

        f(model, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

        if replaced_old_value is not None:
            state_dict[key] = replaced_old_value
            # Drop the once-only marker so a later reload can quantize again.
            delattr(model.weight, 'state_dict_quantized')

    return wrapper
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
# Module types whose weights are eligible for weight quantization.
WEIGHT_QUANTIZATION_LAYERS = (
    nn.Linear,
    nn.Embedding,
)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
from .config_v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig
|
| 6 |
+
from .engine_v2 import InferenceEngineV2
|
| 7 |
+
from .engine_factory import build_hf_engine, build_engine_from_ds_checkpoint
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/allocator.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from functools import reduce
|
| 7 |
+
from typing import Iterable
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from deepspeed.accelerator import get_accelerator
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Allocator:
    """Hands out cached, reshaped views carved from pre-allocated scratch tensors."""

    # cache[tensor][shape] -> view. NOTE(review): keying on the tensor keeps it
    # alive for the process lifetime — acceptable for long-lived scratch buffers.
    cache = defaultdict(dict)

    @staticmethod  # (fix) was an implicit plain function; explicit staticmethod also makes instance calls safe
    def empty_from(tensor: torch.Tensor, shape: Iterable[int]) -> torch.Tensor:
        """Return a view of ``tensor``'s first ``prod(shape)`` elements with ``shape``.

        Raises:
            ValueError: if ``shape`` has zero total elements.
        """
        # (fix) Normalize to a tuple: lists are unhashable as dict keys and a
        # generator would be keyed by identity (never hitting the cache) and
        # exhausted by the size computation below.
        shape = tuple(shape)
        try:
            return Allocator.cache[tensor][shape]
        except KeyError:
            shape_size = reduce(lambda x, y: x * y, shape)
            if shape_size == 0:
                raise ValueError("Cannot create empty tensor with size 0")
            Allocator.cache[tensor][shape] = tensor.flatten()[:shape_size].view(shape)
            return Allocator.cache[tensor][shape]


empty_from = Allocator.empty_from
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def on_device(method) -> torch.Tensor:
    """
    Wraps a method to ensure the returned tensor is on the current device.

    Non-tensor return values are passed through unchanged.
    """
    import functools  # local import: keeps the module's top-level imports untouched

    @functools.wraps(method)  # (fix) preserve the wrapped method's name/docstring for debugging
    def wrapped(self, *args, **kwargs):
        tensor = method(self, *args, **kwargs)
        if isinstance(tensor, torch.Tensor):
            return tensor.to(get_accelerator().current_device())
        return tensor

    return wrapped
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_factory.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import pickle
|
| 10 |
+
from packaging import version
|
| 11 |
+
|
| 12 |
+
from .engine_v2 import InferenceEngineV2
|
| 13 |
+
from .config_v2 import RaggedInferenceEngineConfig
|
| 14 |
+
from .checkpoint import HuggingFaceCheckpointEngine
|
| 15 |
+
from .logging import inference_logger
|
| 16 |
+
from .model_implementations import (
|
| 17 |
+
OPTPolicy,
|
| 18 |
+
Llama2Policy,
|
| 19 |
+
MistralPolicy,
|
| 20 |
+
MixtralPolicy,
|
| 21 |
+
FalconPolicy,
|
| 22 |
+
PhiPolicy,
|
| 23 |
+
QwenPolicy,
|
| 24 |
+
Qwen2Policy,
|
| 25 |
+
)
|
| 26 |
+
from .model_implementations.inference_policy_base import POLICIES, InferenceV2Policy
|
| 27 |
+
from .model_implementations.flat_model_helpers import make_metadata_filename, ModelMetadata
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def build_engine_from_ds_checkpoint(path: str,
                                    engine_config: RaggedInferenceEngineConfig,
                                    debug_level: int = logging.INFO) -> InferenceEngineV2:
    """
    Creates an engine from a checkpoint saved by ``InferenceEngineV2``.

    Arguments:
        path: Path to the checkpoint. This does not need to point to any files in particular,
            just the directory containing the checkpoint.
        engine_config: Engine configuration. See ``RaggedInferenceEngineConfig`` for details.
        debug_level: Logging level to use. Unless you are actively seeing issues, the recommended
            value is ``logging.INFO``.

    Returns:
        Fully initialized inference engine ready to serve queries.
    """

    inference_logger(level=debug_level)
    # Load metadata, for grabbing the policy name we'll have all ranks just check for
    # rank 0.
    metadata_filename = make_metadata_filename(path, 0, engine_config.tensor_parallel.tp_size)
    # (fix) close the file deterministically instead of leaking the handle from
    # json.load(open(...)).
    with open(metadata_filename, "r") as metadata_file:
        metadata = json.load(metadata_file)
    # NOTE(review): parse_raw expects a raw JSON string, so the loaded value must
    # itself be a JSON-encoded string — confirm against the save path.
    metadata = ModelMetadata.parse_raw(metadata)

    # Get the policy
    try:
        policy_cls: InferenceV2Policy = POLICIES[metadata.policy]
    except KeyError:
        raise ValueError(f"Unknown policy {metadata.policy} for model {path}")

    # Load the model config
    # NOTE(review): pickle is only safe on trusted checkpoints; do not load
    # checkpoints from untrusted sources.
    with open(os.path.join(path, "ds_model_config.pkl"), "rb") as config_file:
        model_config = pickle.load(config_file)
    policy = policy_cls(model_config, inf_checkpoint_path=path)

    return InferenceEngineV2(policy, engine_config)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def build_hf_engine(path: str,
                    engine_config: RaggedInferenceEngineConfig,
                    debug_level: int = logging.INFO) -> InferenceEngineV2:
    """
    Build an InferenceV2 engine for HuggingFace models. This can accept both a HuggingFace
    model name or a path to an Inference-V2 checkpoint.

    Arguments:
        path: Path to the checkpoint. This does not need to point to any files in particular,
            just the directory containing the checkpoint.
        engine_config: Engine configuration. See ``RaggedInferenceEngineConfig`` for details.
        debug_level: Logging level to use. Unless you are actively seeing issues, the recommended
            value is ``logging.INFO``.

    Returns:
        Fully initialized inference engine ready to serve queries.

    Raises:
        ValueError: If the model type is unsupported, or an unsupported OPT variant
            (OPT-350m) is detected.
    """

    if os.path.exists(os.path.join(path, "ds_model_config.pkl")):
        # This directory already holds a DeepSpeed inference-v2 checkpoint.
        return build_engine_from_ds_checkpoint(path, engine_config, debug_level=debug_level)
    else:
        # Set up logging
        inference_logger(level=debug_level)
        # get HF checkpoint engine
        checkpoint_engine = HuggingFaceCheckpointEngine(path)

        # get model config from HF AutoConfig
        model_config = checkpoint_engine.model_config

        # get the policy
        # TODO: generalize this to other models
        if model_config.model_type == "opt":
            # OPT-350m is the only OPT variant without pre-layernorm; it is unsupported.
            if not model_config.do_layer_norm_before:
                raise ValueError(
                    "Detected OPT-350m model. This model is not currently supported. If this is not the 350m model, please open an issue: https://github.com/microsoft/DeepSpeed-MII/issues"
                )
            policy = OPTPolicy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "llama":
            policy = Llama2Policy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "mistral":
            # Ensure we're using the correct version of transformers for mistral
            import transformers
            assert version.parse(transformers.__version__) >= version.parse("4.34.0"), \
                f"Mistral requires transformers >= 4.34.0, you have version {transformers.__version__}"
            policy = MistralPolicy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "mixtral":
            # Ensure we're using the correct version of transformers for mixtral
            import transformers
            # BUGFIX: the assertion message previously said "Mistral"; this branch
            # validates Mixtral support.
            assert version.parse(transformers.__version__) >= version.parse("4.36.1"), \
                f"Mixtral requires transformers >= 4.36.1, you have version {transformers.__version__}"
            policy = MixtralPolicy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "falcon":
            policy = FalconPolicy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "phi":
            policy = PhiPolicy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "qwen":
            policy = QwenPolicy(model_config, checkpoint_engine=checkpoint_engine)
        elif model_config.model_type == "qwen2":
            policy = Qwen2Policy(model_config, checkpoint_engine=checkpoint_engine)
        else:
            raise ValueError(f"Unsupported model type {model_config.model_type}")

        return InferenceEngineV2(policy, engine_config)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/inference_parameter.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from typing import Dict
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
# Dictionary key under which the primary ("core") tensor must be passed to
# ``InferenceParameter.initialize_raw``.
CORE_PARAM = "_ds_core_param_key"

# Mapping from the string form of a torch dtype (i.e. ``str(dtype)``) back to the
# dtype object itself.
STR_TO_DTYPE = {
    "torch.float32": torch.float32,
    "torch.float64": torch.float64,
    "torch.float16": torch.float16,
    "torch.bfloat16": torch.bfloat16,
    "torch.int64": torch.int64,
    "torch.int32": torch.int32,
    "torch.int16": torch.int16,
    "torch.int8": torch.int8,
    "torch.uint8": torch.uint8,
    "torch.bool": torch.bool,
}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class InferenceParameter(torch.Tensor):
    """
    An extension of the torch.Tensor class to support our inference focused features. One important
    thing to note here is that an InferenceParam can be used a torch.Tensor, but outputs of
    torch.Tensor operations will not be InferenceParams.

    Auxiliary tensors (e.g. quantization scales) are attached both as attributes and in
    the ``aux_attrs`` dictionary so they travel with the parameter.
    """

    @staticmethod
    def __new__(cls, tensor, *args, **kwargs):
        """Wrap ``tensor`` as an InferenceParameter, carrying over any aux attrs."""
        new_tensor = super().__new__(cls, tensor, *args, **kwargs)
        if hasattr(tensor, "_aux_attrs"):
            # BUGFIX: read ``_aux_attrs`` (the attribute we just checked for); the
            # previous code read ``tensor.aux_attrs``, which raises AttributeError
            # when ``tensor`` is a plain torch.Tensor carrying ``_aux_attrs``.
            setattr(new_tensor, "_aux_attrs", tensor._aux_attrs)
        return new_tensor

    def to(self, *args, **kwargs):
        """Like ``torch.Tensor.to``, but also moves the auxiliary tensors when the
        first positional argument is a device specification."""
        new_tensor = super().to(*args, **kwargs)
        if hasattr(self, "_aux_attrs"):
            setattr(new_tensor, "_aux_attrs", self.aux_attrs)
            try:
                # Only migrate aux tensors when args[0] parses as a device; otherwise
                # (e.g. a dtype-only .to()) leave them where they are.
                _ = torch.device(args[0])
                for name, attr in new_tensor.aux_attrs.items():
                    new_attr = attr.to(*args, **kwargs)
                    setattr(new_tensor, name, new_attr)
                    new_tensor.aux_attrs[name] = new_attr
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
                # are no longer swallowed. args[0] was not a device spec.
                pass

        return new_tensor

    @classmethod
    def initialize(cls, core_param: torch.Tensor, **kwargs) -> 'InferenceParameter':
        """
        Create the inference parameter.

        Arguments:
            core_param: The primary tensor to wrap.
            **kwargs: Auxiliary tensors to attach; each becomes an attribute and an
                entry in ``aux_attrs``.

        Raises:
            ValueError: If an aux name collides with an existing attribute or an aux
                value is not a tensor.
        """
        param = InferenceParameter(core_param)
        setattr(param, "_aux_attrs", kwargs)

        for attr_name, attr in kwargs.items():
            if hasattr(param, attr_name):
                raise ValueError(f"Attribute {attr_name} already exists on param.")

            if not isinstance(attr, torch.Tensor):
                raise ValueError(f"Attribute {attr_name} must be a tensor.")

            setattr(param, attr_name, attr)

        return param

    @classmethod
    def initialize_raw(cls, **kwargs) -> 'InferenceParameter':
        """
        All kwargs must be torch.Tensors and must include the core parameter.

        BUGFIX: first parameter renamed from ``self`` to ``cls`` -- this is a
        classmethod, so the first argument is the class itself.
        """
        if CORE_PARAM not in kwargs:
            raise ValueError(f"Must provide core parameter, with key {CORE_PARAM}.")

        return InferenceParameter.initialize(kwargs[CORE_PARAM], **kwargs)

    @property
    def aux_attrs(self) -> Dict[str, torch.Tensor]:
        """
        Dictionary of auxiliary attributes.
        """
        return self._aux_attrs
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/inference_utils.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from typing import Dict
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from enum import Enum, IntEnum
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class NormTypeEnum(Enum):
    """Normalization layer flavors recognized by the inference engine."""
    LayerNorm = "layer_norm"
    RMSNorm = "rms_norm"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class DtypeEnum(Enum):
    """Dtype enum whose canonical value is the torch dtype, with string aliases
    (e.g. ``DtypeEnum("fp16")`` and ``DtypeEnum(torch.float16)`` both resolve to
    ``DtypeEnum.fp16``)."""
    # The torch dtype must always be the first value (so we return torch.dtype)
    fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
    fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
    bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat"
    int8 = torch.int8, "torch.int8", "int8"

    # Copied from https://stackoverflow.com/a/43210118
    # Allows us to use multiple values for each Enum index and returns first
    # listed value when Enum is called
    def __new__(cls, *values):
        obj = object.__new__(cls)
        # first value is canonical value
        obj._value_ = values[0]
        for other_value in values[1:]:
            # Register every alias in the value->member map so lookup-by-alias works.
            cls._value2member_map_[other_value] = obj
        obj._all_values = values
        return obj

    def __repr__(self):
        # Show the member name together with all accepted alias values.
        return "<%s.%s: %s>" % (
            self.__class__.__name__,
            self._name_,
            ", ".join([repr(v) for v in self._all_values]),
        )
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Size in bytes of a single element of each supported torch dtype
# (see ``elem_size`` below).
ELEM_SIZES: Dict[torch.dtype, int] = {
    torch.float16: 2,
    torch.bfloat16: 2,
    torch.float32: 4,
    torch.float64: 8,
    torch.int8: 1,
    torch.uint8: 1,
    torch.int16: 2,
    torch.int32: 4,
    torch.int64: 8,
    torch.bool: 1,
}
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class ActivationType(IntEnum):
    """
    Types of activations supported by DS-Inference
    """

    GELU = 0

    RELU = 1

    SILU = 2

    # Gated variants: the projection holds both a gate and an activation half
    # (see ``is_gated`` below).
    GEGLU = 3

    ReGLU = 4

    SiGLU = 5

    IDENTITY = 6

    # Sentinel for an unrecognized/unsupported activation.
    InvalidType = -1
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def is_gated(act_fn: ActivationType) -> bool:
    """
    Return True if the given activation function is gated.

    Accepts either an ``ActivationType`` member or any value coercible to one.
    """
    # Coercing an existing member through the Enum constructor is a no-op,
    # so normalize unconditionally.
    member = ActivationType(act_fn)
    return member in (ActivationType.GEGLU, ActivationType.ReGLU, ActivationType.SiGLU)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def elem_size(dtype: torch.dtype) -> int:
    """
    Return size in bytes of the given dtype.

    Raises:
        ValueError: If the dtype is not present in ``ELEM_SIZES``.
    """
    if dtype not in ELEM_SIZES:
        raise ValueError("Unknown dtype size for {}".format(dtype))
    return ELEM_SIZES[dtype]
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def ceil_div(a: int, b: int) -> int:
    """
    Return ceil(a / b) using exact integer arithmetic.
    """
    quotient, remainder = divmod(a, b)
    # Round away from floor whenever the division is inexact.
    return quotient + (1 if remainder else 0)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc
ADDED
|
Binary file (8.44 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc
ADDED
|
Binary file (8.94 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .attn_output_parameters import *
|
| 7 |
+
from .embedding_parameters import *
|
| 8 |
+
from .mlp_parameters import *
|
| 9 |
+
from .moe_parameters import *
|
| 10 |
+
from .norm_parameters import *
|
| 11 |
+
from .qkv_parameters import *
|
| 12 |
+
from .unembed_parameters import *
|
| 13 |
+
from .invfreq_parameters import *
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (450 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/attn_output_parameters.cpython-310.pyc
ADDED
|
Binary file (1.03 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/embedding_parameters.cpython-310.pyc
ADDED
|
Binary file (896 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/invfreq_parameters.cpython-310.pyc
ADDED
|
Binary file (747 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/mlp_parameters.cpython-310.pyc
ADDED
|
Binary file (2.55 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/norm_parameters.cpython-310.pyc
ADDED
|
Binary file (785 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/qkv_parameters.cpython-310.pyc
ADDED
|
Binary file (4.21 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/unembed_parameters.cpython-310.pyc
ADDED
|
Binary file (935 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
Common Attention Output Parameter Patterns
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class AttentionOutputParameter(ParameterBase):
    """
    Attention output parameter container.

    Note: The differentiation for something like GQA for this matrix is primarily
    encompassed in the sharding logic, which is currently expected to be performed by
    the model implementation.
    """

    params: torch.Tensor
    """
    Unsharded attention output parameter of shape [model_dim, model_dim]
    """

    def finalize(self) -> torch.Tensor:
        # Delegate layout/placement transformation to the owning model implementation.
        return self.inference_model.transform_attn_out_param(self.params)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
Embedding containers.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class EmbeddingParameter(ParameterBase):
    """
    Embedding container. This should be safe to use for all types of embeddings (i.e. word, position,
    and token type).
    """

    params: torch.Tensor
    """
    Vocabulary parameter of shape [vocab_size, model_dim].
    """

    def finalize(self) -> torch.Tensor:
        # Delegate layout/placement transformation to the owning model implementation.
        return self.inference_model.transform_embedding_param(self.params)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
Common InvFreq Parameter Patterns
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class InvFreqParameter(ParameterBase):
    """
    Container for a rotary-embedding inverse-frequency parameter. Unlike the other
    containers, this only casts to the model's activation dtype rather than running
    a model-specific transform.
    """

    # Inverse-frequency tensor as stored in the checkpoint.
    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        # ``activation_dtype`` is an enum whose ``.value`` is the torch dtype.
        return self.params.to(self.inference_model.activation_dtype.value)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
MLP Parameter Containers
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class MLP1Parameter(ParameterBase):
    """
    First MLP projection weight container. This performs a straight pass-through to the
    model implementation for transformation.
    """
    # Full (unsharded) first-projection weight.
    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        # NOTE(cmikeh2): If we are gated but not in the format specified below, we should trigger a permutation here.
        # I am not currently aware of any models that use this format (or how we should even detect it; probably should
        # just be a different param entirely, but until then we'll just assume the format is correct).
        return self.inference_model.transform_mlp_1_param(self.params)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class GatedMLPParameter(ParameterBase):
    """
    Gated MLP projection container.
    """

    gate_params: torch.Tensor
    """
    Weight parameter for the gating matrix.
    """

    up_params: torch.Tensor
    """
    For lack of a better name, the non-gating weight parameters.
    """

    def finalize(self) -> torch.Tensor:
        """
        Our gated format (this is different from InferenceV1!) is to have the gate and activated neurons
        interleaved. So if we have 4 output neurons (two effective neurons) with 4 input neurons, the finalized
        parameter will look like:
        [g0_0, g0_1, g0_2, g0_3]
        [a0_0, a0_1, a0_2, a0_3]
        [g1_0, g1_1, g1_2, g1_3]
        [a1_0, a1_1, a1_2, a1_3]

        As a reference, in inference v1, the format is:
        [g0_0, g0_1, g0_2, g0_3]
        [g1_0, g1_1, g1_2, g1_3]
        [a0_0, a0_1, a0_2, a0_3]
        [a1_0, a1_1, a1_2, a1_3]
        """
        assert self.gate_params.shape[0] == self.up_params.shape[
            0], "Gated MLP parameters must have the same number of neurons."
        total_neurons = self.gate_params.shape[0] + self.up_params.shape[0]

        # NOTE(review): alternate ordering kept for debugging -- flip the order if, even
        # with the correct tokenizer, we get wrong output.
        #fused_param = torch.cat([self.up_params, self.gate_params], dim=-1).reshape(total_neurons, -1)
        # Concatenating along the last dim then reshaping to [2 * neurons, -1]
        # interleaves gate row i with up row i, producing the layout documented above.
        fused_param = torch.cat([self.gate_params, self.up_params], dim=-1).reshape(total_neurons, -1)
        return self.inference_model.transform_mlp_1_param(fused_param)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class MLP2Parameter(ParameterBase):
    """
    Second MLP projection weight container. This performs a straight pass-through to the
    model implementation for transformation.
    """

    params: torch.Tensor
    """
    Full weight parameter.
    """

    def finalize(self) -> torch.Tensor:
        # Delegate layout/placement transformation to the owning model implementation.
        return self.inference_model.transform_mlp_2_param(self.params)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase, ParamList
|
| 9 |
+
"""
|
| 10 |
+
Moe Parameters
|
| 11 |
+
|
| 12 |
+
These parameters are compatible with any model inheriting from ``DSMoETransformerModelBase``.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class MoEGatingWeightParameter(ParameterBase):
    """
    Gating weight matrix.
    """

    params: torch.Tensor
    """
    Projection matrix from the input activations to the gate logits.
    """

    def finalize(self) -> torch.Tensor:
        # Delegate layout/placement transformation to the owning model implementation.
        return self.inference_model.transform_moe_gate_param(self.params)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class UnfusedMoEMLP1Parameter(ParameterBase):
    """
    Container for the first MoE MLP projection when each expert's weights live in a
    separate checkpoint parameter; joins them into one stacked tensor.
    """

    experts: ParamList("n_experts")  # noqa: F821

    def finalize(self) -> torch.Tensor:
        # Stack per-expert tensors along a new leading expert dimension.
        grouped_experts = torch.stack(tuple(self.experts), dim=0)
        return self.inference_model.transform_moe_mlp_1_param(grouped_experts)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class UnfusedMoEMLP2Parameter(ParameterBase):
    """
    Container for the second MoE MLP projection when each expert's weights live in a
    separate checkpoint parameter; joins them into one stacked tensor.
    """

    experts: ParamList("n_experts")  # noqa: F821

    def finalize(self) -> torch.Tensor:
        # Stack per-expert tensors along a new leading expert dimension.
        grouped_experts = torch.stack(tuple(self.experts), dim=0)
        return self.inference_model.transform_moe_mlp_2_param(grouped_experts)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class UnfusedMoEGatedMLPParameter(ParameterBase):
    """
    MoE Parameter for a gated activation function in which the gating matrix is not
    fused in the same parameter as the non-gating matrix.

    This is a stacked version of the ``GatedMLPParameter``. Please see that class for more
    documentation on the layout of the parameters.
    """

    gating_experts: ParamList("n_experts")  # noqa: F821

    up_experts: ParamList("n_experts")  # noqa: F821

    def finalize(self) -> torch.Tensor:
        transposed_experts = []
        for gate, up in zip(self.gating_experts, self.up_experts):
            assert gate.shape[0] == up.shape[0], "Gated MLP parameters must have the same number of neurons."
            total_neurons = gate.shape[0] + up.shape[0]
            # Interleave gate row i with up row i (same trick as GatedMLPParameter).
            fused_expert = torch.cat([gate, up], dim=-1).reshape(total_neurons, -1)
            transposed_experts.append(fused_expert)

        # Stack the fused per-expert tensors along a new leading expert dimension.
        stacked_experts = torch.stack(transposed_experts, dim=0)
        return self.inference_model.transform_moe_mlp_1_param(stacked_experts)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
Common Attention Output Parameter Patterns
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class NormParameter(ParameterBase):
    """
    Simple normalization container.
    """

    # Normalization weight (gamma) tensor.
    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        # Delegate layout/placement transformation to the owning model implementation.
        return self.inference_model.transform_norm_param(self.params)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
Common QKV Parameter Patterns
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class FusedQKVParameter(ParameterBase):
    """
    Traditional fused QKV parameters for QKV projection. This is functionally
    a direct copy.

    src_qkv_w shape: [3 * out_features, in_features]
    qkv_w shape: [3 * out_features, in_features]
    """

    # Already-fused QKV weight; no reordering needed.
    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_qkv_param(self.params)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class UnfusedQKVParameter(ParameterBase):
    """
    QKV parameter container for unfused QKV projection.

    src_param shapes: 3 x [out_features, in_features]
    dst_param shape: [3 x out_features, in_features]
    """

    q_params: torch.Tensor

    k_params: torch.Tensor

    v_params: torch.Tensor

    def finalize(self):
        # Fuse in Q, K, V order along the output-feature dimension.
        fused_param = torch.cat([self.q_params, self.k_params, self.v_params], dim=0)
        return self.inference_model.transform_qkv_param(fused_param)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def megatron_qkv_reshape(param: torch.Tensor, head_size: int, n_heads: int) -> torch.Tensor:
    """
    Convert a Megatron-interleaved QKV parameter ([q0, k0, v0, q1, k1, v1, ...] along
    dim 0) into the grouped [all q heads, all k heads, all v heads] layout.

    Arguments:
        param: QKV parameter of shape [3 * n_heads * head_size, ...].
        head_size: Size of each attention head.
        n_heads: Number of attention heads.

    Returns:
        Reordered tensor with the same shape as ``param``.
    """
    assert param.shape[0] == 3 * n_heads * head_size

    all_heads = torch.chunk(param, chunks=3 * n_heads, dim=0)
    q_heads = all_heads[::3]
    k_heads = all_heads[1::3]
    v_heads = all_heads[2::3]
    # BUGFIX: ``torch.cat`` requires a flat sequence of tensors. The previous code
    # passed ``[q_heads, k_heads, v_heads]`` -- a list of tuples -- which raises a
    # TypeError at runtime. Concatenate the tuples into one flat sequence instead.
    return torch.cat(q_heads + k_heads + v_heads, dim=0)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class MegatronQKVParameter(ParameterBase):
    """
    QKV parameter container for Megatron-style QKV projection. Megatron stores the parameter
    as [n_heads, 3, head_size, in_features] whereas our inference system is built around
    [3, n_heads, head_size, in_features]. This container handles the conversion.

    Note: this container expects the model implementation to implement properties for
    `head_size` and `n_heads`.

    src_qkv_w shape: [3 * out_features, in_features]
    qkv_w shape: [3 * out_features, in_features]
    """

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        head_size = self.inference_model.head_size
        n_heads = self.inference_model.n_heads

        # Regroup the per-head interleaved Q/K/V slices into contiguous Q, K, V blocks.
        transposed_param = megatron_qkv_reshape(self.params, head_size, n_heads)
        return self.inference_model.transform_qkv_param(transposed_param)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def transform_gqa_megatron(src_param: torch.Tensor, head_size: int, n_q_heads: int, n_kv_heads: int) -> torch.Tensor:
    """
    Reorder a Megatron GQA parameter, stored with each KV group's query heads followed
    by its K head and V head, into the [all q, all k, all v] layout.
    """
    assert src_param.shape[0] == (2 * n_kv_heads + n_q_heads) * head_size

    q_per_group = n_q_heads // n_kv_heads

    # View the parameter as [group, head-within-group, head_size, trailing] so each
    # KV group leads.
    grouped = src_param.reshape(n_kv_heads, 2 + q_per_group, head_size, -1)
    trailing = grouped.shape[-1]
    query = grouped[:, :q_per_group, :, :].reshape(-1, trailing)
    key = grouped[:, q_per_group, :, :].reshape(-1, trailing)
    value = grouped[:, q_per_group + 1, :, :].reshape(-1, trailing)
    # Squeeze drops the trailing singleton dimension that appears for bias vectors.
    return torch.cat([query, key, value], dim=0).squeeze()
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class GQAMegatronQKVParameter(ParameterBase):
    """
    QKV parameter for Megatron-style QKV projection with GQA-style QKV projection. In this
    storage format each of the groups is stored consecutively, so there will be multiple q_heads,
    then one k head, and one v head.

    Note: this container expects the model implementation to implement properties for
    `head_size`, `n_q_heads`, and `n_kv_heads`.

    src_qkv_w shape: [(2 * n_kv_heads + n_q_heads) * head_size, in_features]
    qkv_w shape: [(2 * n_kv_heads + n_q_heads) * head_size, in_features]
    """

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        head_size = self.inference_model.head_size
        n_q_heads = self.inference_model.n_heads_q
        n_kv_heads = self.inference_model.n_heads_kv
        # Regroup from per-group [q..., k, v] storage into [all q, all k, all v].
        transposed_param = transform_gqa_megatron(self.params, head_size, n_q_heads, n_kv_heads)
        return self.inference_model.transform_qkv_param(transposed_param)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ...model_implementations.parameter_base import ParameterBase
|
| 9 |
+
"""
|
| 10 |
+
Unembedding containers.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class UnembedParameter(ParameterBase):
    """
    Unembedding (LM head) parameter. In the checkpoint this is often the same weight
    as the embedding, but it is handled separately because its preferred sharding
    differs.
    """

    # Unembedding weight of shape [vocab_size, model_dim].
    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        """Delegate the final layout transform to the owning inference model."""
        raw_weight = self.params
        return self.inference_model.transform_unembed_param(raw_weight)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from typing import Dict, Iterable, Tuple, Optional
|
| 7 |
+
from os import path
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from deepspeed.accelerator import get_accelerator
|
| 12 |
+
from deepspeed.ops.op_builder import RaggedUtilsBuilder
|
| 13 |
+
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
|
| 14 |
+
from .layer_container_base import LayerContainer
|
| 15 |
+
from ..inference_parameter import InferenceParameter, STR_TO_DTYPE
|
| 16 |
+
from ..inference_utils import elem_size
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def pad_to_aligned_offset(offset: int, alignment: int = 256) -> int:
    """
    Round ``offset`` up to the next multiple of ``alignment``.

    Arguments:
        offset: Byte offset to align (assumed non-negative).
        alignment: Alignment boundary in bytes (defaults to 256).

    Returns:
        int: Smallest multiple of ``alignment`` that is >= ``offset``.
    """
    remainder = offset % alignment
    if remainder == 0:
        return offset
    return offset + (alignment - remainder)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class TensorMetadata(DeepSpeedConfigModel):
    """
    A class to represent a tensor specification.
    """
    # Serialized torch dtype string (produced via str(tensor.dtype)); None for placeholder entries.
    dtype: Optional[str]
    # Tensor shape; None for placeholder entries.
    shape: Optional[Tuple[int, ...]]
    # Tensor strides; None for placeholder entries.
    strides: Optional[Tuple[int, ...]]
    # Byte offset into the flattened parameter buffer; -1 marks a missing/optional parameter.
    offset: int
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class ParameterMetadata(DeepSpeedConfigModel):
    """
    A class to represent a parameter specification.
    """
    # Metadata for the primary parameter tensor.
    core_param: TensorMetadata = None
    # Metadata for auxiliary tensors attached to the parameter, keyed by attribute name.
    aux_params: Dict[str, TensorMetadata] = {}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class LayerMetadata(DeepSpeedConfigModel):
    """
    A class to represent a layer specification.
    """
    # Per-parameter metadata, keyed by the parameter's attribute name on the layer container.
    params: Dict[str, ParameterMetadata] = {}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class ModelMetadata(DeepSpeedConfigModel):
    """
    A class to represent a model specification.
    """
    # Name of the policy class that produced this flattened checkpoint.
    policy: str = ""
    # Per-layer metadata, keyed by layer name (e.g. "transformer_layer_0", "non_transformer").
    layers: Dict[str, LayerMetadata] = {}
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def make_param_filename(base: str, rank: int, n_ranks: int) -> str:
    """
    Build the path of the flattened-parameter file for a given rank.

    Arguments:
        base: Directory the checkpoint lives in.
        rank: Rank of the file.
        n_ranks: Total number of ranks.

    Returns:
        str: Filename.
    """
    filename = "params_rank_{}_of_{}.pt".format(rank, n_ranks)
    return path.join(base, filename)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def make_metadata_filename(base: str, rank: int, n_ranks: int) -> str:
    """
    Build the path of the metadata file for a given rank.

    Arguments:
        base: Directory the checkpoint lives in.
        rank: Rank of the file.
        n_ranks: Total number of ranks.

    Returns:
        str: Filename.
    """
    filename = "metadata_rank_{}_of_{}.json".format(rank, n_ranks)
    return path.join(base, filename)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def make_model_config_filename(base: str) -> str:
    """
    Build the path of the serialized model config file.

    Arguments:
        base: Base directory.

    Returns:
        str: Filename.
    """
    config_name = "ds_model_config.json"
    return path.join(base, config_name)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def flatten_inference_model(
    transformer_containers: Iterable[LayerContainer],
    non_transformer_container: LayerContainer,
    policy_name: str,
) -> Tuple[torch.Tensor, ModelMetadata]:
    """
    Flatten the underlying parameters into a single contiguous device buffer, recording
    per-tensor offsets/shapes/strides in a ``ModelMetadata`` so the views can be
    reconstructed later (see ``restore_inference_model``).

    Arguments:
        transformer_containers: Iterable of layer containers corresponding to the transformer
            parameters.
        non_transformer_container: Layer container corresponding to the non-transformer parameters.
        policy_name: The name of the policy class (typically accessed with `type(policy).__name__`).

    Returns:
        Tuple[torch.Tensor, ModelMetadata]: The flattened uint8 buffer and the metadata
            describing every tensor stored in it.
    """
    # NOTE(review): transformer_containers is iterated twice (size pass + copy pass),
    # so it must not be a one-shot generator -- confirm callers pass a list.
    alloc_fn = RaggedUtilsBuilder().load().allocate_view_on

    total_size = 0
    metadata = ModelMetadata(policy=policy_name)

    def process_layer(layer_container: LayerContainer, l_name: str, cur_offset: int) -> int:
        """
        Iterate over the parameters of a single container and collect metadata for the final
        flattened buffer.

        Arguments:
            layer_container: The layer container to process.
            l_name: The name of the layer container to key the metadata.
            cur_offset: The current offset into the flattened buffer.

        Captured Variables:
            metadata: The metadata object to populate.

        Returns:
            int: The updated offset into the flattened buffer.
        """
        # Accessing is_populated raises ValueError when a parameter is missing; surface
        # that with the layer name attached.
        try:
            _ = layer_container.is_populated
        except ValueError as e:
            raise ValueError(f"Layer container {l_name} is not populated.") from e

        layer_metadata = LayerMetadata()

        for p_name in layer_container.annotation_attrs:
            param = getattr(layer_container, p_name)
            param_metadata = ParameterMetadata()

            if param is None:
                # offset=-1 is the sentinel for an absent/optional parameter.
                param_metadata.core_param = TensorMetadata(offset=-1)
                layer_metadata.params[p_name] = param_metadata
                continue

            param_metadata.core_param = TensorMetadata(dtype=str(param.dtype),
                                                       shape=param.shape,
                                                       strides=param.stride(),
                                                       offset=cur_offset)

            # Each tensor's slot is padded up to the alignment boundary.
            cur_offset += pad_to_aligned_offset(elem_size(param.dtype) * param.numel())

            for t_name, tensor in param.aux_attrs.items():
                param_metadata.aux_params[t_name] = TensorMetadata(dtype=str(tensor.dtype),
                                                                   shape=tensor.shape,
                                                                   strides=tensor.stride(),
                                                                   offset=cur_offset)

                cur_offset += pad_to_aligned_offset(elem_size(tensor.dtype) * tensor.numel())

            layer_metadata.params[p_name] = param_metadata

        metadata.layers[l_name] = layer_metadata
        return cur_offset

    # Pass 1: compute total buffer size and per-tensor offsets without moving any data.
    for i, layer in enumerate(transformer_containers):
        l_name = f"transformer_layer_{i}"
        total_size = process_layer(layer, l_name, total_size)

    l_name = "non_transformer"
    total_size = process_layer(non_transformer_container, l_name, total_size)

    buffer = torch.empty(total_size, dtype=torch.uint8, device=get_accelerator().current_device())

    def copy_layer(layer_container: LayerContainer, l_name: str) -> None:
        """
        Local method for copying from the layer container to the flattened buffer.

        Arguments:
            layer_container: The layer container to copy from.
            l_name: The name of the layer container to key the metadata.

        Captured Variables:
            buffer: The flattened buffer to copy into.
            metadata: The metadata object to populate.
        """
        l_metadata = metadata.layers[l_name]
        for p_name in layer_container.annotation_attrs:
            p_metadata = l_metadata.params[p_name]
            param = getattr(layer_container, p_name)

            if param is None:
                continue

            # Allocate a view into the flat buffer at the recorded offset and copy in place.
            core_param = alloc_fn(param, buffer, p_metadata.core_param.offset)
            core_param.copy_(param)

            aux_params = {}

            for t_name, tensor in param.aux_attrs.items():
                t_view = alloc_fn(tensor, buffer, p_metadata.aux_params[t_name].offset)
                aux_params[t_name] = t_view
                t_view.copy_(tensor)

            # Rebind the container attribute to the buffer-backed parameter so the
            # original stand-alone tensors can be freed.
            setattr(layer_container, p_name, InferenceParameter.initialize(core_param, **aux_params))

    # Pass 2: copy every tensor into its slot and rebind containers to the views.
    for i, layer in enumerate(transformer_containers):
        l_name = f"transformer_layer_{i}"
        copy_layer(layer, l_name)

    l_name = "non_transformer"
    copy_layer(non_transformer_container, l_name)

    return buffer, metadata
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def restore_inference_model(buffer: torch.Tensor, metadata: ModelMetadata,
                            transformer_containers: Iterable[LayerContainer],
                            non_transformer_container: LayerContainer) -> None:
    """
    Restore the model from the buffer and metadata. Only views onto ``buffer`` are
    constructed; no tensor data is copied.

    Arguments:
        buffer: Buffer containing the model parameters.
        metadata: Metadata for the model.
        transformer_containers: Iterable of transformer layer containers.
        non_transformer_container: Non-transformer layer container.
    """
    alloc_fn = RaggedUtilsBuilder().load().allocate_view_like

    def restore_layer(layer_container: LayerContainer, l_name: str) -> None:
        """
        Local method for restoring a layer container from a flattened buffer. This
        only constructs views for the parameters onto the buffer. No data movement
        is performed.

        Arguments:
            layer_container: The layer container to restore.
            l_name: The name of the layer container to key the metadata.

        Captured Variables:
            buffer: The flattened buffer to reconstruct views on top of.
            metadata: The metadata object describing the each parameter in the model.
        """
        l_metadata = metadata.layers[l_name]

        for p_name in layer_container.annotation_attrs:
            p_metadata = l_metadata.params[p_name]

            # offset == -1 is the sentinel written by flatten_inference_model for a
            # parameter that was absent (None) at save time.
            if p_metadata.core_param.offset == -1:
                layer_container.direct_injection(p_name, None)
                continue

            # Zero-dim dummy tensor only conveys the target dtype to the view allocator.
            dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[p_metadata.core_param.dtype])
            core_param = alloc_fn(p_metadata.core_param.shape, p_metadata.core_param.strides, dummy_tensor, buffer,
                                  p_metadata.core_param.offset)

            aux_params = {}

            for t_name, t_metadata in p_metadata.aux_params.items():
                dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[t_metadata.dtype])
                t_view = alloc_fn(t_metadata.shape, t_metadata.strides, dummy_tensor, buffer, t_metadata.offset)

                aux_params[t_name] = t_view

            restored_param = InferenceParameter.initialize(core_param, **aux_params)
            # direct_injection bypasses the normal dependency/transform machinery,
            # since the stored tensors are already in their final layout.
            layer_container.direct_injection(p_name, restored_param)

    for i, layer in enumerate(transformer_containers):
        l_name = f"transformer_layer_{i}"
        restore_layer(layer, l_name)

    l_name = "non_transformer"
    restore_layer(non_transformer_container, l_name)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
from abc import ABC, ABCMeta, abstractmethod
|
| 8 |
+
from typing import Any, Iterable, List, Optional, Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from ..config_v2 import RaggedInferenceEngineConfig
|
| 13 |
+
from ..checkpoint import CheckpointEngineBase
|
| 14 |
+
from ..logging import inference_logger
|
| 15 |
+
from .layer_container_base import LayerContainer
|
| 16 |
+
from .inference_model_base import DSInferenceModelBase
|
| 17 |
+
from .flat_model_helpers import (
|
| 18 |
+
flatten_inference_model,
|
| 19 |
+
make_param_filename,
|
| 20 |
+
make_metadata_filename,
|
| 21 |
+
ModelMetadata,
|
| 22 |
+
restore_inference_model,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
# Registry of concrete policy classes keyed by class name; populated automatically
# by PolicyMeta when a policy subclass is defined.
POLICIES = {}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class ContainerMap:
    """
    Mapping from checkpoint parameter-name prefixes to the ``LayerContainer``
    instances that should receive those parameters.
    """

    def __init__(self) -> None:
        self._prefix_map = {}
        self._transformer_params = None
        self._non_transformer_params = None
        # Initialized here so map_param() fails gracefully (rather than with an
        # AttributeError) if the corresponding setters were never called.
        self._transformer_prefixes = []
        self._unmapped_prefixes = []

    @property
    def transformer_params(self) -> Iterable[LayerContainer]:
        # One container per transformer layer, in layer order.
        return self._transformer_params

    @property
    def non_transformer_params(self) -> LayerContainer:
        # Single container for everything outside the transformer stack.
        return self._non_transformer_params

    def set_transformer_params(self, prefixes: Union[str, Iterable[str]], containers: List[LayerContainer]) -> None:
        """
        Register the transformer layer containers and the name prefix(es) under which
        their parameters appear in the checkpoint.
        """
        if not isinstance(containers, list):
            raise ValueError(
                f"The transformer containers should be a list, of one container per layer, but got {type(containers)} instead."
            )

        self._transformer_prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
        self._transformer_params = containers

    def set_non_transformer_params(self, container: LayerContainer) -> None:
        """
        Register the container for the non-transformer parameters.
        """
        self._non_transformer_params = container

    def set_unmapped_params(self, prefixes: Union[str, Iterable[str]]) -> None:
        """
        Register name prefixes whose parameters should be ignored entirely.
        """
        # Normalize a bare string into a single-element list. Without this,
        # map_param() would iterate the string character-by-character and treat
        # each character as a prefix.
        self._unmapped_prefixes = prefixes if isinstance(prefixes, list) else [prefixes]

    def map_param(self, name, parameter) -> None:
        """
        Route a single checkpoint parameter to the container registered for its prefix.

        Arguments:
            name: Fully-qualified parameter name from the checkpoint.
            parameter: The parameter tensor itself.

        Raises:
            ValueError: If no registered container accepts the parameter.
        """
        for unmapped_prefix in self._unmapped_prefixes:
            if name.startswith(unmapped_prefix):
                inference_logger().debug(f"Ignoring: {name} for {unmapped_prefix}")
                return

        for transformer_prefix in self._transformer_prefixes:
            if name.startswith(transformer_prefix):
                # Strip "<prefix>." and take the layer index off the front of the remainder.
                popped_name = name[len(transformer_prefix) + 1:]
                layer_idx = popped_name.split(".")[0]
                assert layer_idx.isdigit(
                ), f"expected name to start w. list index but got {layer_idx} instead, name={name}"
                layer_idx = int(layer_idx)
                inference_logger().debug(
                    f"Setting: {'.'.join(popped_name.split('.')[1:])} for layer-idx={layer_idx} to {parameter.shape}")
                self._transformer_params[layer_idx].set_dependency(".".join(popped_name.split(".")[1:]), parameter)
                return

        try:
            inference_logger().debug(f"Setting: {name} to {parameter.shape}")
            self._non_transformer_params.set_dependency(name, parameter)
        except ValueError as e:
            # Catch the ValueError here from the non_transformer_params because we are knowingly
            # calling it with something that may not match. This should allow us to raise a slightly more
            # informative error message, while chaining the original cause.
            raise ValueError(
                f"Cannot find container for {name}, please double check the Containers/ContainerMap") from e

    def validate(self) -> None:
        """
        Verify that every container was fully populated by the checkpoint load.

        Raises:
            RuntimeError: If any container still has uninitialized parameters.
        """
        if not self._non_transformer_params.is_initialized:
            raise RuntimeError("Non-transformer parameters not fully initialized after checkpoint load.")

        for layer_idx, container in enumerate(self._transformer_params):
            if not container.is_initialized:
                raise RuntimeError(
                    f"Transformer container at index {layer_idx} not fully initialized after checkpoint load.")
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class PolicyMeta(ABCMeta):
    """
    Metaclass that auto-registers every concrete policy subclass in the global
    ``POLICIES`` registry, keyed by class name. The abstract base class itself
    (``InferenceV2Policy``) is deliberately excluded.
    """

    def __new__(cls, name, bases, dct):
        policy_cls = super().__new__(cls, name, bases, dct)
        # Register everything except the abstract base itself.
        if name != "InferenceV2Policy":
            POLICIES[name] = policy_cls
        return policy_cls
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class InferenceV2Policy(ABC, metaclass=PolicyMeta):
    """
    The InferenceV2Policy is the base class for all inference policies. An inference policy
    is responsible for instantiating the inference model and mapping the parameters from the
    checkpoint engine to the model itself.
    """

    def __init__(
        self,
        model_config: Any,
        checkpoint_engine: Optional[CheckpointEngineBase] = None,
        inf_checkpoint_path: Optional[str] = None,
    ) -> None:
        """
        Create the Policy with sufficient context to build the model. There are two supported
        model creation mechanisms.

        The first is the generalized ``checkpoint_engine`` which
        will iterate over the parameters of the model and provide them to the policy. These in
        turn will be sharded/transformed by the model implementation.

        The second is used to re-create a previously serialized DeepSpeed inference model. These
        checkpoints should not be used across different model backend configurations.

        TODO(cmikeh2): Enforce this in code

        Raises:
            ValueError: If neither or both of ``checkpoint_engine`` and
                ``inf_checkpoint_path`` are provided (exactly one is required).
        """
        # Fixed: the original error messages referenced "ds_checkpoint_path", which is
        # not the name of any parameter; the actual keyword is "inf_checkpoint_path".
        if checkpoint_engine is None and inf_checkpoint_path is None:
            raise ValueError("Either checkpoint_engine or inf_checkpoint_path must be provided.")

        if checkpoint_engine is not None and inf_checkpoint_path is not None:
            raise ValueError("Only one of checkpoint_engine or inf_checkpoint_path can be provided.")

        self._checkpoint_engine = checkpoint_engine
        self._inf_checkpoint_path = inf_checkpoint_path
        self._model_config = model_config

    def build_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> DSInferenceModelBase:
        """
        Completely instantiate the inference model. This will both create the ops needed to run the
        model, as well as load the model parameters via the checkpoint engine. For more context
        on each of these components please see ``instantiate_model`` and ``populate_model_parameters``.

        Arguments:
            engine_config: The config that has been used to instantiate the engine. This is used
                to communicate to the model implementation the limits on batches (sequences/tokens)
                and bound the size of intermediate buffers.
            mp_group: Object to enable communication between tensor parallel ranks.

        Returns:
            DSInferenceModelBase: An implementation of the inference model abstraction that will be
                run by the engine.
        """
        self.model = self.instantiate_model(engine_config, mp_group)
        self.populate_model_parameters()
        return self.model

    @abstractmethod
    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> DSInferenceModelBase:
        """
        Instantiate the inference model. Depending on the engine/model config, this could be where
        different model implementations could be selected.

        Note: the ``mp_group`` parameter was added to the abstract signature to match the
        call made by ``build_model``; every working subclass already accepts it.

        Arguments:
            engine_config: The config that has been used to instantiate the engine. This is used
                to communicate to the model implementation the limits on batches (sequences/tokens)
                and bound the size of intermediate buffers.
            mp_group: Object to enable communication between tensor parallel ranks.

        Returns:
            DSInferenceModelBase: An implementation of the inference model abstraction that will be
                run by the engine.
        """
        ...

    @abstractmethod
    def build_container_map(self) -> ContainerMap:
        """
        Build a dictionary representing the structure of the string prefixes leading
        to the parameters to be mapped to the container.

        Returns:
            ContainerMap: An instantiated mapping describing how checkpoint prefixes map
                to ``LayerContainer`` instances.
        """
        raise NotImplementedError()

    def populate_model_parameters(self) -> None:
        """
        This model will iterate over the parameters (as provided by the checkpoint engine) and
        use the container map built by ``build_container_map`` to populate the model
        """

        container_map = self.build_container_map()

        if self._checkpoint_engine is not None:
            # Live checkpoint: route every named parameter into its container, then
            # flatten everything into a single contiguous buffer.
            for name, parameter in self._checkpoint_engine.parameters():
                container_map.map_param(name, parameter)

            buffer, metadata = flatten_inference_model(container_map.transformer_params,
                                                       container_map.non_transformer_params, self.__class__.__name__)
        else:
            # Previously-serialized inference checkpoint: load the rank-local flat
            # buffer and its metadata, then rebuild views onto it.
            buffer_path = make_param_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size)
            metadata_path = make_metadata_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size)

            buffer = torch.load(buffer_path)
            # Context manager fixes the file-handle leak of json.load(open(...)).
            with open(metadata_path, "r") as metadata_file:
                raw_metadata = json.load(metadata_file)
            # NOTE(review): parse_raw expects a JSON string, so the metadata file is
            # presumably a JSON-encoded string (double-encoded) -- confirm against
            # the serialization path.
            metadata = ModelMetadata.parse_raw(raw_metadata)

            restore_inference_model(buffer, metadata, container_map.transformer_params,
                                    container_map.non_transformer_params)

        container_map.validate()

        self.model.set_parameters(transformer=container_map.transformer_params,
                                  non_transformer=container_map.non_transformer_params,
                                  flattened_param_buffer=buffer,
                                  flattened_param_metadata=metadata)
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py
ADDED
|
@@ -0,0 +1,617 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from abc import abstractmethod
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from deepspeed.accelerator import get_accelerator
|
| 12 |
+
from ..config_v2 import RaggedInferenceEngineConfig
|
| 13 |
+
from ..inference_utils import ActivationType, ceil_div, is_gated
|
| 14 |
+
from ..model_implementations import *
|
| 15 |
+
from ..model_implementations.sharding import *
|
| 16 |
+
from ..modules.configs import (
|
| 17 |
+
DSEmbeddingsConfig,
|
| 18 |
+
DSLinearConfig,
|
| 19 |
+
DSMoEConfig,
|
| 20 |
+
DSNormConfig,
|
| 21 |
+
DSSelfAttentionConfig,
|
| 22 |
+
DSUnembedConfig,
|
| 23 |
+
NormTypeEnum,
|
| 24 |
+
PositionalEmbeddingType,
|
| 25 |
+
RotateHalfConfig,
|
| 26 |
+
)
|
| 27 |
+
from ..modules import heuristics
|
| 28 |
+
from ..ragged import (
|
| 29 |
+
DSSequenceDescriptor,
|
| 30 |
+
KVCacheConfig,
|
| 31 |
+
RaggedBatchWrapper,
|
| 32 |
+
)
|
| 33 |
+
from .inference_model_base import (
|
| 34 |
+
DSInferenceModelBase,
|
| 35 |
+
DSModelImplementationConfig,
|
| 36 |
+
MPType,
|
| 37 |
+
)
|
| 38 |
+
from ..inference_parameter import InferenceParameter
|
| 39 |
+
|
| 40 |
+
# functools.cached_property was added in Python 3.8; fall back to a plain
# property on older interpreters.
try:
    from functools import cached_property
except ImportError:

    def cached_property(func):
        # NOTE(review): the fallback recomputes on every access (no caching),
        # which is functionally equivalent but slower.
        return property(func)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class DSTransformerModelBase(DSInferenceModelBase):
    """
    Base implementation for dense transformer inference models. Subclasses supply the
    dimensioning/architectural properties declared below; ``__init__`` then builds the
    standard module set (norm, QKV, attention, attention-out, MLP 1/2, embedding,
    unembedding) via the ``heuristics`` factory functions.

    Dimensioning properties
    """

    @property
    @abstractmethod
    def num_layers(self) -> int:
        """
        Number of the layers in the model
        """
        ...

    @property
    @abstractmethod
    def model_dim(self) -> int:
        """
        Size of embedding projection and residuals.
        """
        ...

    @property
    @abstractmethod
    def vocab_size(self) -> int:
        """
        Size of the vocabulary (including padding).
        """
        ...

    @property
    @abstractmethod
    def head_size(self) -> int:
        """
        Size of each attention head.
        """
        ...

    @property
    @abstractmethod
    def n_heads(self) -> int:
        """
        The number of query heads on the model. This should not take into account
        any dimension reductions from model sharding.
        """
        ...

    @property
    def n_heads_q(self) -> int:
        """
        Alias to n_heads.
        """
        return self.n_heads

    @property
    def n_heads_kv(self) -> int:
        """
        The number of key and value heads on the model. For GQA or MQA, overload this attribute.
        Otherwise it adopts MHA formulations and uses n_heads. This should not take into account
        any dimension reductions from model sharding.
        """
        return self.n_heads

    @property
    @abstractmethod
    def intermediate_dim(self) -> int:
        """
        The size of the (unsharded) intermediate projection dim. For a gated activation function
        this is the size of the input to the second MLP layer. This should not take into account
        any dimension reductions from model sharding.
        """
        ...

    @property
    @abstractmethod
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        """
        The type of positional embedding used by the model.
        """
        ...

    """
    Architectural properties
    """

    @property
    @abstractmethod
    def activation_dtype(self) -> torch.dtype:
        """
        The activation dtype of the model.
        """
        ...

    @property
    @abstractmethod
    def mlp_activation_fn(self) -> ActivationType:
        """
        The activation function used in the MLP.
        """
        ...

    @property
    @abstractmethod
    def norm_type(self) -> NormTypeEnum:
        """
        The type of normalization used in the model.
        """
        ...

    @property
    @abstractmethod
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        """
        The positional embedding configuration for the model.
        """
        ...

    """
    Derived helpers
    """

    @cached_property
    def n_heads_q_local(self) -> int:
        """
        Number of local heads post sharding.
        """
        return get_local_heads(self.tp_rank, self.tp_size, self.n_heads_q, self.n_heads_kv)[0]

    @cached_property
    def n_heads_kv_local(self) -> int:
        """
        Number of local heads post sharding.
        """
        return get_local_heads(self.tp_rank, self.tp_size, self.n_heads_q, self.n_heads_kv)[1]

    @property
    def gated_mlp(self) -> bool:
        """
        Return a boolean to determine whether the model uses a gated activation function.
        """
        return is_gated(self.mlp_activation_fn)

    """
    Method implementations
    """

    def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig,
                 base_mp_group: MPType) -> None:
        """
        Base implementation for initialization. By default, this will initialize
        the traditional components of a transformer model:
            - Embedding
            - QKV projection
            - Self attention
            - Attention output projection
            - Feed forward network
            - Normalization
            - Unembedding

        Arguments:
            config (DSModelImplementationConfig): Model-specific configuration. No assumptions
                should be made about this config that are not closely tied to the specific
                model implementation.
            engine_config (RaggedInferenceEngineConfig): Engine configuration.
            base_mp_group (MPType): Base communication group for Tensor-parallel inference.
        """
        super().__init__(config, engine_config, base_mp_group)

        self.make_norm_layer()
        self.make_qkv_layer()
        self.make_attn_layer()
        self.make_attn_out_layer()
        self.make_mlp_1_layer()
        self.make_mlp_2_layer()
        self.make_embedding_layer()
        self.make_unembedding_layer()
        # Lazily built by kv_cache_config() on first access.
        self._kv_cache_config = None

    ######### Embedding #########
    def make_embedding_layer(self) -> None:
        """
        Performs setup and creates embedding DSModule. This will set the `self.embed` attribute.
        """

        embed_config = DSEmbeddingsConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            residual_dtype=self.activation_dtype,
            embedding_dim=self.model_dim,
        )

        self.embed = heuristics.instantiate_embed(embed_config, self._engine_config)

    def transform_embedding_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Performs embedding sharding along the channels dimension.
        """
        # Until we can do non-contiguous all-gather, we won't shard the embedding parameters.
        # NOTE: activation_dtype appears to be an enum wrapping a torch dtype; `.value`
        # yields the underlying torch.dtype used for the cast.
        param = param.to(self.activation_dtype.value)
        return InferenceParameter.initialize(param)

    ######### Unembedding #########
    def make_unembedding_layer(self) -> None:
        """
        Performs setup and creates an unembedding layer. This implementation assumes
        normalization prior to the LM head projection. If this does not match the model's
        implementation, override this method. This will set the ``self.unembed`` attribute.
        """
        unembed_dim = sharded_unembed_dim(self.vocab_size, self.tp_rank, self.tp_size)

        unembed_config = DSUnembedConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            max_sequences=self._engine_config.state_manager.max_ragged_sequence_count,
            dtype=self.activation_dtype,
            model_dim=self.model_dim,
            vocab_size=unembed_dim,
            norm_type=self.norm_type,
        )

        self.unembed = heuristics.instantiate_unembed(unembed_config, self._engine_config)

        # Scratch buffers for gathering sharded logits across TP ranks: one slot per
        # rank in `_comm_logits`, full-vocab output in `_return_logits`.
        if self.tp_size > 1:
            self._comm_logits = torch.empty(self.tp_size,
                                            self._engine_config.state_manager.max_ragged_sequence_count,
                                            unembed_dim,
                                            device=get_accelerator().current_device(),
                                            dtype=self.activation_dtype.value)
            self._return_logits = torch.empty(self._engine_config.state_manager.max_ragged_sequence_count,
                                              self.vocab_size,
                                              device=get_accelerator().current_device(),
                                              dtype=self.activation_dtype.value)

    def transform_unembed_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Performs sharding along the vocab dimension.
        """
        param = shard_unembed_param(param, self.tp_rank, self.tp_size).to(self.activation_dtype.value)
        return InferenceParameter.initialize(param)

    ######### QKV #########
    def make_qkv_layer(self) -> None:
        """
        Instantiates the linear projection layer for the QKV linear layer. This sets the
        `self.qkv` attribute.
        """
        out_features = qkv_out_features(self.model_dim, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q,
                                        self.n_heads_kv)

        linear_config = DSLinearConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            in_channels=self.model_dim,
            out_channels=out_features,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
        )

        self.qkv = heuristics.instantiate_linear(linear_config, self._engine_config)

    def transform_qkv_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Passes a QKV parameter to the underlying implementation for any necessary
        transformations.

        Args:
            param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
                the shape (out_neurons, in_neurons)
        """
        param = shard_qkv_param(param, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q, self.n_heads_kv)
        return self.qkv.transform_param(param)

    ######### Attention #########
    def make_attn_layer(self) -> None:
        """
        Builds the attention layer for the model. This sets the `self.attn` attribute.
        """
        # Standard scaled dot-product attention scale: 1/sqrt(head_size).
        softmax_scale = 1.0 / (self.head_size**0.5)

        attn_config = DSSelfAttentionConfig(max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
                                            n_heads_q=self.n_heads_q_local,
                                            n_heads_kv=self.n_heads_kv_local,
                                            head_size=self.head_size,
                                            max_sequences=self._engine_config.state_manager.max_ragged_sequence_count,
                                            scale_factor=softmax_scale,
                                            input_dtype=self.activation_dtype,
                                            output_dtype=self.activation_dtype,
                                            positional_embedding_type=self.positional_embedding_type,
                                            positional_embedding_config=self.positional_embedding_config)

        self.attn = heuristics.instantiate_attention(attn_config, self._engine_config)

    def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int,
                            max_new_blocks: int) -> Tuple[int, int]:
        """
        See ``DSInferenceModelBase.get_kv_requirements`` for documentation.

        This method assumes an autoregressive dense attention pattern. Override this method
        if this does not match the model's attention pattern.

        Returns:
            Tuple[int, int]: (number of new tokens that can be scheduled,
            number of additional KV blocks required for them).
        """
        total_tokens = sequence.seen_tokens + max_new_tokens
        req_blocks = ceil_div(total_tokens, self.attn.kv_block_size)
        block_lim = req_blocks - sequence.cur_allocated_blocks

        if block_lim <= max_new_blocks:
            # Enough blocks available: schedule everything requested.
            return max_new_tokens, block_lim

        # Otherwise clamp the token count to what the available blocks can hold.
        token_capacity = (max_new_blocks +
                          sequence.cur_allocated_blocks) * self.attn.kv_block_size - sequence.seen_tokens

        return token_capacity, max_new_blocks

    def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int:
        # NOTE(review): this returns seen_tokens mod block_size, i.e. the occupancy of
        # the trailing (partially filled) block rather than the free slots left in it —
        # confirm the "remaining" naming against callers.
        return sequence.seen_tokens % self.attn.kv_block_size

    def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None:
        """
        See ``DSInferenceModelBase.maybe_allocate_kv`` for documentation.

        This method assumes an autoregressive dense attention pattern. Override this method
        if this does not match the model's attention pattern.
        """
        free_block = self.state_manager.free_blocks[0]
        _, n_needed_blocks = self.get_kv_requirements(sequence, n_new_tokens, free_block)

        if n_needed_blocks > 0:
            new_blocks = self.state_manager.allocate_blocks(n_needed_blocks)
            sequence.extend_kv_cache(new_blocks)

    def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]:
        """
        See ``DSInferenceModelBase.kv_cache_config`` for documentation.

        This method assumes an autoregressive dense attention pattern. Override this method
        if this does not match the model's attention pattern.
        """
        # Built once and memoized in self._kv_cache_config.
        if self._kv_cache_config is None:
            cache_shape = (self.num_layers, self.n_heads_kv_local, self.head_size)
            max_blocks = ceil_div(self.max_sequence_length, self.attn.kv_block_size)
            self._kv_cache_config = KVCacheConfig(block_size=self.attn.kv_block_size,
                                                  cache_shape=cache_shape,
                                                  cache_dtype=self.activation_dtype,
                                                  max_blocks_per_allocation_group=max_blocks)
        return (self._kv_cache_config, )

    def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None:
        """
        See ``DSInferenceModelBase.prepare_batch`` for documentation.

        This method assumes an autoregressive dense attention pattern. Override this method
        if this does not match the model's attention pattern.
        """
        self.attn.build_atoms(wrapped_batch)

    ######### Attention output #########
    def make_attn_out_layer(self) -> None:
        """
        Instantiates the linear projection layer for the attention output linear layer. This sets the
        `self.attn_out` attribute.
        """
        in_features = attn_out_in_features(self.model_dim, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q,
                                           self.n_heads_kv)

        linear_config = DSLinearConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            in_channels=in_features,
            out_channels=self.model_dim,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
        )

        self.attn_out = heuristics.instantiate_linear(linear_config, self._engine_config)

    def transform_attn_out_param(self, param: torch.Tensor) -> Optional[InferenceParameter]:
        """
        Shards an attention output projection parameter and passes it to the underlying
        implementation for any necessary transformations. This will return `None` for bias parameters
        if they are not on TP rank 0.

        Args:
            param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
                the shape (out_neurons, in_neurons).
        """
        param = shard_attn_out_param(param, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q,
                                     self.n_heads_kv)

        # shard_attn_out_param may return None (bias on a non-zero TP rank).
        if param is not None:
            param = self.attn_out.transform_param(param)

        return param

    ######### MLP #########
    def make_mlp_1_layer(self) -> None:
        """
        Instantiates the linear projection layer for the first MLP in the feedforward network.
        This sets the `self.mlp_1` attribute.
        """
        shard_size = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank)

        linear_config = DSLinearConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            in_channels=self.model_dim,
            out_channels=shard_size,
            activation=self.mlp_activation_fn,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
        )

        self.mlp_1 = heuristics.instantiate_linear(linear_config, self._engine_config)

    def transform_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Shards the first MLP parameter and passes it to the underlying implementation
        for any necessary transformations.

        Args:
            param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
                the shape (out_neurons, in_neurons).
        """
        param = shard_mlp_1_param(param, self.tp_rank, self.tp_size, gated=self.gated_mlp)

        return self.mlp_1.transform_param(param)

    def make_mlp_2_layer(self) -> None:
        """
        Instantiates the linear projection layer for the second MLP in the feedforward network.
        This sets the `self.mlp_2` attribute.
        """
        shard_size = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank)

        linear_config = DSLinearConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            in_channels=shard_size,
            out_channels=self.model_dim,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
        )

        self.mlp_2 = heuristics.instantiate_linear(linear_config, self._engine_config)

    def transform_mlp_2_param(self, param: torch.Tensor) -> Optional[InferenceParameter]:
        """
        Shards the second MLP parameter and passes it to the underlying implementation
        for any necessary transformations. This will return `None` for bias parameters
        if they are not on TP rank 0.

        Args:
            param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
                the shape (out_neurons, in_neurons).
        """
        param = shard_mlp_2_param(param, self.tp_rank, self.tp_size)

        if param is not None:
            param = self.mlp_2.transform_param(param)

        return param

    ######### Norm #########
    def make_norm_layer(self) -> None:
        """
        Instantiates the normalization layer for the model. This sets the `self.norm` attribute.

        TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
        but for now we'll just use the same one for all of them.
        """
        norm_config = DSNormConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            type=self.norm_type,
            channels=self.model_dim,
            residual_dtype=self.activation_dtype,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
        )

        self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config)

    def transform_norm_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Passes a normalization parameter to the underlying implementation for any
        necessary transformations.

        TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
        but for now we'll just use the same one for all of them.

        Args:
            param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
                shape (model_dim,)
        """
        return self.norm.transform_param(param)
| 533 |
+
|
| 534 |
+
|
| 535 |
+
class DSMoETransformerModelBase(DSTransformerModelBase):
    """
    Extension of the dense transformer base for Mixture-of-Experts models. Adds the
    expert-count/top-k/score-normalization properties and the MoE layer construction
    and parameter-transform hooks.
    """

    @property
    def n_experts(self) -> int:
        """
        Return the number of experts in the model.
        """
        # Intentionally raises rather than being @abstractmethod so that dense
        # subclasses of the hierarchy are not forced to implement it.
        raise NotImplementedError("Attempted to access an unimplemented number of experts")

    @property
    def n_top_k(self) -> int:
        """
        Number of experts per token.
        """
        raise NotImplementedError("Attempted to access an unimplemented number of experts per token")

    @property
    def normalize_expert_scores(self) -> bool:
        """
        Whether to normalize expert scores. If true, sum(expert_scores) = 1.
        """
        raise NotImplementedError("Attempted to access an unimplemented normalization flag")

    def make_moe_layer(self) -> None:
        """
        Instantiates the MoE layer for the model. This sets the `self.moe` attribute.
        """
        sharded_dim = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank)

        moe_config = DSMoEConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            model_dim=self.model_dim,
            intermediate_features=sharded_dim,
            activation=self.mlp_activation_fn,
            n_experts=self.n_experts,
            top_k=self.n_top_k,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
            normalize_scores=self.normalize_expert_scores,
        )

        self.moe = heuristics.instantiate_moe(moe_config, self._engine_config)

    def transform_moe_gate_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Passes a MoE gate parameter to the underlying implementation for any necessary transformations.

        TODO(cmikeh2): This will need to be updated/overridden for expert parallelism.
        """
        return self.moe.transform_gate_param(param)

    def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter:
        """
        Shards the first MoE param and passes it to the underlying implementation. Since it's possible for an architecture
        to have both MoE and non-MoE layers, this can't be overloaded on the MLP1 transform. Furthermore, since both
        the MoE DSModule owns both MLP1 and MLP2, under certain sharding conditions it's not possible for the model implementation
        to infer from the shape whether to perform a different transformation based on MLP1 or MLP2. This (and the below)
        separations are intended to solve both these issues.

        Args:
            param (torch.Tensor): The parameter to transform. This should have shape (n_experts, out_neurons, in_neurons).
        """
        param = shard_mlp_1_param(param, self.tp_rank, self.tp_size, gated=self.gated_mlp, is_moe=True)

        return self.moe.transform_moe_mlp_1_param(param)

    def transform_moe_mlp_2_param(self, param: torch.Tensor) -> Optional[torch.Tensor]:
        """
        Shards the second MoE param and passes it to the underlying implementation. See the above for context on why this API
        exists.

        This will return `None` for expert bias params not on TP rank 0. NOTE(cmikeh2): Does it make sense to round-robin assign?
        My intuition is that this will make debugging much more difficult for minimal memory reduction.

        Args:
            param (torch.Tensor): The parameter to transform. This should have shape (n_experts, out_neurons, in_neurons).
        """
        param = shard_mlp_2_param(param, self.tp_rank, self.tp_size, is_moe=True)

        if param is not None:
            param = self.moe.transform_moe_mlp_2_param(param)

        return param
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py
ADDED
|
@@ -0,0 +1,355 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import re
|
| 7 |
+
from typing import Type
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from deepspeed.accelerator import get_accelerator
|
| 12 |
+
from .parameter_base import ParameterBase, ParametrizedList
|
| 13 |
+
from ..inference_parameter import InferenceParameter
|
| 14 |
+
|
| 15 |
+
# Currently have dependency loops for the type hints.
InferenceModel = Type["InferenceModel"]  # forward reference; the concrete class lives elsewhere
LayerContainer = Type["LayerContainer"]  # forward reference to the container defined below

# Class-attribute name under which a LayerContainer declares its checkpoint-name ->
# parameter-dependency mapping (consumed and validated by LayerMetaclass.__new__).
MAPPING_KEY = "PARAM_MAPPING"
# Class-attribute name holding the precomputed (prefix, suffix, targets) tuples used
# to resolve wildcard ("*") mappings onto ParametrizedList dependencies.
PLIST_HELPERS = "_ds_plist_strip_vals"
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def make_finalization_callback(all_names: "list[str]"):
    """
    Helper method for building the finalization callback for a LayerContainer. This
    is not client code and should not be used or called directly.

    Args:
        all_names (list[str]): Names of all `ParameterBase` attributes declared on the
            container class. (The original annotation said ``str``, but the value is
            iterated as a collection of attribute names.)

    Returns:
        A callback with signature ``(self, param, finalized_param)`` that the container
        invokes when a parameter finishes loading.
    """

    def finalization_callback(self, param: "ParameterBase", finalized_param: torch.Tensor) -> None:
        """
        Callback for when a parameter is finalized.

        Increments the container's finalized-parameter counter and replaces the
        attribute holding `param` (matched by identity, so aliased attributes are
        all updated) with the realized `finalized_param` Tensor.
        """
        self._finalized_params += 1

        for name in all_names:
            if getattr(self, name) is param:
                setattr(self, name, finalized_param)

    return finalization_callback
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class LayerMetaclass(type):
|
| 43 |
+
"""
|
| 44 |
+
MetaClass for the LayerContainer base class. This class will parse the annotations
|
| 45 |
+
of the class that correspond to `ParameterBase` and create None initializers for each
|
| 46 |
+
as well as a finalization callback that for when each `ParameterBase` is finalized
|
| 47 |
+
and should be replaced with a Tensor.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
def __new__(cls, clsname, bases, attrs):
|
| 51 |
+
|
| 52 |
+
annotations = attrs.get("__annotations__", {})
|
| 53 |
+
|
| 54 |
+
for base in bases:
|
| 55 |
+
# We'll pick up all annotations on any base classes. This will allow us to
|
| 56 |
+
# to use inheritance to share common parameter groups in base classes.
|
| 57 |
+
if hasattr(base, "__annotations__"):
|
| 58 |
+
annotations.update(base.__annotations__)
|
| 59 |
+
|
| 60 |
+
if hasattr(base, MAPPING_KEY):
|
| 61 |
+
if MAPPING_KEY not in attrs:
|
| 62 |
+
# This is likely a fail state. If a parent has MAPPING KEY but the child does
|
| 63 |
+
# not, then we're guaranteed only a subset of the parameters will be mapped.
|
| 64 |
+
attrs[MAPPING_KEY] = {}
|
| 65 |
+
attrs[MAPPING_KEY].update(getattr(base, MAPPING_KEY))
|
| 66 |
+
|
| 67 |
+
all_names = [name for name, annotation in annotations.items() if issubclass(annotation, ParameterBase)]
|
| 68 |
+
|
| 69 |
+
if MAPPING_KEY in attrs:
|
| 70 |
+
# If we have a mapping key at all, then we will enter the validation mode for building
|
| 71 |
+
# helpers for mapping and ensuring we have complete mapping.
|
| 72 |
+
|
| 73 |
+
# First we'll build a flat list of every dependency for this layer.
|
| 74 |
+
all_deps = set()
|
| 75 |
+
for name in all_names:
|
| 76 |
+
parameter_deps = [
|
| 77 |
+
name for name, annotation in annotations[name].__annotations__.items()
|
| 78 |
+
if issubclass(annotation, (torch.Tensor, ParametrizedList))
|
| 79 |
+
]
|
| 80 |
+
|
| 81 |
+
all_deps.update([f"{name}.{dep}" for dep in parameter_deps])
|
| 82 |
+
|
| 83 |
+
# Create static helper for doing the string processing only once.
|
| 84 |
+
attrs[PLIST_HELPERS] = []
|
| 85 |
+
|
| 86 |
+
# Iterate over all the mappings
|
| 87 |
+
for src_name, target_or_targets in attrs[MAPPING_KEY].items():
|
| 88 |
+
if isinstance(target_or_targets, str):
|
| 89 |
+
target_or_targets = [target_or_targets]
|
| 90 |
+
|
| 91 |
+
actual_targets = []
|
| 92 |
+
for target_name in target_or_targets:
|
| 93 |
+
base_dependency, dependency_attr = target_name.split(".")
|
| 94 |
+
|
| 95 |
+
# Check for invalid mappings
|
| 96 |
+
if base_dependency not in all_names:
|
| 97 |
+
raise ValueError(
|
| 98 |
+
"Target parameter \"{}\" not found in this layer. Valid targets are {}".format(
|
| 99 |
+
base_dependency, all_names))
|
| 100 |
+
if dependency_attr not in annotations[base_dependency].__annotations__:
|
| 101 |
+
# This check is not universal (see below) if a single dependency is being
|
| 102 |
+
# mapped to by a single row.
|
| 103 |
+
raise ValueError(
|
| 104 |
+
"Target dependency \"{}\" not found on parameter \"{}\". Valid targets are {}".format(
|
| 105 |
+
dependency_attr, base_dependency, annotations[base_dependency].__annotations__.keys()))
|
| 106 |
+
if target_name not in all_deps:
|
| 107 |
+
raise ValueError(
|
| 108 |
+
"Target dependency \"{}\" was targeted with multiple mapping rules.".format(target_name))
|
| 109 |
+
|
| 110 |
+
# If we've made it this far, the dependency definitely exists.
|
| 111 |
+
actual_targets.append(annotations[base_dependency].__annotations__[dependency_attr])
|
| 112 |
+
|
| 113 |
+
all_deps.remove(target_name)
|
| 114 |
+
|
| 115 |
+
are_plists = [issubclass(target, ParametrizedList) for target in actual_targets]
|
| 116 |
+
if all(are_plists):
|
| 117 |
+
# We can do direct sets on everything but ParametrizedLists, so we'll only explicitly
|
| 118 |
+
# handle these here.
|
| 119 |
+
# TODO(cmikeh2): SPLIT, error if more than 1
|
| 120 |
+
glob_count = src_name.count("*")
|
| 121 |
+
if glob_count > 1:
|
| 122 |
+
raise ValueError(
|
| 123 |
+
"ParametrizedList index inference can only work with a single glob: {}".format(src_name))
|
| 124 |
+
elif glob_count == 0:
|
| 125 |
+
raise ValueError(
|
| 126 |
+
"Must have wildcard (*) in source name for ParametrizedList mapping: {}".format(src_name))
|
| 127 |
+
|
| 128 |
+
wildcard_idx = src_name.find("*")
|
| 129 |
+
prefix = src_name[:wildcard_idx]
|
| 130 |
+
suffix = src_name[wildcard_idx + 1:]
|
| 131 |
+
attrs[PLIST_HELPERS].append((prefix, suffix, target_or_targets))
|
| 132 |
+
elif any(are_plists):
|
| 133 |
+
raise ValueError("Cannot mix ParametrizedLists and Tensors in a single mapping rule.")
|
| 134 |
+
|
| 135 |
+
if len(all_deps) > 0:
|
| 136 |
+
raise ValueError(
|
| 137 |
+
"A parameter mapping was provided for {}, but the following dependencies were not mapped: {}".
|
| 138 |
+
format(clsname, all_deps))
|
| 139 |
+
|
| 140 |
+
attrs["finalization_callback"] = make_finalization_callback(all_names)
|
| 141 |
+
|
| 142 |
+
new_obj = super().__new__(cls, clsname, bases, attrs)
|
| 143 |
+
|
| 144 |
+
setattr(new_obj, "_n_params", len(all_names))
|
| 145 |
+
setattr(new_obj, "_annotation_attrs", all_names)
|
| 146 |
+
|
| 147 |
+
return new_obj
|
| 148 |
+
|
| 149 |
+
    def __call__(cls, *args, **kwargs):
        """
        Metaclass instantiation hook: build the container instance, then replace
        every `ParameterBase` class annotation on it with a live parameter object
        bound to the container's inference model.

        NOTE(review): assumes `instance.inference_model` was set by `__init__`
        before this loop runs — confirm against `LayerContainer.__init__`.
        """
        instance = cls.__new__(cls, *args, **kwargs)
        instance.__init__(*args, **kwargs)

        for name, annotation in instance.__annotations__.items():
            if issubclass(annotation, ParameterBase):
                # TODO(cmikeh2): Do we want to make this a property
                # It might also make sense to do this in the base class __init__
                # but since it is tied with the changes made in __new__ it feels
                # to me like it should be here.
                setattr(instance, name, annotation(instance.inference_model, instance))

        return instance
|
| 163 |
+
|
| 164 |
+
class LayerContainer(metaclass=LayerMetaclass):
    """
    Abstract base class for containing model parameters.

    This is primarily a guidance abstraction since we do not put any restrictions
    on how the parameters are stored.

    To use this class, annotate the class with `ParameterBase` subclasses and give them
    names. As a checkpoint is loaded into this container, the `ParameterBase` instances
    will be replaced with realized Tensors as soon as each of their dependencies are met.

    To enable automatic mapping, add a static attribute `PARAM_MAPPING` to the class
    definition. This should be a dictionary mapping from a source string to one or
    more dependencies.

    ```python
    class MyLayer(LayerContainer):
        PARAM_MAPPING = {
            "path.to.param.dependency": "container_param_1.dependency",
            "path.to.param2.dependency": "container_param_2.dependency",
            "path.to.param3.*.dependency": "container_param_3.list_dependency",
        }

        ...
    ```
    """

    def __init__(self, model: InferenceModel) -> None:
        """
        Initialization of the LayerContainer. This method does not need to be overridden
        for any children classes.

        Args:
            model (InferenceModel): Inference model that will be used to shard and transform
                parameters correctly, as well as provide specific information about the model
                for `ParameterizedList`s that may be part of one of the member `ParameterBase`s.
        """
        self.inference_model = model
        # Incremented by `direct_injection` (and, presumably, by the finalization
        # callback installed by the metaclass) until it reaches `self.n_params`.
        self._finalized_params = 0

    def _initialization_checker(self, check_device: bool = True) -> bool:
        """
        Returns whether or not all parameters have been initialized and transformed by
        the model. Once this returns True, all the `ParameterBase` instances will be
        torch.Tensors.

        Args:
            check_device (bool): If True, additionally require every finalized tensor
                to live on the accelerator's current device.

        Raises:
            ValueError: A counted-as-finalized attribute is neither an
                ``InferenceParameter`` nor ``None``.
            RuntimeError: ``check_device`` is set and a tensor is on the wrong device.
        """
        # Cheap early-out: don't inspect attributes until every parameter has
        # reported finalization.
        if self._finalized_params != self.n_params:
            return False

        for name in self._annotation_attrs:
            tensor = getattr(self, name)
            if tensor is None:
                # Parameters may be explicitly set to None (e.g. optional weights).
                continue
            elif not isinstance(tensor, InferenceParameter):
                raise ValueError("Layer should be finalized, but {} ({}) is neither InferenceParameter or None".format(
                    name, type(tensor)))
            elif check_device and tensor.device != torch.device(get_accelerator().current_device()):
                raise RuntimeError("Layer should be finalized, but {} is not on device {}".format(
                    name,
                    get_accelerator().current_device()))
        return True

    @property
    def is_populated(self) -> bool:
        """
        Returns whether or not all parameters have been populated by the checkpoint engine, but
        does not validate the parameters are on the correct device.
        """
        return self._initialization_checker(check_device=False)

    @property
    def is_initialized(self) -> bool:
        """
        Returns whether or not all parameters have been initialized and transformed by
        the model and are located on the appropriate device. Once this returns True, all
        the `ParameterBase` instances are ``InferenceParameter``s or explicitly set to ``None``.
        """
        return self._initialization_checker()

    @property
    def n_params(self) -> int:
        """
        The number of parameters this container holds. This is a read-only value
        that is set by the metaclass.
        """
        return self._n_params

    @property
    def annotation_attrs(self) -> list:
        # Attribute names of the `ParameterBase` annotations, populated by the metaclass.
        return self._annotation_attrs

    @property
    def mapping_params(self) -> dict:
        # The class-level PARAM_MAPPING dict (empty if the class declared none).
        return getattr(self.__class__, MAPPING_KEY, {})

    @property
    def plist_helpers(self) -> list:
        # (prefix, suffix, targets) triples derived by the metaclass from wildcard
        # mapping rules that target ParametrizedLists.
        return getattr(self.__class__, PLIST_HELPERS, [])

    def direct_injection(self, name: str, tensor: InferenceParameter) -> None:
        """
        Bypass dependency resolution and set a finalized parameter directly.

        Args:
            name (str): Must be one of the annotated parameter attributes.
            tensor (InferenceParameter): The already-transformed parameter.

        Raises:
            ValueError: ``name`` is not an annotated parameter of this container.
        """
        if name not in self._annotation_attrs:
            raise ValueError(f"Cannot directly inject {name}, not a valid parameter.")

        setattr(self, name, tensor)
        self._finalized_params += 1

    def set_dependency(self, dep_name: str, dep_value: torch.Tensor) -> None:
        """
        Set dependency can be used for managing dependencies when a mapping is provided
        in the class definition for the layer. The dep_name here should have any prefix
        for transformer layers removed (such as model.layers.*.attn.qkv.weight -> attn.qkv.weight).

        Resolution order: exact key match in ``PARAM_MAPPING``, then
        ParametrizedList prefix/suffix helpers, then regex-style wildcard match.

        Args:
            dep_name (str): The name of the dependency to set.
            dep_value (torch.Tensor): The value to set the dependency to.

        Raises:
            ValueError: No mapping rule matches ``dep_name``.
        """

        def get_dep_name_target(dep_name: str) -> str:
            """
            Helper method for getting the target name for a dependency from the
            mapping params. Tries to match exact string first, then looks for
            wildcards and attempts regex matching. Will return empty string if
            no match found.
            """
            if dep_name in self.mapping_params:
                # If we have an exact match, it's a direct mapping and we can
                # immediately set the value.
                return self.mapping_params[dep_name]

            matched_targets = []
            for key, target in self.mapping_params.items():
                # Translate the glob-style "*" in mapping keys into a regex wildcard.
                regex_key = key.replace("*", ".*")
                if re.match(regex_key, dep_name):
                    matched_targets.append(target)
            if len(matched_targets) > 1:
                raise ValueError(f"Multiple targets matched for dependency {dep_name}: {matched_targets}")
            if matched_targets:
                return matched_targets[0]
            return ""

        # NOTE(review): this exact-match branch duplicates the first branch of
        # `get_dep_name_target` above; see the refactor TODO further down.
        if dep_name in self.mapping_params:
            # If we have an exact match, it's a direct mapping and we can immediately set
            # the value.
            target = self.mapping_params[dep_name]

            # Convert single targets to a list for consistency
            if isinstance(target, str):
                target = [target]

            for target_name in target:
                # Double setting doesn't set the attribute correctly, so we do a getattr then setattr
                target_param_name, target_dependency_name = target_name.split(".")
                target_param = getattr(self, target_param_name)
                setattr(target_param, target_dependency_name, dep_value)
            return

        # Otherwise we need to map to one of the parameter lists.
        for prefix, suffix, dests in self.plist_helpers:
            if dep_name.startswith(prefix) and dep_name.endswith(suffix):
                # We have a match, so we can set the value.
                # The characters between prefix and suffix are the list index
                # (the part the "*" glob matched in the original mapping key).
                target_idx = int(dep_name[len(prefix):-len(suffix)])

                # Convert single targets to a list for consistency
                if isinstance(dests, str):
                    dests = [dests]

                for dest in dests:
                    target_param_name, target_dependency_name = dest.split(".")
                    target_param = getattr(self, target_param_name)
                    target_dependency = getattr(target_param, target_dependency_name)
                    target_dependency[target_idx] = dep_value
                return

        # TODO: Refactor this with the help of cmikeh2
        # We should be able to combine this with the wildcard matching above.
        target = get_dep_name_target(dep_name)
        if target:
            # Convert single targets to a list for consistency
            if isinstance(target, str):
                target = [target]

            for target_name in target:
                # Double setting doesn't set the attribute correctly, so we do a getattr then setattr
                target_param_name, target_dependency_name = target_name.split(".")
                target_param = getattr(self, target_param_name)
                setattr(target_param, target_dependency_name, dep_value)
            return

        raise ValueError(
            "Could not find a mapping for dependency \"{}\". Check that it is included in the ``MAPPING_PARAMS``. See docstring for more on ``MAPPING_PARAMS``"
            .format(dep_name))
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .policy import MistralPolicy
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (248 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc
ADDED
|
Binary file (1.85 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc
ADDED
|
Binary file (6.84 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc
ADDED
|
Binary file (1.55 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from typing import Iterable, Optional, Tuple
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
import deepspeed.comm as dist
|
| 11 |
+
|
| 12 |
+
from ...allocator import empty_from
|
| 13 |
+
from ...inference_utils import ActivationType, DtypeEnum
|
| 14 |
+
from ...model_implementations import *
|
| 15 |
+
from ...modules.configs import *
|
| 16 |
+
from ...modules.interfaces import *
|
| 17 |
+
from ...ragged import RaggedBatchWrapper
|
| 18 |
+
|
| 19 |
+
from .container import MistralNonTransformerContainer, MistralTransformerContainer
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MistralInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for Mistral models.
    """

    _non_transformer: Optional[MistralNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[MistralTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        # Per-head dimension; assumes model_dim is divisible by n_heads.
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return self._config.intermediate_size

    @property
    def n_heads_kv(self) -> int:
        # Mistral uses grouped-query attention, so KV heads are configured
        # separately from query heads.
        return self._config.num_key_value_heads

    @property
    def activation_dtype(self) -> DtypeEnum:
        if self._config.torch_dtype == torch.float16:
            return DtypeEnum.fp16
        elif self._config.torch_dtype == torch.bfloat16:
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        """
        Map the HF config activation name to the gated (GLU) activation enum.
        The plain names map to gated variants because the MLP here is gated
        (see `mlp_1`/`mlp_2` in `_forward_transformer`).
        """
        activation = self._config.hidden_act.lower()
        # NOTE(review): both "gelu" and "gegelu" map to GEGLU — presumably
        # intentional, but worth confirming against the kernel configs.
        if activation == "gelu":
            return ActivationType.GEGLU
        elif activation == "relu":
            return ActivationType.ReGLU
        elif activation == "gegelu":
            return ActivationType.GEGLU
        elif activation == "silu":
            return ActivationType.SiGLU
        else:
            raise NotImplementedError(f"Activation {activation} not supported")

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.RMSNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        return RotateHalfConfig(theta_base=self._config.rope_theta)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.

        Raises:
            ValueError: The embedding output's last dimension does not match
                the model's hidden size.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                             ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        # Attention: fused QKV projection -> ragged attention -> output projection.
        hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=None)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)

        # Tensor-parallel reduction of the attention output partial sums.
        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        # Fused residual-add + RMSNorm before the MLP.
        residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None)

        # Should be configurable in the future
        hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None)
        hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)

        # Tensor-parallel reduction of the MLP output partial sums.
        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            # Peek-ahead: apply the NEXT layer's attention norm here so each
            # layer starts with already-normalized hidden states.
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        # Final RMSNorm is fused into the unembed call via `gamma`.
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm)

        if self.tp_size > 1:
            # Vocab is sharded across TP ranks: gather each rank's logits slice
            # and reassemble the full vocab dimension.
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            # (tp, tokens, shard) -> (tokens, tp * shard) == (tokens, vocab)
            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        # Embed, then apply layer 0's attention norm up-front; each transformer
        # layer then peek-ahead-normalizes for its successor (see
        # `_forward_transformer`).
        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer(layer_idx, residual, hidden_states, wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
|