ZTWHHH committed on
Commit
0b76046
·
verified ·
1 Parent(s): ceeaaea

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc +0 -0
  2. parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py +43 -0
  4. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py +268 -0
  5. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py +26 -0
  6. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py +19 -0
  7. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py +272 -0
  8. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py +77 -0
  9. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py +255 -0
  10. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py +54 -0
  11. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py +13 -0
  32. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc +0 -0
  33. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc +0 -0
  34. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py +103 -0
  42. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/builder.py +86 -0
  43. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adagrad.py +25 -0
  44. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adam.py +25 -0
  45. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_lion.py +25 -0
  46. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/fused_adam.py +74 -0
  47. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/inference.py +307 -0
  48. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py +33 -0
  49. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py +9 -0
  50. parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc +0 -0
parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (387 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc ADDED
Binary file (6.88 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Optional
7
+ from deepspeed.pydantic_v1 import Field
8
+ from deepspeed.runtime.config_utils import DeepSpeedConfigModel
9
+ from .ragged import DSStateManagerConfig
10
+
11
+
12
# Pydantic config model: fields are validated/coerced by DeepSpeedConfigModel.
class DeepSpeedTPConfig(DeepSpeedConfigModel):
    """ Configure tensor parallelism settings """

    tp_size: int = 1
    """ Number of devices to split the model across using tensor parallelism. """
17
+
18
+
19
class QuantizationConfig(DeepSpeedConfigModel):
    """ Configure quantization settings """

    quantization_mode: Optional[str] = None
    """ The quantization mode in string format. The supported modes are as follows:
        - 'wf6af16', weight-only quantization with FP6 weight and FP16 activation.
    """
    # TODO: may reuse the constants in deepspeed/compression/constants.py
27
+
28
+
29
class RaggedInferenceEngineConfig(DeepSpeedConfigModel):
    """ Sets parameters for DeepSpeed Inference Engine. """

    # NOTE: the `{}` defaults below are validated by pydantic into instances of
    # the annotated config models, so attribute access on them is safe.
    tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp")
    """
    Configuration for tensor parallelism used to split the model across several
    GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`.
    """

    state_manager: DSStateManagerConfig = Field({}, alias="manager")
    """
    Configuration for managing persistent state
    """

    quantization: QuantizationConfig = {}
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import json
8
+ import pickle
9
+ from typing import Iterable, Tuple
10
+
11
+ import torch
12
+
13
+ import deepspeed.comm as dist
14
+
15
+ from deepspeed.accelerator import get_accelerator
16
+ from deepspeed.comm.comm import init_distributed
17
+
18
+ from .model_implementations import InferenceV2Policy
19
+ from .logging import inference_logger
20
+ from .ragged import DSStateManager, RaggedBatchWrapper, PlaceholderSequenceDescriptor
21
+ from .scheduling_utils import SchedulingError, SchedulingResult
22
+ from .model_implementations.flat_model_helpers import make_param_filename, make_metadata_filename
23
+ from .model_implementations.inference_model_base import DSInferenceModelBase
24
+
25
+ from .config_v2 import RaggedInferenceEngineConfig
26
+
27
+ INFERENCE_MODEL_TIMER = "model-forward-inference"
28
+
29
+
30
class InferenceEngineV2:
    """
    Ragged-batch inference engine. Owns a ragged-capable model implementation and
    the persistent sequence/KV-cache state manager, and exposes scheduling
    (``query``/``can_schedule``) and forward-pass (``put``) APIs.
    """

    _config: RaggedInferenceEngineConfig
    """
    Configuration of the inference engine.
    """

    _model: DSInferenceModelBase
    """
    Inference model supporting ragged inference.
    """

    _state_manager: DSStateManager
    """
    Persistent state manager for sequences and KV-cache.
    """

    @property
    def free_blocks(self) -> torch.Tensor:
        """
        Number of free KV blocks. This is a tensor of shape [n_kv_cache_groups] where each
        element is the number of free blocks in the corresponding KV cache group.
        """
        return self._state_manager.free_blocks

    @property
    def n_kv_cache_groups(self) -> int:
        """
        Number of KV cache groups.
        """
        return self._state_manager.n_kv_cache_groups

    def model(self) -> DSInferenceModelBase:
        """
        The model implementation.
        """
        return self._model

    def __init__(self, policy: InferenceV2Policy, engine_config: RaggedInferenceEngineConfig) -> None:
        """
        Create the Inference V2 engine.

        Arguments:
            policy (InferenceV2Policy): Policy for the model implementation. This policy object
                will be used to build the model and load the checkpoint associated with it.
            engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine.
        """
        self._config = engine_config
        self._policy = policy
        self._base_mp_group = self._initialize_tp_group()

        # Build model from policy
        inference_logger().info("Building model...")
        self._model = self._policy.build_model(self._config, self._base_mp_group)
        inference_logger().info("Model built.")

        # Create state manager
        self._batch = RaggedBatchWrapper(self._config.state_manager)
        self._state_manager = DSStateManager(self._config.state_manager,
                                             self._model.kv_cache_config(),
                                             base_mp_group=self._base_mp_group)
        self._model.set_state_manager(self._state_manager)

    def _initialize_tp_group(self):
        """
        Implementation of our TP group initialization.
        """
        init_distributed()
        local_rank = int(os.getenv("LOCAL_RANK", 0))
        get_accelerator().set_device(local_rank)

        if local_rank >= self._config.tensor_parallel.tp_size:
            raise RuntimeError("Local rank is greater than TP size, ensure that the TP config is correct.")

        ranks = list(range(self._config.tensor_parallel.tp_size))
        return dist.new_group(ranks=ranks)

    def put(self,
            batch_uids: Iterable[int],
            batch_tokens: Iterable[torch.Tensor],
            do_checks: bool = True) -> torch.Tensor:
        """
        Put a ragged batch onto the inference engine. This will perform one forward and return
        a Tensor of the shape [len(batch_uids), *output_shape]. Logits for the non-final tokens
        are not calculated.

        Arguments:
            batch_uids: Iterable of uids for the batch on the host
            batch_tokens: Iterable of token tensors for the batch on the host
            do_checks: Check schedulability when it is set to True. You can skip this check for better performance when it has already been completed.
        """

        if do_checks:
            token_lens = [len(tokens) for tokens in batch_tokens]
            schedule_check = self.can_schedule(batch_uids, token_lens)
            if schedule_check != SchedulingResult.Success:
                raise SchedulingError(schedule_check)

        self._batch.clear()
        for uid, tokens in zip(batch_uids, batch_tokens):

            host_seq_desc = self._state_manager.get_or_create_sequence(uid)
            self._model.maybe_allocate_kv(host_seq_desc, tokens.numel())
            host_seq_desc.pre_forward(tokens.numel())

            # We can disable checks since we already validated schedulability.
            self._batch.insert_sequence(host_seq_desc, tokens, do_checks=do_checks)

        # Send all metadata to the device
        self._batch.finalize()

        # Prep all data structures for the actual forward (in anticipation of CG in the future)
        # and also to amortize some of the costs in a more straightforward way.
        self._model.prepare_batch(self._batch)

        # Model implementation will pick up in the forward.
        logits = self._model.forward(self._batch)

        # We return one set of logits per sequence in the batch (saves cost on unembedding)
        assert logits.shape[0] == self._batch.current_sequences

        for uid in batch_uids:
            host_seq_desc = self._state_manager.get_sequence(uid)
            host_seq_desc.post_forward()  # Updates sequence metadata.
            self._model.maybe_free_kv(host_seq_desc)

        return logits

    def query(self, uid: int, max_request_tokens: int, max_request_blocks) -> Tuple[int, torch.Tensor]:
        """
        Determine the number of tokens and KV blocks to reserve for a given request. Given a UID
        (this UID may not be recognized by the model yet), this will return the number of tokens
        and blocks to reserve for the request.

        Arguments:
            uid (int): The UID of the sequence (as tracked by the scheduling entity). If
                this is a new sequence (with a UID unknown to the inference engine), then
                an empty placeholder is created to pass to the occupancy logic.
            max_request_tokens (int): The number of tokens to hypothetically send.
            max_request_blocks: The maximum number of blocks to hypothetically allocate.

        Returns:
            Tuple[int, Optional[int]]: Tuple of free kv blocks and the number of blocks
                required to schedule the sequence.
        """
        seq_desc = self._state_manager.get_sequence(uid)
        if seq_desc is None:
            # Unknown sequence: if we are already tracking the maximum number of
            # sequences, nothing can be reserved for it.
            if (self._state_manager.n_tracked_sequences == self._config.state_manager.max_tracked_sequences):
                return (0, 0)
            seq_desc = PlaceholderSequenceDescriptor()

        req_tokens, req_blocks = self._model.get_kv_requirements(seq_desc, max_request_tokens, max_request_blocks)

        return (req_tokens, req_blocks)

    def can_schedule(self, uids: Iterable[int], lengths: Iterable[int]) -> SchedulingResult:
        """
        Dry run a batch to determine if it can be scheduled. Placeholder sequences will be
        created for any UIDs that are unknown to the inference engine.

        Arguments:
            uids (Iterable[int]): Iterable of UIDs for the batch
            lengths (Iterable[int]): Iterable of lengths for each sequence of the batch. This lengths
                corresponds to the number of tokens to send in the hypothetical forward; history
                tokens will be determined via UID lookup and future tokens are disregarded.

        Returns:
            SchedulingResult: ``SchedulingResult.Success`` if the batch can be scheduled,
                otherwise the specific limit that would be exceeded.
        """

        cur_seqs = self._state_manager.n_tracked_sequences
        free_blocks = self._state_manager.free_blocks
        req_blocks = 0
        batch_len = 0

        if len(uids) > self._config.state_manager.max_ragged_sequence_count:
            # Can only compose a batch from a limited number of sequences
            return SchedulingResult.BatchSequenceLimitExceeded

        for uid, length in zip(uids, lengths):
            seq_desc = self._state_manager.get_sequence(uid)
            if seq_desc is None:
                cur_seqs += 1
                seq_desc = PlaceholderSequenceDescriptor()

            sched_len, sched_blocks = self._model.get_kv_requirements(seq_desc, length, free_blocks)

            if sched_len != length:
                # We ran out of KV cache
                return SchedulingResult.KVCacheLimitExceeded

            batch_len += length
            free_blocks -= sched_blocks

        if cur_seqs > self._config.state_manager.max_tracked_sequences:
            # Would run out of tracking metadata
            return SchedulingResult.EngineSequenceLimitExceeded

        if batch_len > self._config.state_manager.max_ragged_batch_size:
            # Would exceed the maximum batch size
            return SchedulingResult.BatchTokenLimitExceeded

        return SchedulingResult.Success

    def get_remaining_block_capacity(self, uid: int) -> int:
        """
        Get the remaining capacity of the last block already allocated.
        """
        seq_desc = self._state_manager.get_sequence(uid)
        if seq_desc is None:
            return 0
        return self._model.get_remaining_block_capacity(seq_desc)

    def flush(self, uid: int) -> None:
        """
        Remove all state associated with a sequence from the inference engine.

        Arguments:
            uid (int): The UID of the sequence to flush.
        """
        self._state_manager.flush_sequence(uid)

    def serialize(self, save_path: str) -> None:
        """
        Serialize the model to a file.

        Arguments:
            save_path (str): Path to the directory to serialize to.
        """
        param_file_name = make_param_filename(save_path, self._model.tp_rank, self._model.tp_size)
        metadata_file_name = make_metadata_filename(save_path, self._model.tp_rank, self._model.tp_size)

        # Save the flattened parameters
        torch.save(self._model.flattened_params, param_file_name)

        # Use context managers so the file handles are closed deterministically;
        # the original passed bare `open(...)` calls to json.dump/pickle.dump and
        # leaked the handles.
        with open(metadata_file_name, "w") as metadata_file:
            json.dump(self._model.flattened_param_metadata.json(), metadata_file)

        if self._model.tp_rank == 0:
            with open(os.path.join(save_path, "ds_model_config.pkl"), "wb") as config_file:
                pickle.dump(self._model._config, config_file)
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import logging
7
+
8
+ from deepspeed.utils.logging import LoggerFactory
9
+
10
+ inf_logger = None
11
+
12
+
13
def inference_logger(level: int = logging.INFO) -> logging.Logger:
    """
    Return the process-wide inference logger, creating it on first use.
    NOTE: Logging is not cost free. On a 3960X, there is a cost of about
    6 us per call to a no-op logger, so this should be used during setup
    only and not during the inference loop.

    Args:
        level (int, optional): The logging level. Defaults to logging.INFO.
    """
    global inf_logger
    if inf_logger is not None:
        # Already initialized; the level argument is ignored on later calls.
        return inf_logger
    inf_logger = LoggerFactory.create_logger(name="DS-Inference", level=level)
    inf_logger.debug("Inference logger created.")
    return inf_logger
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .inference_model_base import DSInferenceModelBase
7
+ from .inference_transformer_base import DSTransformerModelBase, DSMoETransformerModelBase
8
+ from .inference_policy_base import InferenceV2Policy, ContainerMap
9
+ from .sharding import *
10
+
11
+ # Model Implementations
12
+ from .llama_v2 import *
13
+ from .opt import *
14
+ from .mistral import *
15
+ from .mixtral import *
16
+ from .falcon import *
17
+ from .phi import *
18
+ from .qwen import *
19
+ from .qwen_v2 import *
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from abc import ABC, abstractmethod
7
+ from typing import Iterable, Optional, Tuple, Type
8
+
9
+ import torch
10
+
11
+ import deepspeed.comm as dist
12
+ from ..ragged import DSStateManager, RaggedBatchWrapper
13
+ from ..ragged.manager_configs import KVCacheConfig
14
+ from ..ragged import DSSequenceDescriptor
15
+ from ..model_implementations.layer_container_base import LayerContainer
16
+ from ..config_v2 import RaggedInferenceEngineConfig
17
+ from .flat_model_helpers import ModelMetadata
18
+
19
# functools.cached_property is only available on Python >= 3.8; on older
# interpreters fall back to a plain (non-caching) property, which preserves
# the read-only attribute semantics at the cost of recomputation.
try:
    from functools import cached_property
except ImportError:

    def cached_property(func):
        return property(func)
25
+
26
+
27
+ """
28
+ This abstract class defines the interfaces that a model implementation should implement
29
+ in order to include anything that may be called by the engine. Most models should be able
30
+ to inherit from `DSInferenceTransformerModelBase` to reduce implementation work so it is recommended
31
+ to begin there.
32
+ """
33
+ """
34
+ Placeholder for typing the model config, which can vary based on model implementation/
35
+ """
36
+ DSModelImplementationConfig = Type['DSModelImplementationConfig']
37
+ """
38
+ Placeholder for typing the distributed comm object.
39
+
40
+ TODO(cmikeh2): Replace when we have a more defined API for the inference communication system.
41
+ """
42
+ MPType = Type["MPType"]
43
+
44
+
45
class DSInferenceModelBase(torch.nn.Module, ABC):
    """
    Implementation of a model for inference composable with ragged batching.
    """

    _config: DSModelImplementationConfig
    """
    Model-specific configuration. No abstraction surrounds this yet.
    """

    _engine_config: RaggedInferenceEngineConfig
    """
    Engine configuration.
    """

    _base_mp_group: MPType
    """
    Base communication group for Tensor-parallel inference.
    """

    _non_transformer: Optional[LayerContainer]
    """
    Abstract container for storing both embedding (pre-transformer) and unembedding (post-transformer)
    parameters. This attribute should be None at model instantiation until the Policy sets
    the model parameters. These parameters are grouped together since many model implementations
    will tie the embedding and unembedding parameters together.
    """

    _transformer: Optional[Iterable[LayerContainer]]
    """
    List of abstract containers (1 per layer) for storing transformer (transformer)
    parameters. This attribute should be None at model instantiation until the Policy
    sets the model parameters.
    """

    state_manager: Optional[DSStateManager]
    """
    Since the state manager is lazy initialized, by the engine, it is not guaranteed to be present
    until full initialization.
    """

    def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig,
                 base_mp_group: MPType) -> None:
        """
        Minimal initialization of the model.

        Arguments:
            config (DSModelImplementationConfig): Model-specific configuration. No assumptions
                should be made about this config that are not closely tied to the specific
                model implementation.
            engine_config (RaggedInferenceEngineConfig): Engine configuration.
            base_mp_group (MPType): Base communication group for Tensor-parallel inference.
        """
        super().__init__()
        self._config = config
        self._engine_config = engine_config
        self._base_mp_group = base_mp_group

        # Set to None until the Policy sets the model parameters
        self._non_transformer = None
        self._transformer = None
        self._flattened_param_buffer = None
        self._flattened_param_metadata = None

    @property
    def config(self) -> DSModelImplementationConfig:
        """
        The model config.
        """
        return self._config

    def set_parameters(self, transformer: Iterable[LayerContainer], non_transformer: LayerContainer,
                       flattened_param_buffer: torch.Tensor, flattened_param_metadata: ModelMetadata):
        """
        Set the model parameters for the embedding, transformer, and unembedding containers.
        """
        self._transformer = transformer
        self._non_transformer = non_transformer
        self._flattened_param_buffer = flattened_param_buffer
        self._flattened_param_metadata = flattened_param_metadata

    def set_state_manager(self, state_manager: DSStateManager):
        """
        Sets the state manager attribute. This is called by the inference engine after
        the model is fully initialized.
        """
        self.state_manager = state_manager

    @cached_property
    def tp_rank(self) -> int:
        """
        The rank of the current process.

        # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at
        the frequency we need.
        """
        return dist.get_rank(group=self._base_mp_group)

    @cached_property
    def tp_size(self) -> int:
        """
        The total number of processes.

        # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at
        the frequency we need.
        """
        return dist.get_world_size(group=self._base_mp_group)

    @property
    def model_config(self):
        """
        The model config.
        """
        return self._config

    @property
    def engine_config(self):
        """
        The engine config.
        """
        return self._engine_config

    @property
    def flattened_params(self) -> Optional[torch.Tensor]:
        """
        The flattened parameter buffer.
        """
        return self._flattened_param_buffer

    @property
    def flattened_param_metadata(self) -> Optional[ModelMetadata]:
        """
        The flattened parameter metadata.
        """
        return self._flattened_param_metadata

    @abstractmethod
    def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int,
                            max_new_blocks: Tuple[int, ...]) -> Tuple[int, torch.Tensor]:
        """
        Given a sequence and the number of new tokens in the sequence, determine the
        number of new KV blocks needed to support the sequence. This method is
        used to help the engine provide schedulability APIs and can be used as a helper
        for ``maybe_allocate_kv``.

        Args:
            sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage.
            max_new_tokens (int): Maximum number of tokens to hypothetically schedule.
            max_new_blocks (int): Maximum number of blocks to hypothetically allocate.

        Returns:
            Tuple[int, torch.Tensor]: The tuple of number of tokens scheduled and number
            of blocks allocated (per KV cache). In general, only one of these numbers will
            match the corresponding input argument, but this is not guaranteed.
        """
        raise NotImplementedError()

    @abstractmethod
    def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int:
        """
        Return the number of tokens that still fit in the sequence's last allocated block.
        """
        raise NotImplementedError()

    @abstractmethod
    def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None:
        """
        Given a sequence and the number of new tokens in the sequence, determine
        whether or not additional KV-storage is needed and allocate it if so.

        Args:
            sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage.
            n_new_tokens (int): The number of new tokens in the sequence.
        """
        raise NotImplementedError()

    @abstractmethod
    def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]:
        """
        Return the KV-cache configuration for this model. This should be a tuple of one or more
        KVCacheConfig objects (one for each distinct cache group).
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def max_sequence_length(self) -> int:
        """
        The maximum sequence length supported by the model.
        """
        ...

    def maybe_free_kv(self, sequence: DSSequenceDescriptor) -> None:
        """
        After completing a forward pass, determine whether or not the there are any KV blocks
        that maybe freed since they are no longer in use.

        Consider the following example:

        We have a block size of 4 and a local window size of 8. At the beginning of the forward
        pass there 10 tokens had been seen and the new forward has a size of 4. This would lend
        itself to the following cache structure prior to the forward:
        [[0, 1, 2*, 3*] [4*, 5*, 6*, 7*] [8*, 9*, x, x] [x x x x]]
        Where x's denote empty cache locations and * denote values that are needed for attention
        of the next open slot. After the forward, the cache would look like the following:
        [[0, 1, 2, 3] [4, 5, 6*, 7*] [8*, 9*, 10*, 11*] [12* 13* x x]]
        In this case, the first block is no longer needed since it is not needed for any future
        local attention windows. This function would be responsible for freeing that block.

        Default behavior assumes no local patterns that require freeing and in general should
        be sufficient.
        """
        pass

    @abstractmethod
    def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None:
        """
        This will be called before each forward with the intent of building forward-specific metadata
        about a batch. The intent here is to build data structures like attention atoms without necessarily
        needing to implement graphable kernels to do so.

        Abstract so as to force model implementations to opt out of doing anything here explicitly.
        """
        raise NotImplementedError()

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Complete a forward pass of the model. This interface should be graphable, so it
        should not rely on the ability to use python control flow.
        """
        # BUGFIX: the original signature omitted `self`, so the engine's call
        # `self._model.forward(self._batch)` would bind the model instance to
        # `wrapped_batch` and raise TypeError for the extra argument.
        raise NotImplementedError()
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Create a container object to save model-specific tensors using the policy file above.
7
+
8
+ from deepspeed.inference.v2.model_implementations.common_parameters import *
9
+ from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer
10
+ '''
11
+ # HF Mistral model (mistralai/Mistral-7B-v0.1) looks like this:
12
+ MistralForCausalLM(
13
+ (model): MistralModel(
14
+ (embed_tokens): Embedding(32000, 4096)
15
+ (layers): ModuleList(
16
+ (0-31): 32 x MistralDecoderLayer(
17
+ (self_attn): MistralAttention(
18
+ (q_proj): Linear(in_features=4096, out_features=4096, bias=False)
19
+ (k_proj): Linear(in_features=4096, out_features=1024, bias=False)
20
+ (v_proj): Linear(in_features=4096, out_features=1024, bias=False)
21
+ (o_proj): Linear(in_features=4096, out_features=4096, bias=False)
22
+ (rotary_emb): MistralRotaryEmbedding()
23
+ )
24
+ (mlp): MistralMLP(
25
+ (gate_proj): Linear(in_features=4096, out_features=14336, bias=False)
26
+ (up_proj): Linear(in_features=4096, out_features=14336, bias=False)
27
+ (down_proj): Linear(in_features=14336, out_features=4096, bias=False)
28
+ (act_fn): SiLUActivation()
29
+ )
30
+ (input_layernorm): MistralRMSNorm()
31
+ (post_attention_layernorm): MistralRMSNorm()
32
+ )
33
+ )
34
+ (norm): MistralRMSNorm()
35
+ )
36
+ (lm_head): Linear(in_features=4096, out_features=32000, bias=False)
37
+ )
38
+ '''
39
+
40
+
41
class MistralTransformerContainer(LayerContainer):
    """
    Transformer layer container for the Mistral model.
    """
    # Parameters of a single Mistral decoder layer (see the module tree above).
    qkv_w: UnfusedQKVParameter
    attn_out_w: AttentionOutputParameter
    mlp_1_w: GatedMLPParameter
    mlp_2_w: MLP2Parameter
    attn_norm_gamma: NormParameter
    mlp_norm_gamma: NormParameter

    # Maps HF checkpoint tensor names (relative to a decoder layer) onto the
    # container parameters declared above.
    PARAM_MAPPING = {
        "self_attn.q_proj.weight": "qkv_w.q_params",
        "self_attn.k_proj.weight": "qkv_w.k_params",
        "self_attn.v_proj.weight": "qkv_w.v_params",
        "self_attn.o_proj.weight": "attn_out_w.params",
        "mlp.gate_proj.weight": "mlp_1_w.gate_params",
        "mlp.up_proj.weight": "mlp_1_w.up_params",
        "mlp.down_proj.weight": "mlp_2_w.params",
        "input_layernorm.weight": "attn_norm_gamma.params",
        "post_attention_layernorm.weight": "mlp_norm_gamma.params",
    }
63
+
64
+
65
class MistralNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the Mistral model.
    """
    # Embedding, unembedding (lm_head) and final norm parameters.
    word_emb: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm: NormParameter

    # Maps full HF checkpoint tensor names onto the container parameters above.
    PARAM_MAPPING = {
        "model.embed_tokens.weight": "word_emb.params",
        "model.norm.weight": "final_norm.params",
        "lm_head.weight": "word_unembed.params",
    }
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import weakref
7
+ from abc import abstractmethod
8
+ from typing import Type
9
+
10
+ import torch
11
+
12
+ # Currently have dependency loops for the type hints.
13
+ InferenceModel = Type["InferenceModel"]
14
+ LayerContainer = Type["LayerContainer"]
15
+
16
+ MAPPING_KEY = "PARAM_MAPPING"
17
+
18
+
19
def make_param_getter(clsname, param):
    """
    Build the standard read accessor for a parameter property.

    The value is stored on the instance under a name mangled with the owning
    class name, so parameter names cannot collide across classes.
    """
    storage_name = f"__{clsname}__{param}"

    def param_getter(self):
        return getattr(self, storage_name)

    return param_getter
28
+
29
+
30
def make_param_setter(clsname, param):
    """
    Build the write accessor for a parameter property.

    Storing a value also notifies the owner via ``complete_component`` so the
    parameter can be finalized once every dependency has been supplied.
    """
    storage_name = f"__{clsname}__{param}"

    def param_setter(self, value):
        setattr(self, storage_name, value)
        self.complete_component()

    return param_setter
41
+
42
+
43
def make_readonly_setter():
    """
    Build a setter that rejects any direct assignment.

    Used for ``ParametrizedList``-backed properties, which must be populated
    element-by-element rather than replaced wholesale.
    """

    def _reject_assignment(self, value):
        raise ValueError("Cannot set a ParametrizedList directly.")

    return _reject_assignment
52
+
53
+
54
class ParameterMetaclass(type):
    """
    Metaclass for `ParameterBase`. It inspects the class annotations and, for
    every dependency annotated as a `torch.Tensor` or a `ParametrizedList`
    subclass, installs a property that stores the value under a class-mangled
    attribute name. Tensor dependencies get a setter that notifies the owning
    parameter; list dependencies are read-only as attributes and are populated
    element-by-element instead.
    """

    def __new__(cls, clsname, bases, attrs):

        # Only Tensor / ParametrizedList annotations count as dependencies;
        # any other annotation is left untouched.
        annotations = attrs.get("__annotations__", {})
        dependencies = {
            name: annotation
            for name, annotation in annotations.items() if issubclass(annotation, (torch.Tensor, ParametrizedList))
        }
        n_dependencies = len(dependencies)

        # Create properties for each of our dependencies
        for d_name, d_type in dependencies.items():
            if issubclass(d_type, ParametrizedList):
                assert hasattr(
                    d_type, "count_attr"
                ), "ParametrizedList must have a count_attr attribute to access on the inference module."
                # Lists may not be assigned wholesale; elements are set one at
                # a time through the ParametrizedList itself.
                attrs[d_name] = property(make_param_getter(clsname, d_name), make_readonly_setter())
            else:  # torch.Tensor
                attrs[d_name] = property(make_param_getter(clsname, d_name), make_param_setter(clsname, d_name))

        new_cls = super().__new__(cls, clsname, bases, attrs)
        # Used by ParameterBase.complete_component to decide when to finalize.
        new_cls.n_dependencies = n_dependencies

        return new_cls

    def __call__(cls, *args, **kwargs):
        new_obj = super().__call__(*args, **kwargs)
        # NOTE(review): super().__call__ already runs __init__; this explicit
        # call runs it a second time — confirm this is intentional.
        new_obj.__init__(*args, **kwargs)

        setattr(new_obj, "dest_param", None)

        # Initialize our dependencies to None/empty `ParametrizedList`s so the
        # property getters have backing storage before anything is loaded.
        for name, annotation in new_obj.__annotations__.items():
            if issubclass(annotation, ParametrizedList):
                #TODO(jeff): update assert with this, model implementation attribute does not align or missing wrt the ParametrizedList attributes
                assert hasattr(
                    new_obj.inference_model, annotation.count_attr
                ), f"new_obj={new_obj.__class__.__name__}, name={name}, annotation.count_attr={annotation.count_attr}"
                # The list's length comes from the inference model's count attribute.
                param_list = annotation(new_obj, getattr(new_obj.inference_model, annotation.count_attr))
                setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", param_list)
            else:  # torch.Tensor
                setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", None)

        return new_obj
105
+
106
+
107
class ParameterBase(metaclass=ParameterMetaclass):
    """
    A ParameterBase allows us to consolidate tracking the dependencies of loading a parameter from
    a checkpoint into a single object. This class should not be used directly, but rather subclassed
    with its dependencies declared as class annotations of type `torch.Tensor` and/or
    `ParametrizedList` (see `ParameterMetaclass`).
    """

    # inference_model: InferenceModel
    """
    Inference model that will provide context on how to shard and transform the parameter.
    """

    #completed_components: int
    """
    How many of the layer dependencies have been met. This is used to determine when the parameter
    is ready to be finalized. A ParametrizedList counts as a single dependency for the purposes
    of this counter.
    """

    def __init__(self, model: InferenceModel, parent_container: LayerContainer) -> None:
        """
        Direct constructor. This should not be called from client code.

        Args:
            model (InferenceModel): Inference model that will be used to shard and transform the
                parameter in `finalize`.
            parent_container (LayerContainer): The parent container that this parameter is a member
                of. We will build a weakref to this container to call the finalization callback.
        """
        self.inference_model = model
        self.completed_components = 0
        # weakref avoids keeping the owning container alive through its own parameter.
        self.parent_container = weakref.ref(parent_container)

    @abstractmethod
    def finalize(self) -> torch.Tensor:
        """
        Finalize the parameter after all of its source parameters have been set. This method
        will be automatically called when all inputs have been set. It should return the Tensor
        with all transformations performed on it.
        """
        pass

    def complete_component(self) -> None:
        """
        Mark a component as completed. This should be called by the relevant setter of a direct
        property or a ParametrizedList. This method will automatically call `finalize` when all
        dependencies have been met and then call the finalization callback on the parent container.

        Once the finalization callback has been called, the parameter will be replaced with the
        finalized tensor on the parent container, and this instance will be destroyed.
        """
        self.completed_components += 1

        # Not all dependencies are loaded yet; wait for the remaining setters.
        if self.completed_components != self.n_dependencies:
            return

        finalized_param = self.finalize()
        self.parent_container().finalization_callback(self, finalized_param)
165
+
166
+
167
class ParametrizedList:
    """
    A ParametrizedList is a list of parameters that are dependencies
    of a `ParameterBase` but may vary in length depending on the model
    configuration (rather than architecture). For example, a MoE layer
    may have different number of experts depending on the size of the model.

    This class is used to manage these lists and provide integer indexing
    of a single component rather than accessing names directly. For example,
    it tends to be more natural to access the 8th expert with `experts[8]`
    rather than a name like `expert_8`, especially as an attribute.

    To inherit from this class, set the static variable `count_attr`.

    ```python
    class MyParametrizedList(ParametrizedList):
        count_attr: str = "my_list_count"
    ```

    In the above example, `my_list_count` should be an accessible attribute
    of the inference model (i.e. via `self.inference_model.my_list_count`).

    NOTE: There are some APIs in which this type cannot be used as if it is
    just a list of Tensors. For example, `torch.cat(param_list)` will not work.
    However, you can make it compatible with a tuple wrapper:
    `torch.cat(tuple(param_list))`
    """

    n_params: int
    """
    Number of params this list contains.
    """

    param: ParameterBase
    """
    WeakRef to the owning parameter.
    """

    def __init__(self, param: ParameterBase, n_params: int) -> None:
        """
        Constructor. Should not be called from client code.

        Args:
            param (ParameterBase): The owning parameter.
            n_params (int): The number of parameters this list contains, read from
                the inference model's `count_attr` attribute by the metaclass.
        """
        self.n_params = n_params
        self.set_params = 0
        self.param = weakref.ref(param)
        self._params = [None] * n_params

    def __getitem__(self, index):
        return self._params[index]

    def __setitem__(self, index, value):
        # Each slot is write-once; a second write indicates a loader bug.
        if self._params[index] is not None:
            raise ValueError("Cannot set a parameter twice.")

        self._params[index] = value
        self.set_params += 1

        # Still waiting for more elements before the list counts as complete.
        if self.set_params != self.n_params:
            return

        # All slots filled: the whole list counts as one completed dependency.
        self.param().complete_component()

    def __iter__(self):
        return iter(self._params)
235
+
236
+
237
def ParamList(attr: str):
    """
    Helper to create a subclass of ParametrizedList with the desired `count_attr`.

    In this manner, we can annotate the type of a Parameter dependency with the
    following:

    ```python
    class CustomParameter(ParameterBase):
        dependency_list: ParamList("dependencies_count_name")
    ```

    where "dependencies_count_name" is the name of the attribute on the inference model.
    """
    return type("ParametrizedListInstance", (ParametrizedList, ), {"count_attr": attr})
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from enum import Enum
7
+
8
+
9
class SchedulingResult(Enum):
    """
    Result codes for validating whether a proposed batch can be scheduled.
    """

    Success = 0
    """
    The proposed batch is valid and can be scheduled.
    """

    EngineSequenceLimitExceeded = 1
    """
    The proposed batch would overflow the number of concurrent sequences the engine may support.
    """

    BatchSequenceLimitExceeded = 2
    """
    The proposed batch contains more sequences than the engine was configured
    to support in a single forward.
    """

    BatchTokenLimitExceeded = 3
    """
    The proposed batch contains more tokens than the engine was configured
    to support in a single forward.
    """

    KVCacheLimitExceeded = 4
    """
    The proposed batch would require more KV cache to be allocated than the engine
    currently has available.
    """

    SequenceTokenLimitExceeded = 5
    """
    The proposed batch contains a sequence that is longer than the engine/model can support.
    """
+ """
43
+
44
+
45
class SchedulingError(RuntimeError):
    """
    Raised when a proposed batch fails one of the engine's scheduling checks.
    """

    result: SchedulingResult
    """
    The failed result of the scheduling check. Guaranteed to not be SchedulingResult.Success.
    """

    def __init__(self, result: SchedulingResult) -> None:
        super().__init__(f"Batch scheduling failed with result {result}")
        self.result = result
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc ADDED
Binary file (903 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc ADDED
Binary file (3.34 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc ADDED
Binary file (23.3 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc ADDED
Binary file (1.64 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc ADDED
Binary file (4.36 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc ADDED
Binary file (4.8 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc ADDED
Binary file (3.24 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc ADDED
Binary file (1.46 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc ADDED
Binary file (2.5 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc ADDED
Binary file (3 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
6
+
7
+ from .fused_adam import FusedAdamBuilder
8
+ from .async_io import AsyncIOBuilder
9
+ from .no_impl import NotImplementedBuilder
10
+ from .cpu_adam import CPUAdamBuilder
11
+ from .cpu_adagrad import CPUAdagradBuilder
12
+ from .cpu_lion import CPULionBuilder
13
+ from .inference import InferenceBuilder
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (583 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc ADDED
Binary file (3.45 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc ADDED
Binary file (3.37 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc ADDED
Binary file (9.54 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import distutils.spawn
7
+ import subprocess
8
+
9
+ from .builder import NPUOpBuilder
10
+
11
+
12
class AsyncIOBuilder(NPUOpBuilder):
    """
    Builder for the libaio-backed asynchronous I/O extension on NPU targets.
    """
    BUILD_VAR = "DS_BUILD_AIO"
    NAME = "async_io"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        """Fully-qualified module path of the compiled op."""
        return f'deepspeed.ops.aio.{self.NAME}_op'

    def sources(self):
        """C++ translation units compiled into the extension."""
        return [
            'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
            'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
            'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
            'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
            'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
        ]

    def include_paths(self):
        """Base include paths plus the aio-specific header directories."""
        args = super().include_paths()
        args += ['csrc/aio/py_lib', 'csrc/aio/common']
        return args

    def cxx_args(self):
        """
        Compiler flags for the op.

        -O0 is deliberate: performance is bound by I/O, so debuggability wins.
        The C++ standard follows torch's requirement (c++17 from torch 2.1).
        """
        args = super().cxx_args()
        # -O0 for improved debugging, since performance is bound by I/O
        CPU_ARCH = self.cpu_arch()
        SIMD_WIDTH = self.simd_width()
        import torch  # Keep this import here to avoid errors when building DeepSpeed wheel without torch installed
        TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2])
        # Fix: compare the full (major, minor) tuple. The original condition
        # `TORCH_MAJOR >= 2 and TORCH_MINOR >= 1` would wrongly fall back to
        # c++14 for any future major version with minor 0 (e.g. 3.0).
        if (TORCH_MAJOR, TORCH_MINOR) >= (2, 1):
            CPP_STD = '-std=c++17'
        else:
            CPP_STD = '-std=c++14'
        return args + [
            '-g',
            '-Wall',
            '-O0',
            CPP_STD,
            '-shared',
            '-fPIC',
            '-Wno-reorder',
            CPU_ARCH,
            '-fopenmp',
            SIMD_WIDTH,
            '-laio',
        ]

    def extra_ldflags(self):
        """Link against libaio in addition to the base NPU link flags."""
        args = super().extra_ldflags()
        return args + ['-laio']

    def check_for_libaio_pkg(self):
        """
        Probe known package managers for the libaio development package and,
        when a manager is present but the package is missing, warn with the
        matching install hint. Returns True if any manager reports it installed.
        """
        libs = dict(
            dpkg=["-l", "libaio-dev", "apt"],
            pacman=["-Q", "libaio", "pacman"],
            rpm=["-q", "libaio-devel", "yum"],
        )

        found = False
        for pkgmgr, data in libs.items():
            flag, lib, tool = data
            path = distutils.spawn.find_executable(pkgmgr)
            if path is not None:
                cmd = f"{pkgmgr} {flag} {lib}"
                result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                if result.wait() == 0:
                    found = True
                else:
                    self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
                # Only the first package manager found on the system is consulted.
                break
        return found

    def is_compatible(self, verbose=True):
        # Check for the existence of libaio by using distutils
        # to compile and link a test program that calls io_submit,
        # which is a function provided by libaio that is used in the async_io op.
        # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
        # respectively to specify the directories for libaio.h and libaio.so.
        aio_compatible = self.has_function('io_pgetevents', ('aio', ))
        if verbose and not aio_compatible:
            self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")

            # Check for the libaio package via known package managers
            # to print suggestions on which package to install.
            self.check_for_libaio_pkg()

            self.warning(
                "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
            )
        return super().is_compatible(verbose) and aio_compatible
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/builder.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import re
7
+ import os
8
+ try:
9
+ import torch_npu
10
+ except ImportError as e:
11
+ pass
12
+
13
+ try:
14
+ # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
15
+ # if successful this also means we're doing a local install and not JIT compile path
16
+ from op_builder import __deepspeed__ # noqa: F401 # type: ignore
17
+ from op_builder.builder import OpBuilder
18
+ except ImportError:
19
+ from deepspeed.ops.op_builder.builder import OpBuilder
20
+
21
+
22
class NPUOpBuilder(OpBuilder):
    """
    Base builder for Ascend NPU ops. Locates the CANN toolkit and the
    torch_npu installation and contributes the compiler/linker flags all
    NPU extensions share.
    """
    _ascend_path = None
    _torch_npu_path = None
    _cann_version = None

    def __init__(self, name):
        super().__init__(name)
        self._ascend_path = self.installed_cann_path()
        self._torch_npu_path = os.path.join(os.path.dirname(os.path.abspath(torch_npu.__file__)))
        try:
            self._cann_version = self.installed_cann_version(self.name)
        except BaseException:
            # Builders stay constructible without CANN; compilation will fail later.
            print(f"{self.name} ascend_cann is missing, npu ops cannot be compiled!")

    def cann_defs(self):
        """Preprocessor define toggling CANN-dependent code paths."""
        if self._cann_version:
            return '-D__ENABLE_CANN__'
        return '-D__DISABLE_CANN__'

    def installed_cann_path(self):
        """
        Return the CANN install root from ASCEND_HOME_PATH, or None when unset.

        Fix: the original condition
        `"ASCEND_HOME_PATH" in os.environ or os.path.exists(os.environ["ASCEND_HOME_PATH"])`
        evaluated `os.environ[...]` exactly when the key was absent, raising
        KeyError instead of returning None.
        """
        return os.environ.get("ASCEND_HOME_PATH")

    def installed_cann_version(self, name=""):
        """
        Walk the CANN install tree for an `ascend_*_install.info` file and
        return the version string recorded there ("" if none is found).
        """
        ascend_path = self.installed_cann_path()
        assert ascend_path is not None, "CANN_HOME does not exist, unable to compile NPU op(s)"
        cann_version = ""
        for dirpath, _, filenames in os.walk(os.path.realpath(ascend_path)):
            if cann_version:
                break
            install_files = [file for file in filenames if re.match(r"ascend_.*_install\.info", file)]
            if install_files:
                filepath = os.path.join(dirpath, install_files[0])
                with open(filepath, "r") as f:
                    for line in f:
                        if line.find("version") != -1:
                            cann_version = line.strip().split("=")[-1]
                            break
        return cann_version

    def include_paths(self):
        """Base include paths plus the CANN and torch_npu header directories."""
        paths = super().include_paths()
        paths += [os.path.join(self._ascend_path, 'include'), os.path.join(self._torch_npu_path, 'include')]
        return paths

    def cxx_args(self):
        """Common compiler flags, hardening options, and library search paths."""
        args = super().cxx_args()
        args += ['-O3', '-std=c++17', '-g', '-Wno-reorder', '-fopenmp']
        args += ['-fstack-protector-all', '-Wl,-z,relro,-z,now,-z,noexecstack', '-Wl,--disable-new-dtags,--rpath']
        args += [
            self.cann_defs(),
            self.cpu_arch(),
            self.simd_width(), '-L' + os.path.join(self._ascend_path, 'lib64'),
            '-L' + os.path.join(self._torch_npu_path, 'lib')
        ]
        return args

    def extra_ldflags(self):
        """Link flags pulling in the CANN runtime and torch_npu libraries."""
        flags = super().extra_ldflags()
        flags += [
            '-L' + os.path.join(self._ascend_path, 'lib64'), '-lascendcl',
            '-L' + os.path.join(self._torch_npu_path, 'lib'), '-ltorch_npu'
        ]
        return flags
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adagrad.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import NPUOpBuilder
7
+
8
+
9
class CPUAdagradBuilder(NPUOpBuilder):
    """NPU op builder for the CPU Adagrad optimizer extension."""

    BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
    NAME = "cpu_adagrad"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        # Fully-qualified module path of the compiled op.
        return f'deepspeed.ops.adagrad.{self.NAME}_op'

    def sources(self):
        # C++ translation units for this op.
        return ['csrc/adagrad/cpu_adagrad.cpp']

    def include_paths(self):
        # Base NPU include paths plus the shared csrc headers.
        return super().include_paths() + ['csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adam.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import NPUOpBuilder
7
+
8
+
9
class CPUAdamBuilder(NPUOpBuilder):
    """NPU op builder for the CPU Adam optimizer extension."""

    BUILD_VAR = "DS_BUILD_CPU_ADAM"
    NAME = "cpu_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        # Fully-qualified module path of the compiled op.
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        # C++ translation units for this op.
        return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']

    def include_paths(self):
        # Base NPU include paths plus the shared csrc headers.
        return super().include_paths() + ['csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_lion.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import NPUOpBuilder
7
+
8
+
9
class CPULionBuilder(NPUOpBuilder):
    """NPU op builder for the CPU Lion optimizer extension."""

    BUILD_VAR = "DS_BUILD_CPU_LION"
    NAME = "cpu_lion"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        # Fully-qualified module path of the compiled op.
        return f'deepspeed.ops.lion.{self.NAME}_op'

    def sources(self):
        # C++ translation units for this op.
        return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp']

    def include_paths(self):
        # Base NPU include paths plus the shared csrc headers.
        return super().include_paths() + ['csrc/includes']
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/fused_adam.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import NPUOpBuilder
7
+
8
+ try:
9
+ import torch_npu
10
+ except ImportError as e:
11
+ pass
12
+
13
+
14
class NPUFusedAdam:
    """
    Python stand-in for the CUDA multi_tensor_adam op, delegating each tensor
    group to the torch_npu fused Adam/AdamW kernels.
    """

    @staticmethod
    def multi_tensor_adam(chunk_size, noop_flag_buffer, tensor_lists, lr, beta1, beta2, epsilon, step, adam_w_mode,
                          bias_correction, weight_decay, *args):
        """
        Apply one Adam (or AdamW when `adam_w_mode`) step, in place, to every
        tensor group in `tensor_lists` ([grads, params, exp_avgs, exp_avg_sqs]).

        NOTE(review): `chunk_size`, `noop_flag_buffer`, and `bias_correction`
        are accepted only for interface parity with the CUDA op — they are
        unused here. Confirm that ignoring `bias_correction` matches callers'
        expectations.
        """
        # beta^step power terms handed to the NPU fused kernels.
        bias_correction1 = beta1**step
        bias_correction2 = beta2**step

        # iteration group['params']
        for i in range(len(tensor_lists[0])):
            grad_flat = tensor_lists[0][i]
            param_flat = tensor_lists[1][i]
            m_flat = tensor_lists[2][i]
            v_flat = tensor_lists[3][i]

            if adam_w_mode:
                # Decoupled weight decay variant (AdamW); updates are written
                # in place via the `out=` aliases.
                param_flat.data, m_flat, v_flat = torch_npu.npu_apply_adam_w(
                    bias_correction1,
                    bias_correction2,
                    lr,
                    weight_decay,
                    beta1,
                    beta2,
                    epsilon,
                    grad_flat,
                    None,  # max_grad_norm
                    False,  # amsgrad
                    False,  # maximize
                    out=(param_flat.data, m_flat, v_flat))
            else:
                # Plain Adam path; note weight_decay is not passed here.
                param_flat.data, m_flat, v_flat = torch_npu.npu_apply_adam(
                    bias_correction1,
                    bias_correction2,
                    lr,
                    beta1,
                    beta2,
                    epsilon,
                    grad_flat,
                    False,  # use_locking
                    False,  # use_nesterov
                    out=(param_flat.data, m_flat, v_flat))
55
+
56
+
57
class FusedAdamBuilder(NPUOpBuilder):
    """
    Builder exposing the NPU fused Adam implementation.

    Nothing is compiled for this op: load() simply hands back the
    pure-Python NPUFusedAdam shim.
    """

    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
    NAME = "fused_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        # Fully-qualified module path of the op.
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        # No C++ sources; the op is implemented in Python.
        return []

    def include_paths(self):
        # No headers needed since nothing is compiled.
        return []

    def load(self, verbose=True):
        return NPUFusedAdam
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/inference.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from enum import IntEnum
7
+ from .builder import NPUOpBuilder
8
+
9
+ try:
10
+ import torch
11
+ import torch_npu
12
+ except ImportError as e:
13
+ pass
14
+
15
+
16
class ActivationFuncType(IntEnum):
    """Identifiers for the activation functions recognized by the inference ops."""
    UNKNOWN = 0
    GELU = 1
    ReLU = 2
    GATED_GELU = 3
    GATED_SILU = 4
22
+
23
+
24
class InferenceContext:
    """
    Process-wide mutable state shared by the NPU inference ops.

    All state lives on the class itself; the static methods below read and
    write it without any instances being created.
    """

    # Scratch workspace buffer (None until allocated elsewhere).
    _workspace = None

    _seed = 42
    _curr_offset = 0
    _stream = 0
    _free_memory_size = 0
    # Running token count for the current generation.
    _num_tokens = 1
    _attention_unfused_workspace_offset = 0
    _workSpaceSize = 0

    workSpaceSize = 0
    # Per-layer [k, v] cache pairs, lazily created by the attention path.
    kv_caches = None

    @staticmethod
    def reset_tokens(initial_tokens=1):
        """Reset the running token counter to `initial_tokens`."""
        InferenceContext._num_tokens = initial_tokens

    @staticmethod
    def current_tokens():
        """Return the current running token count."""
        return InferenceContext._num_tokens

    @staticmethod
    def GetWorkSpace():
        """Return the shared workspace buffer (may be None)."""
        return InferenceContext._workspace
49
+
50
+
51
+ class NPUInference:
52
+
53
    @staticmethod
    def layer_norm(inputs, gamma, beta, epsilon):
        """Apply LayerNorm over the last dimension of `inputs` with weight
        `gamma`, bias `beta`, and stability term `epsilon`."""
        return torch.nn.functional.layer_norm(inputs, [inputs.shape[-1]], gamma, beta, eps=epsilon)
56
+
57
    @staticmethod
    def _qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
        """
        Shared QKV projection: LayerNorm `inputs` (over dim 2, so a 3-D
        [b, s, h] input is assumed — TODO confirm), matmul with `weight`
        (optionally transposed), optionally add `bias`.

        Returns [projection, normed_input].

        NOTE(review): `q_scale` and `q_int8` are accepted for interface
        parity but unused here — no quantized path is implemented.
        """
        inp_norm = torch.nn.functional.layer_norm(inputs, (inputs.shape[2], ), gamma, beta, eps)
        weight = weight.t() if transpose else weight
        tmp = torch.matmul(inp_norm, weight)
        if add_bias:
            tmp += bias
        output = [tmp, inp_norm]
        return output
66
+
67
    @staticmethod
    def qkv_gemm_fp16(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
        # fp16 entry point; all dtype variants share one implementation.
        return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose)
70
+
71
    @staticmethod
    def qkv_gemm_bf16(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
        # bf16 entry point; all dtype variants share one implementation.
        return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose)
74
+
75
    @staticmethod
    def qkv_gemm_fp32(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
        # fp32 entry point; all dtype variants share one implementation.
        return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose)
78
+
79
    @staticmethod
    def _bias_add_transform_0213(vals, bias, hidden_dim, seq_length, seq_offset, heads, num_kv, rotary_dim,
                                 rotate_half, rotate_every_two, rope_theta):
        """
        Split fused QKV activations `vals` into q/k/v, apply rotary position
        embedding (rotate-every-two flavor) to q and k, and lay k/v out as
        caches.

        Returns (q [bsz, seq, H], k_cache [bsz, heads, seq, d],
        v_cache [bsz, heads, seq, d]).

        NOTE(review): `bias`, `seq_offset`, and `rotate_half` are unused in
        this body; the sin/cos tables are built from positions
        0..seq_length-1, so decode-time calls do not advance the rotary
        position — confirm this is intended for the non-prompt path.
        """
        bsz, _, _ = vals.shape
        # Leading hidden_dim columns are q; next num_kv*head_dim are k; rest is v.
        q = vals[..., :hidden_dim].reshape(bsz, seq_length, heads, -1)
        k = vals[..., hidden_dim:hidden_dim + num_kv * (hidden_dim // heads)].reshape(bsz, seq_length, num_kv, -1)
        v = vals[..., hidden_dim + num_kv * (hidden_dim // heads):]

        if rotary_dim > 0 and rotate_every_two:
            # sin, cos may use cache
            seq_id = torch.arange(0, seq_length).to("npu")
            inv_freq = torch.arange(0, rotary_dim, 2) / rotary_dim
            inv_freq = inv_freq.to("npu")
            inv_freq = 1.0 / torch.pow(rope_theta, inv_freq)
            inv_freq = torch.outer(seq_id, inv_freq)
            sin = inv_freq.sin()
            cos = inv_freq.cos()
            # shape: [bsz=1, seq_len, heads=1, rotary_dim]
            sin = sin.view(-1, seq_length, 1, rotary_dim // 2).repeat_interleave(2, dim=-1)
            cos = cos.view(-1, seq_length, 1, rotary_dim // 2).repeat_interleave(2, dim=-1)

            # Only the first rotary_dim channels are rotated; the rest pass through.
            q_pos, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
            k_pos, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

            q_pos = torch_npu.npu_rotary_mul(q_pos, cos, sin)
            q = torch.cat([q_pos, q_pass], dim=-1)
            k_pos = torch_npu.npu_rotary_mul(k_pos, cos, sin)
            k = torch.cat([k_pos, k_pass], dim=-1)

        output = q.reshape(bsz, seq_length, -1).contiguous()  # [b, s, H]
        k_cache = k.reshape(bsz, seq_length, heads, -1).transpose(1, 2).contiguous()  # [b, n, s, d]
        v_cache = v.reshape(bsz, seq_length, heads, -1).transpose(1, 2).contiguous()  # [b, n, s, d]
        return output, k_cache, v_cache
112
+
113
@staticmethod
def _softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
                     norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
                     num_layers, alibi, rope_theta):
    """One layer's attention on NPU: split the fused QKV, maintain the
    per-layer KV cache in the module-global InferenceContext, and run
    torch_npu.npu_fusion_attention.

    Returns (attention output [b, s, H], key cache [b, n, s, d],
    value cache [b, n, s, d]).  triangular_masking, local_attention,
    window_size and no_masking are accepted but not used in this path.
    """
    bsz, seq_len, k = query_key_value.size()
    # per-head dim: the fused last dim packs q (heads) plus k and v
    # (num_kv each, falling back to heads when num_kv <= 0)
    k = k // (heads + 2 * (num_kv if num_kv > 0 else heads))
    hidden_dim = heads * k

    # seq_len > 1 means prompt/prefill; seq_len == 1 is a decode step
    is_promt = seq_len > 1
    if not InferenceContext.kv_caches:
        InferenceContext.kv_caches = [[None, None] for _ in range(num_layers)]
    if is_promt:
        # new prompt: reset the generated-token counter and drop this
        # layer's cached k/v so stale state cannot leak across requests
        InferenceContext.reset_tokens(seq_len)
        InferenceContext.kv_caches[layer_id] = [None, None]

    soft_len = InferenceContext.current_tokens()
    # NOTE(review): workspace is fetched but never used here -- presumably
    # kept for parity with the CUDA kernel path; confirm before removing.
    workspace = InferenceContext.GetWorkSpace()
    # rotary position of the first token of this call
    seq_offset = 0 if is_promt else soft_len - 1

    q, k, v = NPUInference._bias_add_transform_0213(vals=query_key_value,
                                                    bias=None,
                                                    hidden_dim=hidden_dim,
                                                    seq_length=seq_len,
                                                    seq_offset=seq_offset,
                                                    heads=heads,
                                                    num_kv=num_kv if num_kv > 0 else heads,
                                                    rotary_dim=rotary_dim,
                                                    rotate_half=rotate_half,
                                                    rotate_every_two=rotate_every_two,
                                                    rope_theta=rope_theta)

    if not is_promt:
        # decode step: prepend the cached k/v (dim 2 is the sequence axis)
        k_cache, v_cache = InferenceContext.kv_caches[layer_id]
        if k_cache is not None:
            k = torch.cat([k_cache, k], dim=2)
            v = torch.cat([v_cache, v], dim=2)
    InferenceContext.kv_caches[layer_id] = [k, v]
    seq_len = k.shape[2]  # total length including cached tokens

    # a multi-dim alibi tensor implies per-layer scaling (BLOOM-style)
    layer_scale = max(1, layer_id) if len(alibi.size()) > 1 else 1.0
    alpha = norm_factor * norm_factor / layer_scale

    # fused attention expects "BSH" layout; k/v are flattened back from
    # [b, n, s, d] to [b, s, H].  pre/next_tockens=65536 disables the
    # sliding-window limit; keep_prob=1 disables dropout (inference).
    output = torch_npu.npu_fusion_attention(q,
                                            k.transpose(1, 2).reshape(bsz, seq_len, -1).contiguous(),
                                            v.transpose(1, 2).reshape(bsz, seq_len, -1).contiguous(),
                                            heads,
                                            "BSH",
                                            pse=None,
                                            padding_mask=None,
                                            atten_mask=attn_mask.bool(),
                                            scale=alpha,
                                            pre_tockens=65536,
                                            next_tockens=65536,
                                            keep_prob=1,
                                            inner_precise=0)[0]

    return output, k, v
170
+
171
@staticmethod
def softmax_context_fp16(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
                         norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
                         num_layers, alibi, rope_theta):
    """fp16 entry point; the shared ``_softmax_context`` handles every dtype."""
    return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half,
                                         rotate_every_two, heads, num_kv, norm_factor,
                                         triangular_masking, local_attention, window_size,
                                         no_masking, layer_id, num_layers, alibi, rope_theta)
178
+
179
@staticmethod
def softmax_context_bf16(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
                         norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
                         num_layers, alibi, rope_theta):
    """bf16 entry point; the shared ``_softmax_context`` handles every dtype."""
    return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half,
                                         rotate_every_two, heads, num_kv, norm_factor,
                                         triangular_masking, local_attention, window_size,
                                         no_masking, layer_id, num_layers, alibi, rope_theta)
186
+
187
@staticmethod
def softmax_context_fp32(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
                         norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
                         num_layers, alibi, rope_theta):
    """fp32 entry point; the shared ``_softmax_context`` handles every dtype."""
    return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half,
                                         rotate_every_two, heads, num_kv, norm_factor,
                                         triangular_masking, local_attention, window_size,
                                         no_masking, layer_id, num_layers, alibi, rope_theta)
194
+
195
+ @staticmethod
196
+ def _vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode):
197
+ if transposed_mode:
198
+ return torch.matmul(input, weight.t())
199
+ return torch.matmul(input, weight)
200
+
201
@staticmethod
def vector_matmul_fp16(input, weight, async_op, q_scale, q_int8, transposed_mode):
    """fp16 entry point; the shared ``_vector_matmul`` is dtype-agnostic."""
    return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8,
                                       transposed_mode)
204
+
205
@staticmethod
def vector_matmul_bf16(input, weight, async_op, q_scale, q_int8, transposed_mode):
    """bf16 entry point; the shared ``_vector_matmul`` is dtype-agnostic."""
    return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8,
                                       transposed_mode)
208
+
209
@staticmethod
def vector_matmul_fp32(input, weight, async_op, q_scale, q_int8, transposed_mode):
    """fp32 entry point; the shared ``_vector_matmul`` is dtype-agnostic."""
    return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8,
                                       transposed_mode)
212
+
213
@staticmethod
def _mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm,
              mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose):
    """Two-matmul MLP preceded by a layer norm.

    The quantization arguments (interm_scale, out_scale, dtype) and
    pre_layer_norm are accepted for interface compatibility but unused.
    Returns (mlp output, normalized layer-norm input) so the caller can
    reuse the normalized activations for the residual path.

    Raises:
        Exception: for activation types other than GELU / ReLU.
    """
    norm_shape = (input.shape[-1], )
    if mlp_after_attn:
        # fold attention output and its bias into the residual before normalizing
        ln_in = input + residual + input_bias
    else:
        ln_in = input
    residual_add = torch.nn.functional.layer_norm(ln_in, norm_shape, gamma, beta, eps)

    w_interm = weight_interm.t() if transpose else weight_interm
    hidden = torch.matmul(residual_add, w_interm) + bias
    if mlp_act_func_type == ActivationFuncType.GELU:
        hidden = torch.nn.functional.gelu(hidden)
    elif mlp_act_func_type == ActivationFuncType.ReLU:
        hidden = torch.nn.functional.relu(hidden)
    else:
        raise Exception('Unsupported ActivationFuncType {}'.format(mlp_act_func_type))
    output = torch.matmul(hidden, weight_out.t())
    return output, residual_add
232
+
233
@staticmethod
def mlp_gemm_fp16(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm,
                  mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose):
    """fp16 entry point; the shared ``_mlp_gemm`` is dtype-agnostic."""
    return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out,
                                  bias, gamma, beta, eps, pre_layer_norm, mlp_after_attn,
                                  interm_scale, out_scale, dtype, mlp_act_func_type,
                                  transpose)
239
+
240
@staticmethod
def mlp_gemm_bf16(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm,
                  mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose):
    """bf16 entry point; the shared ``_mlp_gemm`` is dtype-agnostic."""
    return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out,
                                  bias, gamma, beta, eps, pre_layer_norm, mlp_after_attn,
                                  interm_scale, out_scale, dtype, mlp_act_func_type,
                                  transpose)
246
+
247
@staticmethod
def mlp_gemm_fp32(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm,
                  mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose):
    """fp32 entry point; the shared ``_mlp_gemm`` is dtype-agnostic."""
    return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out,
                                  bias, gamma, beta, eps, pre_layer_norm, mlp_after_attn,
                                  interm_scale, out_scale, dtype, mlp_act_func_type,
                                  transpose)
253
+
254
+ @staticmethod
255
+ def _residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size,
256
+ mlp_after_attn, add_bias, pre_layer_norm):
257
+ if mlp_after_attn:
258
+ if pre_layer_norm:
259
+ tmp = (residual.float() + attention_output.float() + attention_bias.float() +
260
+ final_bias.float()) / mp_size + hidden_state.float()
261
+ else:
262
+ tmp = residual.float() + hidden_state.float() + final_bias.float()
263
+ else:
264
+ if add_bias:
265
+ residual += attention_bias.float()
266
+ tmp = hidden_state.float() + attention_output.float() + (residual.float() + final_bias.float()) / mp_size
267
+
268
+ input_dtype = hidden_state.dtype
269
+ residual.set_(tmp.to(input_dtype))
270
+
271
@staticmethod
def residual_add_bias_fp16(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size,
                           mlp_after_attn, add_bias, pre_layer_norm):
    """fp16 entry point; the shared ``_residual_add_bias`` is dtype-agnostic."""
    return NPUInference._residual_add_bias(hidden_state, residual, attention_output,
                                           attention_bias, final_bias, mp_size,
                                           mlp_after_attn, add_bias, pre_layer_norm)
276
+
277
@staticmethod
def residual_add_bias_bf16(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size,
                           mlp_after_attn, add_bias, pre_layer_norm):
    """bf16 entry point; the shared ``_residual_add_bias`` is dtype-agnostic."""
    return NPUInference._residual_add_bias(hidden_state, residual, attention_output,
                                           attention_bias, final_bias, mp_size,
                                           mlp_after_attn, add_bias, pre_layer_norm)
282
+
283
@staticmethod
def residual_add_bias_fp32(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size,
                           mlp_after_attn, add_bias, pre_layer_norm):
    """fp32 entry point; the shared ``_residual_add_bias`` is dtype-agnostic."""
    return NPUInference._residual_add_bias(hidden_state, residual, attention_output,
                                           attention_bias, final_bias, mp_size,
                                           mlp_after_attn, add_bias, pre_layer_norm)
288
+
289
+
290
class InferenceBuilder(NPUOpBuilder):
    """Transformer-inference "builder" for the NPU backend.

    Nothing is JIT-compiled here: ``sources()`` and ``include_paths()`` are
    empty, and ``load()`` hands back the pure torch/torch_npu ``NPUInference``
    shim instead of a compiled extension module.
    """

    BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
    NAME = "transformer_inference"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.inference.{self.NAME}_op'

    def load(self, verbose=True):
        # skip compilation entirely and return the Python implementation
        return NPUInference

    def sources(self):
        # no kernel sources on NPU
        return []

    def include_paths(self):
        # no headers needed
        return []
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .builder import NPUOpBuilder
7
+
8
+
9
class NotImplementedBuilder(NPUOpBuilder):
    """Placeholder builder for ops with no NPU implementation.

    Construction is harmless (all builder hooks return empty lists), but
    ``load()`` raises immediately so a missing op surfaces with a clear
    message instead of an obscure compilation failure.
    """

    BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
    NAME = "deepspeed_not_implemented"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.comm.{self.NAME}_op'

    def load(self, verbose=True):
        # Fixed the user-facing message grammar ("had not ... on NPU backend"
        # -> "has not ... on the NPU backend"); still a ValueError so existing
        # callers that catch it are unaffected.
        raise ValueError("This op has not been implemented on the NPU backend.")

    def sources(self):
        return []

    def cxx_args(self):
        return []

    def extra_ldflags(self):
        return []

    def include_paths(self):
        return []
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cpu_adam import CPUAdamBuilder
7
+ from .cpu_adagrad import CPUAdagradBuilder
8
+ from .fused_adam import FusedAdamBuilder
9
+ from .async_io import AsyncIOBuilder
parrot/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (378 Bytes). View file