ZTWHHH committed on
Commit
3be138c
·
verified ·
1 Parent(s): 7126629

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/libtcl8.6.so +3 -0
  3. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py +41 -0
  8. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py +124 -0
  9. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py +40 -0
  10. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py +213 -0
  20. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py +33 -0
  21. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py +6 -0
  23. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py +261 -0
  27. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py +31 -0
  28. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py +6 -0
  29. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc +0 -0
  33. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py +94 -0
  34. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py +197 -0
  35. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py +30 -0
  36. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py +6 -0
  37. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py +77 -0
  42. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/model.py +223 -0
  43. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py +30 -0
  44. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py +6 -0
  45. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/__init__.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/container.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/model.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/policy.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/container.py +82 -0
  50. parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/model.py +221 -0
.gitattributes CHANGED
@@ -182,3 +182,4 @@ parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 fil
182
  parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
183
  parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
184
  parrot/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text
 
 
182
  parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
183
  parrot/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
184
  parrot/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text
185
+ parrot/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/libtcl8.6.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25a7d41e52b5f49fb3c9cad8d669ce2c0c3d8372a746d7a44dd1427e9c1591c7
3
+ size 1983416
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (371 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc ADDED
Binary file (3.95 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from abc import ABC, abstractmethod
7
+ from typing import Iterable, Tuple
8
+
9
+ import torch
10
+
11
+ #from .huggingface_engine import HuggingFaceCheckpointEngine
12
+
13
+ MEGATRON = 'megatron'
14
+ HUGGINGFACE = 'huggingface'
15
+
16
+
17
+ class CheckpointEngineBase(ABC):
18
+ """
19
+ Abstract interface for checkpoint engines to implement.
20
+
21
+ There is no ``__init__`` method here by design, since the creation of the checkpoint
22
+ engine will happen outside the policy/engine code. The tradeoff being made here is
23
+ that we will write different frontends for different checkpoint engines, but these
24
+ frontends can be tailored to the specific checkpoint engine/model source needs.
25
+ """
26
+
27
+ @abstractmethod
28
+ def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
29
+ """
30
+ This method should create a generator of tuples of the form (name, parameter) for
31
+ all parameters in the model. The name should be the fully qualified name of the
32
+ parameter, and the parameter should be a torch.Tensor.
33
+
34
+ The expected use of a checkpoint engine is the following:
35
+ ```python
36
+ for name, parameter in checkpoint_engine.parameters():
37
+ container_map.map_param(name, parameter)
38
+ ```
39
+ For a concrete use example, see ``InferenceV2Policy``.
40
+ """
41
+ ...
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import json
8
+ import torch
9
+ from .base_engine import CheckpointEngineBase
10
+ from typing import Iterable, Tuple
11
+ from functools import partial
12
+
13
+ from ..logging import inference_logger
14
+
15
+
16
+ class HuggingFaceCheckpointEngine(CheckpointEngineBase):
17
+
18
+ def __init__(self, model_name_or_path: str, auth_token: str = None) -> None:
19
+ super().__init__()
20
+ from transformers import AutoConfig, GenerationConfig
21
+
22
+ self.model_name_or_path = model_name_or_path
23
+ self.auth_token = auth_token
24
+ self.model_config = AutoConfig.from_pretrained(self.model_name_or_path)
25
+ # Define this property here so we can use it in the model implementation
26
+ if not hasattr(self.model_config, "max_seq_length"):
27
+ if hasattr(self.model_config, "max_position_embeddings"):
28
+ self.model_config.max_seq_length = self.model_config.max_position_embeddings
29
+ else:
30
+ generation_config = GenerationConfig.from_pretrained(self.model_name_or_path)
31
+ self.model_config.max_seq_length = generation_config.max_length
32
+ self._local_checkpoint_dir = None
33
+ self._all_ckpt_paths = self._fetch_checkpoint_files()
34
+
35
+ def _fetch_checkpoint_files(self):
36
+ """
37
+ Fetch the checkpoint files from the HuggingFace Hub.
38
+ """
39
+ # TODO(jeff): for models like llama-2 the user will have to provide an auth `token`,
40
+ # currently coming from the ckpt engine init but maybe a catch all kwargs for other
41
+ # snapshot download parameters would be more flexible.
42
+
43
+ from huggingface_hub import snapshot_download, list_repo_tree
44
+
45
+ def model_has_safetensors(model_name_or_path: str) -> bool:
46
+ if os.path.isdir(model_name_or_path):
47
+ file_list = os.listdir(model_name_or_path)
48
+ else:
49
+ file_list = [rf.path for rf in list_repo_tree(model_name_or_path)]
50
+ for f in file_list:
51
+ if f.endswith(".safetensors"):
52
+ return True
53
+ return False
54
+
55
+ if os.path.isdir(self.model_name_or_path):
56
+ self._local_checkpoint_dir = self.model_name_or_path
57
+ else:
58
+ # We need to download the checkpoint files from HF
59
+ if model_has_safetensors(self.model_name_or_path):
60
+ # Prioritize downloading safetensors if they are available
61
+ allow_patterns = ["*.safetensors", "*.json"]
62
+ else:
63
+ # Fallback to bin files when safetensors are not present
64
+ allow_patterns = ["*.bin", "*.json", "*.pt"]
65
+ self._local_checkpoint_dir = snapshot_download(self.model_name_or_path,
66
+ allow_patterns=allow_patterns,
67
+ revision=None,
68
+ token=self.auth_token)
69
+
70
+ assert os.path.isdir(
71
+ self._local_checkpoint_dir
72
+ ), f"Checkpoint dir {self._local_checkpoint_dir} is not a directory, cannot load checkpoint."
73
+
74
+ # Set the appropriate file names based on whether we have safetensors or not
75
+ if model_has_safetensors(self._local_checkpoint_dir):
76
+ from safetensors.torch import load_file
77
+ model_param_json_fname = "model.safetensors.index.json"
78
+ model_file_fname = "model.safetensors"
79
+ self._checkpoint_load_fn = load_file
80
+ else:
81
+ model_param_json_fname = "pytorch_model.bin.index.json"
82
+ model_file_fname = "pytorch_model.bin"
83
+ self._checkpoint_load_fn = partial(torch.load, map_location="cpu")
84
+
85
+ model_param_json = os.path.join(self._local_checkpoint_dir, model_param_json_fname)
86
+
87
+ if not os.path.isfile(model_param_json):
88
+ # We don't need any json as all such HF models will have pytorch_model.bin
89
+ all_checkpoint_files = [os.path.join(self._local_checkpoint_dir, model_file_fname)]
90
+ else:
91
+ param_map = json.load(open(model_param_json, "r"))
92
+
93
+ # weight_map -> { "lm_head.weight": "pytorch_model-00002-of-00002.bin", ... }
94
+ weight_map = param_map["weight_map"]
95
+
96
+ # unique set of all checkpoint files
97
+ all_checkpoint_files = set(weight_map.values())
98
+
99
+ # get absolute path of all unique checkpoint files
100
+ all_checkpoint_files = [os.path.join(self._local_checkpoint_dir, f) for f in all_checkpoint_files]
101
+
102
+ return all_checkpoint_files
103
+
104
+ def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
105
+ """
106
+ Generator of model parameters (satisfies the CheckpointEngineBase interface).
107
+ """
108
+ for checkpoint in self._all_ckpt_paths:
109
+ inference_logger().info(f"Loading checkpoint: {checkpoint}")
110
+ checkpoint_sd = self._checkpoint_load_fn(checkpoint)
111
+ param_keys = list(checkpoint_sd.keys())
112
+ for param_name in param_keys:
113
+ param = checkpoint_sd[param_name]
114
+ yield param_name, param
115
+
116
+ del checkpoint_sd
117
+
118
+
119
+ if __name__ == "__main__":
120
+ # To test, add your auth_token here and run `python huggingface_engine.py`
121
+ engine = HuggingFaceCheckpointEngine(model_name_or_path="meta-llama/Llama-2-7b-hf",
122
+ auth_token="hf_xxxxxxxxxxxxxxxxx")
123
+ for name, param in engine.parameters():
124
+ print(name, param.shape)
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Tuple
7
+ import torch
8
+
9
+ from .base_engine import CheckpointEngineBase
10
+
11
+
12
+ class InMemoryModelEngine(CheckpointEngineBase):
13
+ """
14
+ This "checkpoint" engine uses the existing interface to enable loading parameters into an
15
+ inference model from a model already instantiated in memory. In general, this is not the
16
+ recommended way to use the inference engine, and should only be used when absolutely necessary.
17
+
18
+ The primary limitation of this approach is that the model must be fully instantiated in memory.
19
+ In a tensor parallel scenario, this means that the model is either replicated many times in host
20
+ memory. Currently, it is also recommended to only use this approach for models held in host memory.
21
+
22
+ In order to free the memory held by this copy of the model, we delete the model in the first call
23
+ to `parameters`, so it is not safe to make this call twice.
24
+ """
25
+
26
+ def __init__(self, model: torch.nn.Module) -> None:
27
+ """
28
+ Create virtual checkpoint engine for the provided module.
29
+
30
+ Args:
31
+ model (torch.nn.Module): Model to load parameters from.
32
+ """
33
+ super().__init__()
34
+ self.model = model
35
+
36
+ def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
37
+ for name, parameter in self.model.named_parameters():
38
+ yield name, parameter
39
+
40
+ del self.model
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (622 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc ADDED
Binary file (9.4 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc ADDED
Binary file (9.02 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc ADDED
Binary file (2.28 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc ADDED
Binary file (7.01 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...inference_utils import ActivationType, DtypeEnum
14
+ from .. import *
15
+ from ...modules.configs import *
16
+ from ...modules.interfaces import *
17
+ from ...ragged import RaggedBatchWrapper
18
+
19
+ from .container import FalconNonTransformerContainer, FalconTransformerContainer
20
+
21
+
22
+ class FalconInferenceModel(DSTransformerModelBase):
23
+ """
24
+ Inference model implementation for ragged batching for Llama-2 models.
25
+ """
26
+
27
+ _non_transformer: Optional[FalconNonTransformerContainer]
28
+ """
29
+ Embed + unembed container. Specializing the type annotation.
30
+ """
31
+
32
+ _transformer: Optional[Iterable[FalconTransformerContainer]]
33
+ """
34
+ Per-layer transformer container. Specializing the type annotation.
35
+ """
36
+ """
37
+ Properties inherited from `DSInferenceModelBase`
38
+ """
39
+
40
+ @property
41
+ def max_sequence_length(self) -> int:
42
+ return self._config.max_seq_length
43
+
44
+ """
45
+ Properties inherited from `DSTransformerModelBase`
46
+ """
47
+
48
+ @property
49
+ def num_layers(self) -> int:
50
+ return self._config.num_hidden_layers
51
+
52
+ @property
53
+ def model_dim(self) -> int:
54
+ return self._config.hidden_size
55
+
56
+ @property
57
+ def vocab_size(self) -> int:
58
+ return self._config.vocab_size
59
+
60
+ @property
61
+ def head_size(self) -> int:
62
+ return self.model_dim // self.n_heads
63
+
64
+ @property
65
+ def n_heads(self) -> int:
66
+ return self._config.num_attention_heads
67
+
68
+ @property
69
+ def intermediate_dim(self) -> int:
70
+ return 4 * self._config.hidden_size
71
+
72
+ @property
73
+ def n_heads_kv(self) -> int:
74
+ return self._config.num_kv_heads if (self._config.new_decoder_architecture
75
+ or not self._config.multi_query) else 1
76
+
77
+ @property
78
+ def activation_dtype(self) -> DtypeEnum:
79
+ if self._config.torch_dtype == torch.float16:
80
+ return DtypeEnum.fp16
81
+ elif self._config.torch_dtype == torch.bfloat16:
82
+ return DtypeEnum.bf16
83
+ else:
84
+ raise NotImplementedError("Only fp16 and bf16 are supported")
85
+
86
+ @property
87
+ def mlp_activation_fn(self) -> ActivationType:
88
+ return ActivationType.GELU
89
+
90
+ @property
91
+ def norm_type(self) -> NormTypeEnum:
92
+ return NormTypeEnum.LayerNorm
93
+
94
+ @property
95
+ def positional_embedding_type(self) -> PositionalEmbeddingType:
96
+ return PositionalEmbeddingType.rotate_half
97
+
98
+ @property
99
+ def positional_embedding_config(self) -> RotateHalfConfig:
100
+ """
101
+ The positional embedding configuration for the model.
102
+ """
103
+ return RotateHalfConfig()
104
+
105
+ """
106
+ Forward implementations
107
+ """
108
+
109
+ def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
110
+ """
111
+ Performs the embedding lookup prior to running the transformer of the model.
112
+
113
+ Arguments:
114
+ ragged_batch (RaggedBatchWrapper): The batch to embed.
115
+
116
+ Returns:
117
+ torch.Tensor: The embedded batch.
118
+ """
119
+ embed = self.embed(ragged_batch, self._non_transformer.word_emb)
120
+
121
+ if embed.shape[-1] != self.model_dim:
122
+ raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")
123
+
124
+ return embed
125
+
126
+ def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
127
+ ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
128
+ """
129
+ Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
130
+ optimization to fuse the layer norm of the next layer into the current layer.
131
+
132
+ Arguments:
133
+ layer_idx (int): The index of the layer to execute.
134
+ residual (torch.Tensor): The residual tensor from the previous layer.
135
+ hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
136
+ hidden states after pre normalization.
137
+ ragged_batch_info (RaggedBatchWrapper): The batch metadata.
138
+ """
139
+ assert self.config.parallel_attn, "Only parallel attention implementation is supported"
140
+
141
+ cur_params = self._transformer[layer_idx]
142
+ kv_cache = self.state_manager.get_cache(layer_idx)
143
+
144
+ attn_ln_out = hidden_states
145
+ attn_hidden_state = self.qkv(attn_ln_out, cur_params.qkv_w, b=None)
146
+ attn_hidden_state = self.attn(attn_hidden_state, kv_cache, ragged_batch_info)
147
+ attention_output = self.attn_out(attn_hidden_state, cur_params.attn_out_w, b=None)
148
+
149
+ if self.config.new_decoder_architecture:
150
+ residual, mlp_ln_out = self.norm(residual,
151
+ None,
152
+ gamma=cur_params.ln_mlp_gamma,
153
+ beta=cur_params.ln_mlp_beta)
154
+ else:
155
+ mlp_ln_out = hidden_states
156
+
157
+ mlp_hidden_state = self.mlp_1(mlp_ln_out, cur_params.mlp_1_w, b=None)
158
+ mlp_output = self.mlp_2(mlp_hidden_state, cur_params.mlp_2_w, b=None)
159
+
160
+ mlp_output.add_(attention_output)
161
+
162
+ if self.tp_size > 1:
163
+ dist.all_reduce(mlp_output, group=self._base_mp_group)
164
+
165
+ if layer_idx != self.num_layers - 1:
166
+ next_params = self._transformer[layer_idx + 1]
167
+ residual, mlp_output = self.norm(residual,
168
+ mlp_output,
169
+ next_params.ln_attn_gamma,
170
+ beta=next_params.ln_attn_beta)
171
+ else:
172
+ # On last layer, we just need to perform the residual add. Adding into the residual
173
+ # here is safe.
174
+ residual.add_(mlp_output)
175
+
176
+ return residual, mlp_output
177
+
178
+ def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
179
+ """
180
+ Performs unembedding of the hidden states to logits. This will only sample the final
181
+ token of each sequence.
182
+ """
183
+ logits = self.unembed(hidden_states,
184
+ self._non_transformer.word_unembed,
185
+ ragged_batch_info,
186
+ gamma=self._non_transformer.final_norm_gamma,
187
+ beta=self._non_transformer.final_norm_beta)
188
+
189
+ if self.tp_size > 1:
190
+ comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
191
+ full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))
192
+
193
+ dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)
194
+
195
+ full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))
196
+
197
+ return full_logits
198
+ else:
199
+ return logits
200
+
201
+ def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
202
+ residual = self._forward_embed(wrapped_batch)
203
+
204
+ residual, hidden_states = self.norm(residual,
205
+ None,
206
+ gamma=self._transformer[0].ln_attn_gamma,
207
+ beta=self._transformer[0].ln_attn_beta)
208
+
209
+ for layer_idx in range(self.num_layers):
210
+ residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
211
+ wrapped_batch)
212
+
213
+ return self._forward_unembed(residual, wrapped_batch)
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any
7
+
8
+ from ...config_v2 import RaggedInferenceEngineConfig
9
+ from ..inference_policy_base import ContainerMap, InferenceV2Policy
10
+ from .container import FalconNonTransformerContainer, FalconTransformerContainer
11
+ from .container import FalconNewArchTransformerContainer
12
+ from .model import FalconInferenceModel
13
+
14
+
15
+ class FalconPolicy(InferenceV2Policy):
16
+
17
+ def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> FalconInferenceModel:
18
+ return FalconInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)
19
+
20
+ def build_container_map(self) -> ContainerMap:
21
+ map = ContainerMap()
22
+
23
+ trans_container_cls = FalconNewArchTransformerContainer if self._model_config.new_decoder_architecture else FalconTransformerContainer
24
+ transformer_containers = [trans_container_cls(self.model) for _ in range(self.model.num_layers)]
25
+
26
+ map.set_transformer_params(['transformer.h'], transformer_containers)
27
+
28
+ map.set_non_transformer_params(FalconNonTransformerContainer(self.model))
29
+
30
+ map.set_unmapped_params(
31
+ [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)])
32
+
33
+ return map
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (248 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .policy import MixtralPolicy
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc ADDED
Binary file (1.86 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc ADDED
Binary file (8.83 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.55 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...config_v2 import RaggedInferenceEngineConfig
14
+ from ...inference_utils import ActivationType, DtypeEnum
15
+ from ...model_implementations import *
16
+ from ...modules.configs import *
17
+ from ...modules.interfaces import *
18
+ from ...ragged import RaggedBatchWrapper
19
+ from ..inference_model_base import (
20
+ DSModelImplementationConfig,
21
+ MPType,
22
+ )
23
+
24
+ from .container import MixtralNonTransformerContainer, MixtralTransformerContainer
25
+
26
+
27
+ class MixtralInferenceModel(DSMoETransformerModelBase):
28
+ """
29
+ Inference model implementation for Mixtral models.
30
+ """
31
+
32
+ _non_transformer: Optional[MixtralNonTransformerContainer]
33
+ """
34
+ Embed + unembed container. Specializing the type annotation.
35
+ """
36
+
37
+ _transformer: Optional[Iterable[MixtralTransformerContainer]]
38
+ """
39
+ Per-layer transformer container. Specializing the type annotation.
40
+ """
41
+ """
42
+ Properties inherited from `DSInferenceModelBase`
43
+ """
44
+
45
+ @property
46
+ def max_sequence_length(self) -> int:
47
+ return self._config.max_position_embeddings
48
+
49
+ """
50
+ Properties inherited from `DSTransformerModelBase`
51
+ """
52
+
53
+ @property
54
+ def num_layers(self) -> int:
55
+ return self._config.num_hidden_layers
56
+
57
+ @property
58
+ def model_dim(self) -> int:
59
+ return self._config.hidden_size
60
+
61
+ @property
62
+ def vocab_size(self) -> int:
63
+ return self._config.vocab_size
64
+
65
+ @property
66
+ def head_size(self) -> int:
67
+ return self.model_dim // self.n_heads
68
+
69
+ @property
70
+ def n_heads(self) -> int:
71
+ return self._config.num_attention_heads
72
+
73
+ @property
74
+ def intermediate_dim(self) -> int:
75
+ return self._config.intermediate_size
76
+
77
+ @property
78
+ def n_heads_kv(self) -> int:
79
+ return self._config.num_key_value_heads
80
+
81
+ @property
82
+ def activation_dtype(self) -> DtypeEnum:
83
+ if self._config.torch_dtype == torch.float16:
84
+ return DtypeEnum.fp16
85
+ elif self._config.torch_dtype == torch.bfloat16:
86
+ return DtypeEnum.bf16
87
+ else:
88
+ raise NotImplementedError("Only fp16 and bf16 are supported")
89
+
90
+ @property
91
+ def mlp_activation_fn(self) -> ActivationType:
92
+ activation = self._config.hidden_act.lower()
93
+ if activation == "gelu":
94
+ return ActivationType.GEGLU
95
+ elif activation == "relu":
96
+ return ActivationType.ReGLU
97
+ elif activation == "gegelu":
98
+ return ActivationType.GEGLU
99
+ elif activation == "silu":
100
+ return ActivationType.SiGLU
101
+ else:
102
+ raise NotImplementedError(f"Activation {activation} not supported")
103
+
104
+ @property
105
+ def norm_type(self) -> NormTypeEnum:
106
+ return NormTypeEnum.RMSNorm
107
+
108
    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        """Rotary (rotate-half) positional embeddings."""
        return PositionalEmbeddingType.rotate_half
111
+
112
    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        """
        The positional embedding configuration for the model.
        """
        # RoPE theta base is taken straight from the HF config.
        return RotateHalfConfig(theta_base=self._config.rope_theta)
118
+
119
+ """
120
+ Inherited from `DSMoETransformerModelBase`
121
+ """
122
+
123
    @property
    def n_experts(self) -> int:
        """Number of experts per MoE layer (HF config `num_local_experts`)."""
        return self._config.num_local_experts
126
+
127
    @property
    def n_top_k(self) -> int:
        """Number of experts each token is routed to (HF config `num_experts_per_tok`)."""
        return self._config.num_experts_per_tok
130
+
131
    @property
    def normalize_expert_scores(self) -> bool:
        """Whether to renormalize the top-k router gate scores; always on for Mixtral."""
        return True
134
+
135
+ """
136
+ Model implementation
137
+ """
138
+
139
    def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig,
                 base_mp_group: MPType) -> None:
        """
        Base implementation for initialization. By default, this will initialize
        the traditional components of a transformer model:
            - Embedding
            - QKV projection
            - Self attention
            - Attention output projection
            - Feed forward network
            - Normalization
            - Unembedding

        Arguments:
            config (DSModelImplementationConfig): Model-specific configuration. No assumptions
                should be made about this config that are not closely tied to the specific
                model implementation.
            engine_config (RaggedInferenceEngineConfig): Engine configuration.
            base_mp_group (MPType): Base communication group for Tensor-parallel inference.
        """
        super().__init__(config, engine_config, base_mp_group)

        self.make_norm_layer()
        self.make_qkv_layer()
        self.make_attn_layer()
        self.make_attn_out_layer()
        # The MoE layer takes the place of the dense FFN built by the base model.
        self.make_moe_layer()
        self.make_embedding_layer()
        self.make_unembedding_layer()
        # Not built here — presumably populated later by the KV-cache setup path; confirm.
        self._kv_cache_config = None
169
+
170
+ def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
171
+ """
172
+ Performs the embedding lookup prior to running the transformer of the model.
173
+
174
+ Arguments:
175
+ ragged_batch (RaggedBatchWrapper): The batch to embed.
176
+
177
+ Returns:
178
+ torch.Tensor: The embedded batch.
179
+ """
180
+ embed = self.embed(ragged_batch, self._non_transformer.word_emb)
181
+
182
+ if embed.shape[-1] != self.model_dim:
183
+ raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")
184
+
185
+ return embed
186
+
187
    def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                             ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        # Attention sub-block; input arrives already pre-normalized (by the
        # caller for layer 0, or the previous layer's fused norm otherwise).
        hidden_states = self.qkv(hidden_states, cur_params.qkv_w)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w)

        # Tensor-parallel: reduce the attention-output partial sums.
        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma)

        # Mixture-of-experts block in place of a dense FFN.
        hidden_states = self.moe(hidden_states, ragged_batch_info, cur_params.moe_gate, cur_params.moe_mlp_1,
                                 cur_params.moe_mlp_2)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            # Peek ahead: fuse the NEXT layer's pre-attention norm into this layer.
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states
229
+
230
    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        # The final norm is fused into the unembedding kernel via `gamma`.
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm)

        if self.tp_size > 1:
            # Each TP rank produces a vocabulary shard; gather all shards and
            # re-interleave them into the full vocabulary dimension.
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits
251
+
252
    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Run the full model over a ragged batch and return logits for the final
        token of each sequence.
        """

        residual = self._forward_embed(wrapped_batch)

        # First pre-attention norm; all subsequent norms are fused into the
        # preceding layer inside `_forward_transformer`.
        residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer(layer_idx, residual, hidden_states, wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any
7
+
8
+ from ...config_v2 import RaggedInferenceEngineConfig
9
+ from ..inference_policy_base import ContainerMap, InferenceV2Policy
10
+ from .container import MixtralTransformerContainer, MixtralNonTransformerContainer
11
+ from .model import MixtralInferenceModel
12
+
13
+
14
class MixtralPolicy(InferenceV2Policy):
    """Inference policy wiring the Mixtral checkpoint layout to its model implementation."""

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> MixtralInferenceModel:
        """Construct the Mixtral inference model for the given engine config and TP group."""
        return MixtralInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        """
        Map HF checkpoint parameter prefixes onto DeepSpeed parameter containers.

        Returns:
            ContainerMap: populated transformer / non-transformer mappings.
        """
        # Renamed from `map` to avoid shadowing the builtin.
        container_map = ContainerMap()

        # One container per decoder layer under the `model.layers` prefix.
        transformer_containers = [MixtralTransformerContainer(self.model) for _ in range(self.model.num_layers)]
        container_map.set_transformer_params(['model.layers'], transformer_containers)

        # Embedding, final norm and unembedding parameters.
        container_map.set_non_transformer_params(MixtralNonTransformerContainer(self.model))

        # No parameters are deliberately left unmapped for Mixtral.
        container_map.set_unmapped_params([])

        return container_map
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .policy import OPTPolicy
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (240 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc ADDED
Binary file (6.14 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Create a container object to save model-specific tensors using the policy file above.
7
+
8
+ from ..common_parameters import *
9
+ from ..layer_container_base import LayerContainer
10
+ '''
11
+ # HF OPT model looks like this:
12
+
13
+ OPTForCausalLM(
14
+ (model): OPTModel(
15
+ (decoder): OPTDecoder(
16
+ (embed_tokens): Embedding(50272, 768, padding_idx=1)
17
+ (embed_positions): OPTLearnedPositionalEmbedding(2050, 768)
18
+ (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
19
+ (layers): ModuleList(
20
+ (0-11): 12 x OPTDecoderLayer(
21
+ (self_attn): OPTAttention(
22
+ (k_proj): Linear(in_features=768, out_features=768, bias=True)
23
+ (v_proj): Linear(in_features=768, out_features=768, bias=True)
24
+ (q_proj): Linear(in_features=768, out_features=768, bias=True)
25
+ (out_proj): Linear(in_features=768, out_features=768, bias=True)
26
+ )
27
+ (activation_fn): ReLU()
28
+ (self_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
29
+ (fc1): Linear(in_features=768, out_features=3072, bias=True)
30
+ (fc2): Linear(in_features=3072, out_features=768, bias=True)
31
+ (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
32
+ )
33
+ )
34
+ )
35
+ )
36
+ (lm_head): Linear(in_features=768, out_features=50272, bias=False)
37
+ )
38
+
39
+ '''
40
+
41
+
42
class OPTTransformerContainer(LayerContainer):
    """
    Transformer layer container for the OPT model.

    OPT stores Q/K/V as three separate biased linear layers, so both the
    weights and biases are assembled through ``UnfusedQKVParameter``. All
    projections carry biases, and both norms are full LayerNorms (gamma + beta).
    """
    qkv_w: UnfusedQKVParameter
    qkv_b: UnfusedQKVParameter
    attn_out_w: AttentionOutputParameter
    attn_out_b: AttentionOutputParameter
    mlp_1_w: MLP1Parameter
    mlp_1_b: MLP1Parameter
    mlp_2_w: MLP2Parameter
    mlp_2_b: MLP2Parameter
    attn_norm_beta: NormParameter
    attn_norm_gamma: NormParameter
    mlp_norm_beta: NormParameter
    mlp_norm_gamma: NormParameter

    # Maps HF checkpoint parameter names (relative to each decoder layer) onto
    # the container attribute destinations declared above.
    PARAM_MAPPING = {
        "self_attn.q_proj.weight": "qkv_w.q_params",
        "self_attn.q_proj.bias": "qkv_b.q_params",
        "self_attn.k_proj.weight": "qkv_w.k_params",
        "self_attn.k_proj.bias": "qkv_b.k_params",
        "self_attn.v_proj.weight": "qkv_w.v_params",
        "self_attn.v_proj.bias": "qkv_b.v_params",
        "self_attn.out_proj.weight": "attn_out_w.params",
        "self_attn.out_proj.bias": "attn_out_b.params",
        "fc1.weight": "mlp_1_w.params",
        "fc1.bias": "mlp_1_b.params",
        "fc2.weight": "mlp_2_w.params",
        "fc2.bias": "mlp_2_b.params",
        "self_attn_layer_norm.weight": "attn_norm_gamma.params",
        "self_attn_layer_norm.bias": "attn_norm_beta.params",
        "final_layer_norm.weight": "mlp_norm_gamma.params",
        "final_layer_norm.bias": "mlp_norm_beta.params",
    }
77
+
78
+
79
class OPTNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the OPT model.
    """
    word_emb: EmbeddingParameter
    word_emb_pos: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm_w: NormParameter
    final_norm_b: NormParameter

    # The leading `*` appears to be a prefix wildcard (matching e.g.
    # `model.decoder...` or bare `decoder...` checkpoints) — confirm against
    # ContainerMap's matching semantics. `embed_tokens.weight` feeds BOTH the
    # embedding and the unembedding, i.e. OPT's tied LM head.
    PARAM_MAPPING = {
        "*decoder.embed_tokens.weight": ["word_emb.params", "word_unembed.params"],
        "*decoder.embed_positions.weight": "word_emb_pos.params",
        "*decoder.final_layer_norm.weight": "final_norm_w.params",
        "*decoder.final_layer_norm.bias": "final_norm_b.params",
    }
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...inference_utils import ActivationType, DtypeEnum
14
+ from ...model_implementations import *
15
+ from ...modules.configs import *
16
+ from ...ragged import RaggedBatchWrapper
17
+ from .container import OPTNonTransformerContainer, OPTTransformerContainer
18
+
19
+ from ...modules.heuristics import instantiate_embed
20
+
21
+
22
class OPTInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for OPT models.
    """

    _non_transformer: Optional[OPTNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[OPTTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        """Maximum supported sequence length."""
        # NOTE(review): HF's OPTConfig names this field `max_position_embeddings`;
        # confirm `max_seq_length` exists on the config object used here.
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        """Number of decoder layers."""
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        """Hidden size of the model."""
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        """Vocabulary size."""
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        """Per-head dimension; assumes `model_dim` divides evenly across heads."""
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        """Number of attention heads (OPT uses MHA; no separate KV-head override)."""
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        """FFN intermediate width (`ffn_dim` in the HF OPT config)."""
        return self._config.ffn_dim

    @property
    def activation_dtype(self) -> DtypeEnum:
        """Activation dtype from the checkpoint's `torch_dtype`; only half precisions supported."""
        if self._config.torch_dtype == torch.float16:
            return DtypeEnum.fp16
        elif self._config.torch_dtype == torch.bfloat16:
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        """OPT uses a plain (non-gated) ReLU MLP."""
        return ActivationType.RELU

    @property
    def norm_type(self) -> NormTypeEnum:
        """OPT uses full LayerNorm (gamma + beta)."""
        return NormTypeEnum.LayerNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        """No rotary embedding; OPT uses learned positional embeddings instead."""
        return PositionalEmbeddingType.none

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        """No rotary config (see `positional_embedding_type`)."""
        return None

    """
    Overrides of ``DSTransformerModelBase`` methods
    """

    def make_embedding_layer(self) -> None:
        """
        Performs setup and creates embedding DSModule. Since OPT includes trained
        positional embeddings, we will override the base model implementation.
        """

        # positional_offset=2 matches OPT's learned positional embedding table,
        # which carries two extra leading slots (e.g. 2050 = 2048 + 2).
        embed_config = DSEmbeddingsConfig(max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
                                          residual_dtype=self.activation_dtype,
                                          embedding_dim=self.model_dim,
                                          positional_embedding=True,
                                          positional_offset=2)

        self.embed = instantiate_embed(embed_config, self._engine_config)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """Token + learned positional embedding lookup for the ragged batch."""
        embed = self.embed(ragged_batch, self._non_transformer.word_emb, self._non_transformer.word_emb_pos)
        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) decoder layer: attention, MLP, and a
        peek-ahead fusion of the NEXT layer's pre-attention norm.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=cur_params.attn_out_b)

        # Tensor-parallel: reduce attention-output partial sums.
        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        residual, hidden_states = self.norm(residual,
                                            hidden_states,
                                            cur_params.mlp_norm_gamma,
                                            beta=cur_params.mlp_norm_beta)

        # Should be configurable in the future
        hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=cur_params.mlp_1_b)
        hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=cur_params.mlp_2_b)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            # Peek ahead: fuse the next layer's pre-attention norm into this layer.
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual,
                                                hidden_states,
                                                next_params.attn_norm_gamma,
                                                beta=next_params.attn_norm_beta)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """Final-token unembedding; the final LayerNorm is fused in via gamma/beta."""
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm_w,
                              beta=self._non_transformer.final_norm_b)

        if self.tp_size > 1:
            # Gather vocab-sharded logits from all ranks and re-interleave them.
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        """Run the full OPT model over a ragged batch and return final-token logits."""

        residual = self._forward_embed(wrapped_batch)

        # First pre-attention norm; subsequent norms are fused into the
        # preceding layer by `_forward_transformer_layer`.
        residual, hidden_states = self.norm(residual,
                                            None,
                                            self._transformer[0].attn_norm_gamma,
                                            beta=self._transformer[0].attn_norm_beta)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any
7
+
8
+ from ...config_v2 import RaggedInferenceEngineConfig
9
+ from ..inference_policy_base import ContainerMap, InferenceV2Policy
10
+ from .container import OPTNonTransformerContainer, OPTTransformerContainer
11
+ from .model import OPTInferenceModel
12
+
13
+
14
class OPTPolicy(InferenceV2Policy):
    """Inference policy wiring the OPT checkpoint layout to its model implementation."""

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> OPTInferenceModel:
        """Construct the OPT inference model for the given engine config and TP group."""
        return OPTInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        """
        Map HF checkpoint parameter prefixes onto DeepSpeed parameter containers.

        Returns:
            ContainerMap: populated transformer / non-transformer mappings.
        """
        # Renamed from `map` to avoid shadowing the builtin.
        container_map = ContainerMap()

        # One container per decoder layer; both prefixes cover the two common
        # OPT checkpoint layouts.
        transformer_containers = [OPTTransformerContainer(self.model) for _ in range(self.model.num_layers)]
        container_map.set_transformer_params(['model.decoder.layers', 'decoder.layers'], transformer_containers)

        # Embedding / positional embedding / final norm / tied unembedding.
        container_map.set_non_transformer_params(OPTNonTransformerContainer(self.model))

        # `lm_head.weight` is tied to the token embedding, so it is skipped.
        container_map.set_unmapped_params(['lm_head.weight'])

        return container_map
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .policy import QwenPolicy
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (242 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc ADDED
Binary file (7.38 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Create a container object to save model-specific tensors using the policy file above.
7
+
8
+ from ..common_parameters import *
9
+ from ..layer_container_base import LayerContainer
10
+ '''
11
+ # HF Qwen model looks like this:
12
+
13
+ QWenLMHeadModel(
14
+ (transformer): QWenModel(
15
+ (wte): Embedding(151936, 4096)
16
+ (drop): Dropout(p=0.0, inplace=False)
17
+ (rotary_emb): RotaryEmbedding()
18
+ (h): ModuleList(
19
+ (0-31): 32 x QWenBlock(
20
+ (ln_1): RMSNorm()
21
+ (attn): QWenAttention(
22
+ (c_attn): Linear(in_features=4096, out_features=12288, bias=True)
23
+ (c_proj): Linear(in_features=4096, out_features=4096, bias=False)
24
+ (attn_dropout): Dropout(p=0.0, inplace=False)
25
+ )
26
+ (ln_2): RMSNorm()
27
+ (mlp): QWenMLP(
28
+ (w1): Linear(in_features=4096, out_features=11008, bias=False)
29
+ (w2): Linear(in_features=4096, out_features=11008, bias=False)
30
+ (c_proj): Linear(in_features=11008, out_features=4096, bias=False)
31
+ )
32
+ )
33
+ )
34
+ (ln_f): RMSNorm()
35
+ )
36
+ (lm_head): Linear(in_features=4096, out_features=151936, bias=False)
37
+ )
38
+ '''
39
+
40
+
41
class QwenTransformerContainer(LayerContainer):
    """
    Transformer layer container for the Qwen model.

    Qwen fuses Q/K/V into a single biased `c_attn` projection, so the fused
    weight/bias route through ``FusedQKVParameter``. The MLP is gated: `w1`
    and `w2` form the up/gate halves of one ``GatedMLPParameter``.
    """
    qkv_w: FusedQKVParameter
    qkv_b: FusedQKVParameter
    attn_out_w: AttentionOutputParameter
    mlp_1_w: GatedMLPParameter
    mlp_2_w: MLP2Parameter
    attn_norm_gamma: NormParameter
    mlp_norm_gamma: NormParameter

    # Maps HF checkpoint parameter names (relative to each `transformer.h.<i>`
    # block) onto the container attribute destinations declared above.
    PARAM_MAPPING = {
        "attn.c_attn.weight": "qkv_w.params",
        "attn.c_attn.bias": "qkv_b.params",
        "attn.c_proj.weight": "attn_out_w.params",
        "mlp.w1.weight": "mlp_1_w.up_params",
        "mlp.w2.weight": "mlp_1_w.gate_params",
        "mlp.c_proj.weight": "mlp_2_w.params",
        "ln_1.weight": "attn_norm_gamma.params",
        "ln_2.weight": "mlp_norm_gamma.params",
    }
63
+
64
+
65
class QwenNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the Qwen model.
    """
    word_emb: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm: NormParameter

    # Absolute checkpoint names for the token embedding, the final RMSNorm and
    # the (untied) LM head.
    PARAM_MAPPING = {
        "transformer.wte.weight": "word_emb.params",
        "transformer.ln_f.weight": "final_norm.params",
        "lm_head.weight": "word_unembed.params",
    }
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...inference_utils import ActivationType, DtypeEnum
14
+ from .. import *
15
+ from ...modules.configs import *
16
+ from ...modules.interfaces import *
17
+ from ...modules import heuristics
18
+ from ...ragged import RaggedBatchWrapper
19
+
20
+ from .container import QwenNonTransformerContainer, QwenTransformerContainer
21
+
22
+
23
class QwenInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for Qwen models.
    """

    _non_transformer: Optional[QwenNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[QwenTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        """Maximum supported sequence length."""
        # NOTE(review): confirm the Qwen HF config exposes `max_seq_length`
        # (upstream QWenConfig also carries a `seq_length` field).
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        """Number of transformer blocks."""
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        """Hidden size of the model."""
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        """Vocabulary size."""
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        """Per-head dimension; assumes `model_dim` divides evenly across heads."""
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        """Number of attention heads."""
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        """Width of a single branch of the gated MLP."""
        # The HF config's `intermediate_size` apparently counts both gated
        # branches (w1 + w2), so one branch is half of it — TODO confirm.
        return self._config.intermediate_size // 2

    @property
    def n_heads_kv(self) -> int:
        """Number of KV heads, derived as `hidden_size / kv_channels`."""
        return self._config.hidden_size // self._config.kv_channels

    @property
    def activation_dtype(self) -> DtypeEnum:
        """Activation dtype from the config's `fp16`/`bf16` flags (fp16 when neither set)."""
        # Neither flag set -> default to fp16.
        autoset_precision = self._config.bf16 + self._config.fp16 == 0
        if autoset_precision:
            return DtypeEnum.fp16
        if self._config.fp16:
            return DtypeEnum.fp16
        elif self._config.bf16:
            # TODO(ZonePG): bf16 inference results may be different from huggingface bf16,
            # because in rms_norm, Qwen still use float() instead of bf16
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        """Qwen uses a SiLU-gated MLP (SiGLU)."""
        return ActivationType.SiGLU

    @property
    def norm_type(self) -> NormTypeEnum:
        """Qwen uses RMSNorm."""
        return NormTypeEnum.RMSNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        """Rotary (rotate-half) positional embeddings."""
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        """RoPE config; theta base comes from the HF config's `rotary_emb_base`."""
        return RotateHalfConfig(theta_base=self._config.rotary_emb_base)

    def make_norm_layer(self) -> None:
        """
        Instantiates the normalization layer for the model. This sets the `self.norm` attribute.

        TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
        but for now we'll just use the same one for all of them.
        """
        norm_config = DSNormConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            type=self.norm_type,
            channels=self.model_dim,
            residual_dtype=self.activation_dtype,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
            # Qwen names its norm epsilon `layer_norm_epsilon`, hence the override.
            eps=self._config.layer_norm_epsilon,
        )

        self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        # QKV is fused and biased in Qwen; the output projection has no bias.
        hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)

        # Tensor-parallel: reduce attention-output partial sums.
        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None)

        # Should be configurable in the future
        hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None)
        hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            # Peek ahead: fuse the next layer's pre-attention norm into this layer.
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        # Final RMSNorm is fused into the unembedding kernel via `gamma`.
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm)

        if self.tp_size > 1:
            # Gather vocab-sharded logits from all ranks and re-interleave them.
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        """Run the full Qwen model over a ragged batch and return final-token logits."""

        residual = self._forward_embed(wrapped_batch)

        # First pre-attention norm; subsequent norms are fused into the
        # preceding layer by `_forward_transformer_layer`.
        residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any
7
+
8
+ from ...config_v2 import RaggedInferenceEngineConfig
9
+ from ..inference_policy_base import ContainerMap, InferenceV2Policy
10
+ from .container import QwenNonTransformerContainer, QwenTransformerContainer
11
+ from .model import QwenInferenceModel
12
+
13
+
14
class QwenPolicy(InferenceV2Policy):
    """Inference policy binding the Qwen checkpoint layout to the v2 ragged engine."""

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> QwenInferenceModel:
        """Construct the Qwen ragged-inference model for this policy's model config."""
        return QwenInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        """
        Map Hugging Face checkpoint parameter prefixes onto DeepSpeed containers.

        Returns:
            ContainerMap: transformer containers keyed under 'transformer.h',
            plus the non-transformer (embed/unembed/final-norm) container.
        """
        # Renamed from `map` to avoid shadowing the builtin.
        container_map = ContainerMap()

        transformer_containers = [QwenTransformerContainer(self.model) for _ in range(self.model.num_layers)]
        container_map.set_transformer_params(['transformer.h'], transformer_containers)

        container_map.set_non_transformer_params(QwenNonTransformerContainer(self.model))

        # Rotary inv_freq buffers are not loaded into any container.
        container_map.set_unmapped_params(['transformer.rotary_emb.inv_freq'])

        return container_map
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .policy import Qwen2Policy
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/container.cpython-310.pyc ADDED
Binary file (1.9 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/model.cpython-310.pyc ADDED
Binary file (7.19 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/container.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Create a container object to save model-specific tensors using the policy file above.
7
+
8
+ from ..common_parameters import *
9
+ from ..layer_container_base import LayerContainer
10
+ '''
11
+ # HF Qwen2 model looks like this:
12
+
13
+ Qwen2ForCausalLM(
14
+ (model): Qwen2Model(
15
+ (embed_tokens): Embedding(151936, 1024)
16
+ (layers): ModuleList(
17
+ (0-23): 24 x Qwen2DecoderLayer(
18
+ (self_attn): Qwen2SdpaAttention(
19
+ (q_proj): Linear(in_features=1024, out_features=1024, bias=True)
20
+ (k_proj): Linear(in_features=1024, out_features=1024, bias=True)
21
+ (v_proj): Linear(in_features=1024, out_features=1024, bias=True)
22
+ (o_proj): Linear(in_features=1024, out_features=1024, bias=False)
23
+ (rotary_emb): Qwen2RotaryEmbedding()
24
+ )
25
+ (mlp): Qwen2MLP(
26
+ (gate_proj): Linear(in_features=1024, out_features=2816, bias=False)
27
+ (up_proj): Linear(in_features=1024, out_features=2816, bias=False)
28
+ (down_proj): Linear(in_features=2816, out_features=1024, bias=False)
29
+ (act_fn): SiLU()
30
+ )
31
+ (input_layernorm): Qwen2RMSNorm()
32
+ (post_attention_layernorm): Qwen2RMSNorm()
33
+ )
34
+ )
35
+ (norm): Qwen2RMSNorm()
36
+ )
37
+ (lm_head): Linear(in_features=1024, out_features=151936, bias=False)
38
+ )
39
+ '''
40
+
41
+
42
class Qwen2TransformerContainer(LayerContainer):
    """
    Transformer layer container for the Qwen2 model.

    The annotated attributes below are the parameter slots consumed by the
    inference model; PARAM_MAPPING routes Hugging Face checkpoint tensor names
    (relative to each decoder layer) into those slots.
    """
    # Fused QKV projection, assembled from the separate HF q/k/v tensors.
    # Qwen2 attention projections carry biases, hence the matching qkv_b slot.
    qkv_w: UnfusedQKVParameter
    qkv_b: UnfusedQKVParameter
    attn_out_w: AttentionOutputParameter
    # Gated MLP: gate+up projections fuse into mlp_1; down projection is mlp_2.
    mlp_1_w: GatedMLPParameter
    mlp_2_w: MLP2Parameter
    # RMSNorm gains: pre-attention and pre-MLP, respectively.
    attn_norm_gamma: NormParameter
    mlp_norm_gamma: NormParameter

    PARAM_MAPPING = {
        "self_attn.q_proj.weight": "qkv_w.q_params",
        "self_attn.k_proj.weight": "qkv_w.k_params",
        "self_attn.v_proj.weight": "qkv_w.v_params",
        "self_attn.q_proj.bias": "qkv_b.q_params",
        "self_attn.k_proj.bias": "qkv_b.k_params",
        "self_attn.v_proj.bias": "qkv_b.v_params",
        "self_attn.o_proj.weight": "attn_out_w.params",
        "mlp.gate_proj.weight": "mlp_1_w.gate_params",
        "mlp.up_proj.weight": "mlp_1_w.up_params",
        "mlp.down_proj.weight": "mlp_2_w.params",
        "input_layernorm.weight": "attn_norm_gamma.params",
        "post_attention_layernorm.weight": "mlp_norm_gamma.params",
    }
68
+
69
+
70
class Qwen2NonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the Qwen2 model.

    Holds everything outside the decoder stack: the input embedding table,
    the output (unembedding) projection, and the final RMSNorm gain.
    """
    word_emb: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm: NormParameter

    PARAM_MAPPING = {
        "model.embed_tokens.weight": "word_emb.params",
        "model.norm.weight": "final_norm.params",
        "lm_head.weight": "word_unembed.params",
    }
parrot/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/model.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...inference_utils import ActivationType, DtypeEnum
14
+ from .. import *
15
+ from ...modules.configs import *
16
+ from ...modules.interfaces import *
17
+ from ...modules import heuristics
18
+ from ...ragged import RaggedBatchWrapper
19
+
20
+ from .container import Qwen2NonTransformerContainer, Qwen2TransformerContainer
21
+
22
+
23
class Qwen2InferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for Qwen2 models.
    """

    _non_transformer: Optional[Qwen2NonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[Qwen2TransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        # Per-head width; assumes hidden_size divides evenly by the head count.
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return self._config.intermediate_size

    @property
    def n_heads_kv(self) -> int:
        # Qwen2 supports grouped-query attention; KV head count may be < n_heads.
        return self._config.num_key_value_heads

    @property
    def activation_dtype(self) -> DtypeEnum:
        # TODO(ZonePG): bf16 inference results may be different from huggingface bf16,
        # because in rms_norm, Qwen still use float() instead of bf16
        # if self._config.torch_dtype == torch.float16:
        #     return DtypeEnum.fp16
        # elif self._config.torch_dtype == torch.bfloat16:
        #     return DtypeEnum.bf16
        # else:
        #     raise NotImplementedError("Only fp16 and bf16 are supported")
        return DtypeEnum.fp16

    @property
    def mlp_activation_fn(self) -> ActivationType:
        # SiLU gate * up, matching HF Qwen2MLP's act_fn.
        return ActivationType.SiGLU

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.RMSNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        return RotateHalfConfig(theta_base=self._config.rope_theta)

    def make_norm_layer(self) -> None:
        """
        Instantiates the normalization layer for the model. This sets the `self.norm` attribute.

        TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
        but for now we'll just use the same one for all of them.
        """
        norm_config = DSNormConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            type=self.norm_type,
            channels=self.model_dim,
            residual_dtype=self.activation_dtype,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
            eps=self._config.rms_norm_eps,
        )

        self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.

        Raises:
            ValueError: If the embedding width does not match the model dimension.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        # Attention sub-block: fused QKV (Qwen2 uses QKV biases), attention with
        # the layer's KV cache, then the output projection (no bias).
        hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)

        # Output projection is row-parallel under TP; reduce partial sums.
        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        # Fused residual-add + pre-MLP RMSNorm.
        residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None)

        # Should be configurable in the future
        hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None)
        hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            # Peek-ahead: apply the NEXT layer's pre-attention norm here so the
            # layer loop always receives pre-normalized hidden states.
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm)

        if self.tp_size > 1:
            # Each rank holds a vocab shard; gather all shards, then interleave
            # rank-major (tp, tokens, shard) back into (tokens, vocab).
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Full model pass: embed, pre-norm for layer 0, all transformer layers
        (with fused peek-ahead norms), then unembed to logits.
        """
        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)