ZTWHHH commited on
Commit
1b8c8b1
·
verified ·
1 Parent(s): bd51c9e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_parameter.cpython-310.pyc +0 -0
  3. llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc +0 -0
  4. llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/blocked_allocator.cpython-310.pyc +0 -0
  5. llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_wrapper.cpython-310.pyc +0 -0
  6. llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/sequence_descriptor.cpython-310.pyc +0 -0
  7. llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/fast_host_buffer.cu +18 -0
  8. vlmpy310/lib/python3.10/site-packages/av.libs/libavformat-071c54bd.so.61.7.100 +3 -0
  9. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py +6 -0
  10. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py +32 -0
  11. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h +17 -0
  12. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h +640 -0
  13. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h +58 -0
  14. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h +1115 -0
  15. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h +778 -0
  16. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py +13 -0
  17. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  18. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py +6 -0
  19. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc +0 -0
  20. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc +0 -0
  21. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp +53 -0
  22. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h +21 -0
  23. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py +50 -0
  24. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py +6 -0
  25. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc +0 -0
  26. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc +0 -0
  27. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h +39 -0
  28. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp +101 -0
  29. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h +16 -0
  30. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py +107 -0
  31. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h +74 -0
  32. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc +0 -0
  33. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc +0 -0
  34. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp +101 -0
  35. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cuh +26 -0
  36. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py +67 -0
  37. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu +137 -0
  38. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h +21 -0
  39. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py +8 -0
  40. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h +65 -0
  41. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py +73 -0
  42. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py +76 -0
  43. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py +6 -0
  44. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc +0 -0
  45. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc +0 -0
  46. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp +45 -0
  47. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh +22 -0
  48. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h +20 -0
  49. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py +52 -0
  50. vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu +86 -0
.gitattributes CHANGED
@@ -1159,3 +1159,4 @@ vlmpy310/lib/python3.10/site-packages/notebook/static/3f6d3488cf65374f6f67.woff
1159
  vlmpy310/lib/python3.10/site-packages/notebook/static/e4299464e7b012968eed.eot filter=lfs diff=lfs merge=lfs -text
1160
  llava_next/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
1161
  vlmpy310/lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
 
 
1159
  vlmpy310/lib/python3.10/site-packages/notebook/static/e4299464e7b012968eed.eot filter=lfs diff=lfs merge=lfs -text
1160
  llava_next/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
1161
  vlmpy310/lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
1162
+ vlmpy310/lib/python3.10/site-packages/av.libs/libavformat-071c54bd.so.61.7.100 filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_parameter.cpython-310.pyc ADDED
Binary file (2.81 kB). View file
 
llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc ADDED
Binary file (4.49 kB). View file
 
llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/blocked_allocator.cpython-310.pyc ADDED
Binary file (3.49 kB). View file
 
llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_wrapper.cpython-310.pyc ADDED
Binary file (7.96 kB). View file
 
llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/sequence_descriptor.cpython-310.pyc ADDED
Binary file (9.98 kB). View file
 
llava_next/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/fast_host_buffer.cu ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include "ds_kernel_utils.h"
#include "fast_host_buffer.h"

/*
Allocate a pinned (page-locked) host buffer of `size` bytes, tuned for
low-latency host -> accelerator copies.

Returns nullptr if the allocation fails; callers must check the result.
The buffer must be released with cudaFreeHost.
*/
void* get_cuda_fast_buffer(int64_t size)
{
    void* buffer_ptr;
    // Host allocation flags that should minimize the host -> accelerator copy latency:
    //   Portable      - usable from every CUDA context
    //   Mapped        - mapped into the device address space
    //   WriteCombined - fast host writes / device reads (host reads are slow)
    unsigned int alloc_flags =
        cudaHostAllocPortable | cudaHostAllocMapped | cudaHostAllocWriteCombined;

    // Fix: the allocation result was previously ignored, so a failed
    // cudaHostAlloc returned an uninitialized (garbage) pointer.
    if (cudaHostAlloc(&buffer_ptr, size, alloc_flags) != cudaSuccess) { return nullptr; }
    return buffer_ptr;
}
vlmpy310/lib/python3.10/site-packages/av.libs/libavformat-071c54bd.so.61.7.100 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed69b25c954e3e589fe26484acd11ef42cb2ce616b64e5e2d3964a94c4b46721
3
+ size 2782073
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .ds_kernel import DSKernelBase
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from abc import ABC, abstractmethod


class DSKernelBase(ABC):
    """Abstract interface implemented by every inference kernel wrapper.

    Concrete kernels do all of their compilation, warmup, and autotuning in
    ``__init__`` and expose execution exclusively through ``__call__``.
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        """Prepare the kernel for use.

        Any compilation, warmup, or autotuning belongs here so that no
        potential hang can occur mid-deployment. The desired run
        configuration should also be validated at this point.

        Subclasses are not required to call ``super().__init__``.
        """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, *args, **kwargs):
        """Execute the kernel.

        Every input and output is passed as an argument: implementations
        must not allocate here, and auto-tuning must never be performed
        from this method.
        """
        raise NotImplementedError()
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

// Activation functions selectable by the inference kernels. Values are
// assigned explicitly; presumably they are mirrored on the Python side of
// the bindings, so do not renumber existing entries — TODO confirm.
enum ActivationType {
    GELU = 0,         // Gaussian Error Linear Unit
    RELU = 1,         // Rectified Linear Unit
    SILU = 2,         // Sigmoid-weighted Linear Unit (swish)
    GEGLU = 3,        // Gated variant of GELU
    ReGLU = 4,        // Gated variant of ReLU
    SiGLU = 5,        // Gated variant of SiLU
    IDENTITY = 6,     // Pass-through (no activation)
    InvalidType = -1  // Sentinel for an unrecognized/unset activation
};
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h ADDED
@@ -0,0 +1,640 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "ds_kernel_utils.h"
9
+
10
+ #include <stdint.h>
11
+
12
+ #ifdef BF16_AVAILABLE
13
+ #include <cuda_bf16.h>
14
+ #endif
15
+
16
+ namespace conversion {
17
+
18
+ // Basic primitive for constructing conversions
19
+ template <typename TO, typename FROM>
20
+ DS_D_INLINE TO to(FROM val)
21
+ {
22
+ return to(val);
23
+ }
24
+
25
+ // Specializations
26
+
27
+ /********************* Identity Conversions *********************/
28
+ /*
29
+ Identity conversions are useful in templated functions where we might have
30
+ a fixed destination type. For example, I might have a kernel that accepts
31
+ __half, __nv_bfloat16, and float but always want to do the core computation
32
+ at floating point:
33
+
34
+ T mem_value = input[idx];
35
+ float compute_value = conversion::to<float, T>(mem_value);
36
+
37
+ In practice, we should be able to elide the second template parameter:
38
+ float compute_val = conversion::to<float>(mem_value);
39
+
40
+ In this case, we need an implementation to handle the T = float case
41
+
42
+ NOTE: The type inferencing system appears to be unable to handle inferring the first
43
+ template parameter, even in the trivial case.
44
+ */
45
+
46
+ // Floating point types
47
+ template <>
48
+ DS_D_INLINE double to(double val)
49
+ {
50
+ return val;
51
+ }
52
+ template <>
53
+ DS_D_INLINE float to(float val)
54
+ {
55
+ return val;
56
+ }
57
+ template <>
58
+ DS_D_INLINE __half to(__half val)
59
+ {
60
+ return val;
61
+ }
62
+ #ifdef BF16_AVAILABLE
63
+ template <>
64
+ DS_D_INLINE __nv_bfloat16 to(__nv_bfloat16 val)
65
+ {
66
+ return val;
67
+ }
68
+ #endif
69
+
70
+ // Integer types
71
+ template <>
72
+ DS_D_INLINE int8_t to(int8_t val)
73
+ {
74
+ return val;
75
+ }
76
+ template <>
77
+ DS_D_INLINE uint8_t to(uint8_t val)
78
+ {
79
+ return val;
80
+ }
81
+ template <>
82
+ DS_D_INLINE int16_t to(int16_t val)
83
+ {
84
+ return val;
85
+ }
86
+ template <>
87
+ DS_D_INLINE uint16_t to(uint16_t val)
88
+ {
89
+ return val;
90
+ }
91
+ template <>
92
+ DS_D_INLINE int32_t to(int32_t val)
93
+ {
94
+ return val;
95
+ }
96
+ template <>
97
+ DS_D_INLINE uint32_t to(uint32_t val)
98
+ {
99
+ return val;
100
+ }
101
+ template <>
102
+ DS_D_INLINE int64_t to(int64_t val)
103
+ {
104
+ return val;
105
+ }
106
+ template <>
107
+ DS_D_INLINE uint64_t to(uint64_t val)
108
+ {
109
+ return val;
110
+ }
111
+
112
+ // TODO: evaluate if we want bools
113
+
114
+ /********************* To Double Conversions *********************/
115
+
116
+ // * to double variants
117
+
118
+ // Would normally like to not use C cast, but this is an important enough conversion
119
+ // to keep
120
// Convert a single float to double. On device builds with inline PTX
// available, use the round-to-nearest-even convert instruction directly;
// otherwise fall back to a plain C cast.
template <>
DS_D_INLINE double to(float val)
{
#ifdef PTX_AVAILABLE
    double ret_val;
    // Fix: the opcode was misspelled "ctv.rn.f64.f32"; the PTX convert
    // instruction is "cvt", so this asm would fail to assemble whenever
    // PTX_AVAILABLE is defined (SM53+).
    asm("cvt.rn.f64.f32 %0, %1;\n" : "=d"(ret_val) : "f"(val));
    return ret_val;
#else
    return double(val);
#endif
}
131
+ // Note: there is a CVT instruction for __half -> double, but there's no inline interface
132
+ // for passing a single half value
133
+ template <>
134
+ DS_D_INLINE double to(__half val)
135
+ {
136
+ return to<double>(__half2float(val));
137
+ }
138
+ template <>
139
+ DS_D_INLINE double to(int64_t val)
140
+ {
141
+ return __ll2double_rn(val);
142
+ }
143
+ template <>
144
+ DS_D_INLINE double to(int32_t val)
145
+ {
146
+ return __int2double_rn(val);
147
+ }
148
+ template <>
149
+ DS_D_INLINE double to(int16_t val)
150
+ {
151
+ return __int2double_rn(val);
152
+ }
153
+ template <>
154
+ DS_D_INLINE double to(int8_t val)
155
+ {
156
+ return __int2double_rn(val);
157
+ }
158
+ template <>
159
+ DS_D_INLINE double to(uint64_t val)
160
+ {
161
+ return __ull2double_rn(val);
162
+ }
163
+ template <>
164
+ DS_D_INLINE double to(uint32_t val)
165
+ {
166
+ return __uint2double_rn(val);
167
+ }
168
+ template <>
169
+ DS_D_INLINE double to(uint16_t val)
170
+ {
171
+ return __uint2double_rn(val);
172
+ }
173
+ template <>
174
+ DS_D_INLINE double to(uint8_t val)
175
+ {
176
+ return __uint2double_rn(val);
177
+ }
178
+
179
+ // Same applies here
180
+ #ifdef BF16_AVAILABLE
181
+ template <>
182
+ DS_D_INLINE double to(__nv_bfloat16 val)
183
+ {
184
+ return to<double>(__bfloat162float(val));
185
+ }
186
+ #endif
187
+
188
+ /********************* To Float Conversions *********************/
189
+
190
+ template <>
191
+ DS_D_INLINE float to(double val)
192
+ {
193
+ return __double2float_rn(val);
194
+ }
195
+ template <>
196
+ DS_D_INLINE float to(__half val)
197
+ {
198
+ return __half2float(val);
199
+ }
200
+ template <>
201
+ DS_D_INLINE float to(int64_t val)
202
+ {
203
+ return __ll2float_rn(val);
204
+ }
205
+ template <>
206
+ DS_D_INLINE float to(int32_t val)
207
+ {
208
+ return __int2float_rn(val);
209
+ }
210
+ template <>
211
+ DS_D_INLINE float to(int16_t val)
212
+ {
213
+ return __int2float_rn(val);
214
+ }
215
+ template <>
216
+ DS_D_INLINE float to(int8_t val)
217
+ {
218
+ return __int2float_rn(val);
219
+ }
220
+ template <>
221
+ DS_D_INLINE float to(uint64_t val)
222
+ {
223
+ return __ull2float_rn(val);
224
+ }
225
+ template <>
226
+ DS_D_INLINE float to(uint32_t val)
227
+ {
228
+ return __uint2float_rn(val);
229
+ }
230
+ template <>
231
+ DS_D_INLINE float to(uint16_t val)
232
+ {
233
+ return __uint2float_rn(val);
234
+ }
235
+ template <>
236
+ DS_D_INLINE float to(uint8_t val)
237
+ {
238
+ return __uint2float_rn(val);
239
+ }
240
+
241
+ #ifdef BF16_AVAILABLE
242
+ template <>
243
+ DS_D_INLINE float to(__nv_bfloat16 val)
244
+ {
245
+ return __bfloat162float(val);
246
+ }
247
+ #endif
248
+
249
+ /********************* To Float2 Conversions *********************/
250
+ template <>
251
+ DS_D_INLINE float2 to(__half2 val)
252
+ {
253
+ return __half22float2(val);
254
+ }
255
+
256
+ #ifdef BF16_AVAILABLE
257
+ template <>
258
+ DS_D_INLINE float2 to(__nv_bfloat162 val)
259
+ {
260
+ return __bfloat1622float2(val);
261
+ }
262
+ #endif
263
+
264
+ /********************* To Half Conversions *********************/
265
+ template <>
266
+ DS_D_INLINE __half to(double val)
267
+ {
268
+ #ifdef __HIP_PLATFORM_AMD__
269
+ float val_f = __double2float_rn(val);
270
+ return __float2half(val_f);
271
+ #else
272
+ return __double2half(val);
273
+ #endif
274
+ }
275
+ template <>
276
+ DS_D_INLINE __half to(float val)
277
+ {
278
+ return __float2half(val);
279
+ }
280
+ template <>
281
+ DS_D_INLINE __half to(int64_t val)
282
+ {
283
+ return __ll2half_rn(val);
284
+ }
285
+ template <>
286
+ DS_D_INLINE __half to(int32_t val)
287
+ {
288
+ return __int2half_rn(val);
289
+ }
290
+ template <>
291
+ DS_D_INLINE __half to(int16_t val)
292
+ {
293
+ return __short2half_rn(val);
294
+ }
295
+ template <>
296
+ DS_D_INLINE __half to(int8_t val)
297
+ {
298
+ return __int2half_rn(val);
299
+ }
300
+ template <>
301
+ DS_D_INLINE __half to(uint64_t val)
302
+ {
303
+ return __ull2half_rn(val);
304
+ }
305
+ template <>
306
+ DS_D_INLINE __half to(uint32_t val)
307
+ {
308
+ return __uint2half_rn(val);
309
+ }
310
+ template <>
311
+ DS_D_INLINE __half to(uint16_t val)
312
+ {
313
+ return __ushort2half_rn(val);
314
+ }
315
+ template <>
316
+ DS_D_INLINE __half to(uint8_t val)
317
+ {
318
+ return __uint2half_rn(val);
319
+ }
320
+
321
+ #ifdef BF16_AVAILABLE
322
+ // No direct conversion
323
+ template <>
324
+ DS_D_INLINE __half to(__nv_bfloat16 val)
325
+ {
326
+ return to<__half>(to<float>(val));
327
+ }
328
+ #endif
329
+
330
+ /********************* To Half2 Conversions *********************/
331
+ template <>
332
+ DS_D_INLINE __half2 to(float2 val)
333
+ {
334
+ return __float22half2_rn(val);
335
+ }
336
+ template <>
337
+ DS_D_INLINE __half2 to(float val)
338
+ {
339
+ return __float2half2_rn(val);
340
+ }
341
+
342
+ #ifdef BF16_AVAILABLE
343
+ // No direct conversion
344
+ template <>
345
+ DS_D_INLINE __half2 to(__nv_bfloat162 val)
346
+ {
347
+ return to<__half2>(to<float2>(val));
348
+ }
349
+ #endif
350
+
351
+ /********************* To BF16 Conversions *********************/
352
+ #ifdef BF16_AVAILABLE
353
+ template <>
354
+ DS_D_INLINE __nv_bfloat16 to(double val)
355
+ {
356
+ return __double2bfloat16(val);
357
+ }
358
+ template <>
359
+ DS_D_INLINE __nv_bfloat16 to(float val)
360
+ {
361
+ return __float2bfloat16(val);
362
+ }
363
+ template <>
364
+ DS_D_INLINE __nv_bfloat16 to(int64_t val)
365
+ {
366
+ return __ll2bfloat16_rn(val);
367
+ }
368
+ template <>
369
+ DS_D_INLINE __nv_bfloat16 to(int32_t val)
370
+ {
371
+ return __int2bfloat16_rn(val);
372
+ }
373
+ template <>
374
+ DS_D_INLINE __nv_bfloat16 to(int16_t val)
375
+ {
376
+ return __short2bfloat16_rn(val);
377
+ }
378
+ template <>
379
+ DS_D_INLINE __nv_bfloat16 to(int8_t val)
380
+ {
381
+ return __int2bfloat16_rn(val);
382
+ }
383
+ template <>
384
+ DS_D_INLINE __nv_bfloat16 to(uint64_t val)
385
+ {
386
+ return __ull2bfloat16_rn(val);
387
+ }
388
+ template <>
389
+ DS_D_INLINE __nv_bfloat16 to(uint32_t val)
390
+ {
391
+ return __uint2bfloat16_rn(val);
392
+ }
393
+ template <>
394
+ DS_D_INLINE __nv_bfloat16 to(uint16_t val)
395
+ {
396
+ return __ushort2bfloat16_rn(val);
397
+ }
398
+ template <>
399
+ DS_D_INLINE __nv_bfloat16 to(uint8_t val)
400
+ {
401
+ return __uint2bfloat16_rn(val);
402
+ }
403
+ #endif
404
+
405
+ /********************* To BF162 Conversions *********************/
406
+ #ifdef BF16_AVAILABLE
407
+ template <>
408
+ DS_D_INLINE __nv_bfloat162 to(float2 val)
409
+ {
410
+ return __float22bfloat162_rn(val);
411
+ }
412
+ template <>
413
+ DS_D_INLINE __nv_bfloat162 to(float val)
414
+ {
415
+ return __float2bfloat162_rn(val);
416
+ }
417
+ template <>
418
+ DS_D_INLINE __nv_bfloat162 to(__half2 val)
419
+ {
420
+ return to<__nv_bfloat162>(to<float2>(val));
421
+ }
422
+ #endif
423
+
424
+ /********************* To INT64_T Conversions *********************/
425
+ template <>
426
+ DS_D_INLINE int64_t to(double val)
427
+ {
428
+ return __double2ll_rn(val);
429
+ }
430
+ template <>
431
+ DS_D_INLINE int64_t to(float val)
432
+ {
433
+ return __float2ll_rn(val);
434
+ }
435
+ template <>
436
+ DS_D_INLINE int64_t to(__half val)
437
+ {
438
+ return __half2ll_rn(val);
439
+ }
440
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
441
+ // to demand an PTX at this time
442
+
443
+ #ifdef BF16_AVAILABLE
444
+ template <>
445
+ DS_D_INLINE int64_t to(__nv_bfloat16 val)
446
+ {
447
+ return __bfloat162ll_rn(val);
448
+ }
449
+ #endif
450
+
451
+ /********************* To INT32_T Conversions *********************/
452
+ template <>
453
+ DS_D_INLINE int32_t to(double val)
454
+ {
455
+ return __double2int_rn(val);
456
+ }
457
+ template <>
458
+ DS_D_INLINE int32_t to(float val)
459
+ {
460
+ return __float2int_rn(val);
461
+ }
462
+ template <>
463
+ DS_D_INLINE int32_t to(__half val)
464
+ {
465
+ return __half2int_rn(val);
466
+ }
467
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
468
+ // to demand an PTX at this time
469
+
470
+ #ifdef BF16_AVAILABLE
471
+ template <>
472
+ DS_D_INLINE int32_t to(__nv_bfloat16 val)
473
+ {
474
+ return __bfloat162int_rn(val);
475
+ }
476
+ #endif
477
+
478
+ /********************* To INT16_T Conversions *********************/
479
+ template <>
480
+ DS_D_INLINE int16_t to(double val)
481
+ {
482
+ return __double2int_rn(val);
483
+ }
484
+ template <>
485
+ DS_D_INLINE int16_t to(float val)
486
+ {
487
+ return __float2int_rn(val);
488
+ }
489
+ template <>
490
+ DS_D_INLINE int16_t to(__half val)
491
+ {
492
+ return __half2int_rn(val);
493
+ }
494
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
495
+ // to demand an PTX at this time
496
+
497
+ #ifdef BF16_AVAILABLE
498
+ template <>
499
+ DS_D_INLINE int16_t to(__nv_bfloat16 val)
500
+ {
501
+ return __bfloat162int_rn(val);
502
+ }
503
+ #endif
504
+
505
+ /********************* To INT8_T Conversions *********************/
506
+ template <>
507
+ DS_D_INLINE int8_t to(double val)
508
+ {
509
+ return __double2int_rn(val);
510
+ }
511
+ template <>
512
+ DS_D_INLINE int8_t to(float val)
513
+ {
514
+ return __float2int_rn(val);
515
+ }
516
+ template <>
517
+ DS_D_INLINE int8_t to(__half val)
518
+ {
519
+ return __half2int_rn(val);
520
+ }
521
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
522
+ // to demand an PTX at this time
523
+
524
+ #ifdef BF16_AVAILABLE
525
+ template <>
526
+ DS_D_INLINE int8_t to(__nv_bfloat16 val)
527
+ {
528
+ return __bfloat162int_rn(val);
529
+ }
530
+ #endif
531
+
532
+ /********************* To UINT64_T Conversions *********************/
533
+ template <>
534
+ DS_D_INLINE uint64_t to(double val)
535
+ {
536
+ return __double2ull_rn(val);
537
+ }
538
+ template <>
539
+ DS_D_INLINE uint64_t to(float val)
540
+ {
541
+ return __float2ull_rn(val);
542
+ }
543
+ template <>
544
+ DS_D_INLINE uint64_t to(__half val)
545
+ {
546
+ return __half2ull_rn(val);
547
+ }
548
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
549
+ // to demand an PTX at this time
550
+
551
+ #ifdef BF16_AVAILABLE
552
+ template <>
553
+ DS_D_INLINE uint64_t to(__nv_bfloat16 val)
554
+ {
555
+ return __bfloat162ull_rn(val);
556
+ }
557
+ #endif
558
+
559
+ /********************* To UINT32_T Conversions *********************/
560
+ template <>
561
+ DS_D_INLINE uint32_t to(double val)
562
+ {
563
+ return __double2uint_rn(val);
564
+ }
565
+ template <>
566
+ DS_D_INLINE uint32_t to(float val)
567
+ {
568
+ return __float2uint_rn(val);
569
+ }
570
+ template <>
571
+ DS_D_INLINE uint32_t to(__half val)
572
+ {
573
+ return __half2uint_rn(val);
574
+ }
575
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
576
+ // to demand an PTX at this time
577
+
578
+ #ifdef BF16_AVAILABLE
579
+ template <>
580
+ DS_D_INLINE uint32_t to(__nv_bfloat16 val)
581
+ {
582
+ return __bfloat162uint_rn(val);
583
+ }
584
+ #endif
585
+
586
+ /********************* To UINT16_T Conversions *********************/
587
+ template <>
588
+ DS_D_INLINE uint16_t to(double val)
589
+ {
590
+ return __double2uint_rn(val);
591
+ }
592
+ template <>
593
+ DS_D_INLINE uint16_t to(float val)
594
+ {
595
+ return __float2uint_rn(val);
596
+ }
597
+ template <>
598
+ DS_D_INLINE uint16_t to(__half val)
599
+ {
600
+ return __half2uint_rn(val);
601
+ }
602
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
603
+ // to demand an PTX at this time
604
+
605
+ #ifdef BF16_AVAILABLE
606
+ template <>
607
+ DS_D_INLINE uint16_t to(__nv_bfloat16 val)
608
+ {
609
+ return __bfloat162uint_rn(val);
610
+ }
611
+ #endif
612
+
613
+ /********************* To UINT8_T Conversions *********************/
614
+ template <>
615
+ DS_D_INLINE uint8_t to(double val)
616
+ {
617
+ return __double2uint_rn(val);
618
+ }
619
+ template <>
620
+ DS_D_INLINE uint8_t to(float val)
621
+ {
622
+ return __float2uint_rn(val);
623
+ }
624
+ template <>
625
+ DS_D_INLINE uint8_t to(__half val)
626
+ {
627
+ return __half2uint_rn(val);
628
+ }
629
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
630
+ // to demand an PTX at this time
631
+
632
+ #ifdef BF16_AVAILABLE
633
+ template <>
634
+ DS_D_INLINE uint8_t to(__nv_bfloat16 val)
635
+ {
636
+ return __bfloat162uint_rn(val);
637
+ }
638
+ #endif
639
+
640
+ } // namespace conversion
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Centralized header file for preprocessor macros and constants
used throughout the codebase.
*/

#pragma once

#include <cuda.h>
#include <cuda_fp16.h>

#ifdef BF16_AVAILABLE
#include <cuda_bf16.h>
#endif

// Inlining helpers: host+device and device-only variants respectively.
#define DS_HD_INLINE __host__ __device__ __forceinline__
#define DS_D_INLINE __device__ __forceinline__

#ifdef __HIP_PLATFORM_AMD__

// constexpr variant of warpSize for templating
constexpr int hw_warp_size = ROCM_WAVEFRONT_SIZE;
// NOTE(review): the trailing "= 1" makes this macro expand to the token
// sequence "= 1". That is harmless when it is only tested with #ifdef, but
// would break any use of the macro's value — confirm no code reads it.
#define HALF_PRECISION_AVAILABLE = 1
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_fp16.h>

#else  // !__HIP_PLATFORM_AMD__

// constexpr variant of warpSize for templating
constexpr int hw_warp_size = 32;

// Half-precision support and the inline-PTX paths are gated on SM53+.
#if __CUDA_ARCH__ >= 530
#define HALF_PRECISION_AVAILABLE = 1
#define PTX_AVAILABLE
#endif  // __CUDA_ARCH__ >= 530

// Asynchronous copy support is gated on SM80+.
#if __CUDA_ARCH__ >= 800
#define ASYNC_COPY_AVAILABLE
#endif  // __CUDA_ARCH__ >= 800

#include <cooperative_groups.h>
#include <cuda_fp16.h>

#endif  //__HIP_PLATFORM_AMD__
50
/*
Round `val` up to the next power of two (a value that is already a power of
two is returned unchanged). Uses the classic bit-smearing trick: smear the
highest set bit of (val - 1) into every lower bit position, then add one.

Precondition: 1 <= val <= 2^30 (larger inputs overflow a 32-bit int).
*/
inline int next_pow2(const int val)
{
    int rounded_val = val - 1;
    rounded_val |= rounded_val >> 1;
    rounded_val |= rounded_val >> 2;
    rounded_val |= rounded_val >> 4;
    rounded_val |= rounded_val >> 8;
    // Fix: the final smear step was missing, so any value above 2^16
    // produced a non-power-of-two result (e.g. 65537 -> 0x1FFFF instead
    // of 0x20000 after the +1).
    rounded_val |= rounded_val >> 16;
    return rounded_val + 1;
}
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h ADDED
@@ -0,0 +1,1115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include "ds_kernel_utils.h"
10
+
11
+ /////////////////////////////// Memory Access Utils ///////////////////////////////
12
+ namespace mem_access {
13
+
14
+ enum class LoadPolicy {
15
+ CacheAll, // Cache at all levels
16
+ CacheGlobal, // Cache at L2 only
17
+ CacheStreaming // Cache with evict first policy
18
+ };
19
+
20
+ enum class StorePolicy {
21
+ Writeback, // Cache in L1, write-back on eviction
22
+ CacheGlobal, // Bypass L1, write-back on eviction
23
+ CacheStreaming // Allocate cache line with evict first policy
24
+ };
25
+
26
+ template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
27
+ __device__ __forceinline__ void load_global(void* dst, const void* src);
28
+
29
+ template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
30
+ __device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access);
31
+
32
+ // Shared accesses have no cache policy
33
+ template <int AccessSize>
34
+ __device__ __forceinline__ void load_shared(void* dst, const void* src);
35
+
36
+ template <int AccessSize>
37
+ __device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access);
38
+
39
+ template <int AccessSize, StorePolicy policy = StorePolicy::Writeback>
40
+ __device__ __forceinline__ void store_global(void* dst, const void* src);
41
+
42
+ // Shared accesses have no cache policy
43
+ template <int AccessSize>
44
+ __device__ __forceinline__ void store_shared(void* dst, const void* src);
45
+
46
+ #ifdef ASYNC_COPY_AVAILABLE
47
+ template <int AccessSize>
48
+ __device__ __forceinline__ void memcpy_async(void* shr, const void* gbl);
49
+
50
+ template <int AccessSize>
51
+ __device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate);
52
+
53
+ template <int AccessSize>
54
+ __device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate);
55
+
56
+ __device__ __forceinline__ void memcpy_async_fence();
57
+
58
+ template <int stages>
59
+ __device__ __forceinline__ void memcpy_async_wait();
60
+
61
+ template <int stages>
62
+ __device__ __forceinline__ void tail_complete_wait(int remaining_stages);
63
+ #endif
64
+
65
+ // Util for tracking pipeline buffers
66
+ // TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE
67
+ template <int max>
68
+ class BufferTracker {
69
+ public:
70
+ int current_state;
71
+
72
+ __device__ __forceinline__ BufferTracker() : current_state(0) {}
73
+
74
+ __device__ __forceinline__ int get()
75
+ {
76
+ int return_val = current_state++;
77
+ current_state = (current_state == max ? 0 : current_state);
78
+ return return_val;
79
+ }
80
+ };
81
+
82
+ __device__ __forceinline__ uint32_t lane_id()
83
+ {
84
+ #ifdef PTX_AVAILABLE
85
+ unsigned int lane_id;
86
+ asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id));
87
+ return lane_id;
88
+ #else
89
+ return threadIdx.x & (warpSize - 1); // Portable
90
+ #endif
91
+ }
92
+
93
+ /////////// Load Global ///////////
94
+ template <>
95
+ __device__ __forceinline__ void load_global<16>(void* dst, const void* src)
96
+ {
97
+ uint4* data = reinterpret_cast<uint4*>(dst);
98
+ #ifdef PTX_AVAILABLE
99
+ asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n"
100
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
101
+ : "l"(src));
102
+ #else
103
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
104
+ data[0] = src_cast[0];
105
+ #endif
106
+ }
107
+
108
+ template <>
109
+ __device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access)
110
+ {
111
+ uint4* data = reinterpret_cast<uint4*>(dst);
112
+ #ifdef PTX_AVAILABLE
113
+ asm volatile(
114
+ "{\n"
115
+ "\t.reg .pred p;\n"
116
+ "\tsetp.ne.b32 p, %5, 0;\n"
117
+ "\tmov.b32 %0, 0;\n"
118
+ "\tmov.b32 %1, 0;\n"
119
+ "\tmov.b32 %2, 0;\n"
120
+ "\tmov.b32 %3, 0;\n"
121
+ "\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
122
+ "}\n"
123
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
124
+ : "l"(src), "r"((int)do_access));
125
+ #else
126
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
127
+ if (do_access) {
128
+ data[0] = src_cast[0];
129
+ } else {
130
+ data[0].x = 0;
131
+ data[0].y = 0;
132
+ data[0].z = 0;
133
+ data[0].w = 0;
134
+ }
135
+ #endif
136
+ }
137
+
138
+ template <>
139
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src)
140
+ {
141
+ uint4* data = reinterpret_cast<uint4*>(dst);
142
+ #ifdef PTX_AVAILABLE
143
+ asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
144
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
145
+ : "l"(src));
146
+ #else
147
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
148
+ data[0] = src_cast[0];
149
+ #endif
150
+ }
151
+
152
+ template <>
153
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst,
154
+ const void* src,
155
+ bool do_access)
156
+ {
157
+ uint4* data = reinterpret_cast<uint4*>(dst);
158
+ #ifdef PTX_AVAILABLE
159
+ asm volatile(
160
+ "{\n"
161
+ "\t.reg .pred p;\n"
162
+ "\tsetp.ne.b32 p, %5, 0;\n"
163
+ "\tmov.b32 %0, 0;\n"
164
+ "\tmov.b32 %1, 0;\n"
165
+ "\tmov.b32 %2, 0;\n"
166
+ "\tmov.b32 %3, 0;\n"
167
+ "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
168
+ "}\n"
169
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
170
+ : "l"(src), "r"((int)do_access));
171
+ #else
172
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
173
+ if (do_access) {
174
+ data[0] = src_cast[0];
175
+ } else {
176
+ data[0].x = 0;
177
+ data[0].y = 0;
178
+ data[0].z = 0;
179
+ data[0].w = 0;
180
+ }
181
+ #endif
182
+ }
183
+
184
+ template <>
185
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
186
+ const void* src)
187
+ {
188
+ uint4* data = reinterpret_cast<uint4*>(dst);
189
+ #ifdef PTX_AVAILABLE
190
+ asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
191
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
192
+ : "l"(src));
193
+ #else
194
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
195
+ data[0] = src_cast[0];
196
+ #endif
197
+ }
198
+
199
+ template <>
200
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
201
+ const void* src,
202
+ bool do_access)
203
+ {
204
+ uint4* data = reinterpret_cast<uint4*>(dst);
205
+ #ifdef PTX_AVAILABLE
206
+ asm volatile(
207
+ "{\n"
208
+ "\t.reg .pred p;\n"
209
+ "\tsetp.ne.b32 p, %5, 0;\n"
210
+ "\tmov.b32 %0, 0;\n"
211
+ "\tmov.b32 %1, 0;\n"
212
+ "\tmov.b32 %2, 0;\n"
213
+ "\tmov.b32 %3, 0;\n"
214
+ "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
215
+ "}\n"
216
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
217
+ : "l"(src), "r"((int)do_access));
218
+ #else
219
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
220
+ if (do_access) {
221
+ data[0] = src_cast[0];
222
+ } else {
223
+ data[0].x = 0;
224
+ data[0].y = 0;
225
+ data[0].z = 0;
226
+ data[0].w = 0;
227
+ }
228
+ #endif
229
+ }
230
+
231
+ template <>
232
+ __device__ __forceinline__ void load_global<8>(void* dst, const void* src)
233
+ {
234
+ uint2* data = reinterpret_cast<uint2*>(dst);
235
+ #ifdef PTX_AVAILABLE
236
+ asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n"
237
+ : "=r"(data[0].x), "=r"(data[0].y)
238
+ : "l"(src));
239
+ #else
240
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
241
+ data[0] = src_cast[0];
242
+ #endif
243
+ }
244
+
245
+ template <>
246
+ __device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access)
247
+ {
248
+ uint2* data = reinterpret_cast<uint2*>(dst);
249
+ #ifdef PTX_AVAILABLE
250
+ asm volatile(
251
+ "{\n"
252
+ "\t.reg .pred p;\n"
253
+ "\tsetp.ne.b32 p, %3, 0;\n"
254
+ "\tmov.b32 %0, 0;\n"
255
+ "\tmov.b32 %1, 0;\n"
256
+ "\t@p ld.global.v2.u32 {%0, %1}, [%2];\n"
257
+ "}\n"
258
+ : "=r"(data[0].x), "=r"(data[0].y)
259
+ : "l"(src), "r"((int)do_access));
260
+ #else
261
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
262
+ if (do_access) {
263
+ data[0] = src_cast[0];
264
+ } else {
265
+ data[0].x = 0;
266
+ data[0].y = 0;
267
+ }
268
+ #endif
269
+ }
270
+
271
+ template <>
272
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src)
273
+ {
274
+ uint2* data = reinterpret_cast<uint2*>(dst);
275
+ #ifdef PTX_AVAILABLE
276
+ asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
277
+ : "=r"(data[0].x), "=r"(data[0].y)
278
+ : "l"(src));
279
+ #else
280
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
281
+ data[0] = src_cast[0];
282
+ #endif
283
+ }
284
+
285
+ template <>
286
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst,
287
+ const void* src,
288
+ bool do_access)
289
+ {
290
+ uint2* data = reinterpret_cast<uint2*>(dst);
291
+ #ifdef PTX_AVAILABLE
292
+ asm volatile(
293
+ "{\n"
294
+ "\t.reg .pred p;\n"
295
+ "\tsetp.ne.b32 p, %3, 0;\n"
296
+ "\tmov.b32 %0, 0;\n"
297
+ "\tmov.b32 %1, 0;\n"
298
+ "\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
299
+ "}\n"
300
+ : "=r"(data[0].x), "=r"(data[0].y)
301
+ : "l"(src), "r"((int)do_access));
302
+ #else
303
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
304
+ if (do_access) {
305
+ data[0] = src_cast[0];
306
+ } else {
307
+ data[0].x = 0;
308
+ data[0].y = 0;
309
+ }
310
+ #endif
311
+ }
312
+
313
+ template <>
314
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
315
+ const void* src)
316
+ {
317
+ uint2* data = reinterpret_cast<uint2*>(dst);
318
+ #ifdef PTX_AVAILABLE
319
+ asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
320
+ : "=r"(data[0].x), "=r"(data[0].y)
321
+ : "l"(src));
322
+ #else
323
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
324
+ data[0] = src_cast[0];
325
+ #endif
326
+ }
327
+
328
+ template <>
329
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
330
+ const void* src,
331
+ bool do_access)
332
+ {
333
+ uint2* data = reinterpret_cast<uint2*>(dst);
334
+ #ifdef PTX_AVAILABLE
335
+ asm volatile(
336
+ "{\n"
337
+ "\t.reg .pred p;\n"
338
+ "\tsetp.ne.b32 p, %3, 0;\n"
339
+ "\tmov.b32 %0, 0;\n"
340
+ "\tmov.b32 %1, 0;\n"
341
+ "\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
342
+ "}\n"
343
+ : "=r"(data[0].x), "=r"(data[0].y)
344
+ : "l"(src), "r"((int)do_access));
345
+ #else
346
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
347
+ if (do_access) {
348
+ data[0] = src_cast[0];
349
+ } else {
350
+ data[0].x = 0;
351
+ data[0].y = 0;
352
+ }
353
+ #endif
354
+ }
355
+
356
+ template <>
357
+ __device__ __forceinline__ void load_global<4>(void* dst, const void* src)
358
+ {
359
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
360
+ #ifdef PTX_AVAILABLE
361
+ asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
362
+ #else
363
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
364
+ data[0] = src_cast[0];
365
+ #endif
366
+ }
367
+
368
+ template <>
369
+ __device__ __forceinline__ void load_global<4>(void* dst, const void* src, bool do_access)
370
+ {
371
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
372
+ #ifdef PTX_AVAILABLE
373
+ asm volatile(
374
+ "{\n"
375
+ "\t.reg .pred p;\n"
376
+ "\tsetp.ne.b32 p, %2, 0;\n"
377
+ "\tmov.b32 %0, 0;\n"
378
+ "\t@p ld.global.u32 {%0}, [%1];\n"
379
+ "}\n"
380
+ : "=r"(data[0])
381
+ : "l"(src), "r"((int)do_access));
382
+ #else
383
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
384
+ if (do_access) {
385
+ data[0] = src_cast[0];
386
+ } else {
387
+ data[0] = 0;
388
+ }
389
+ #endif
390
+ }
391
+
392
+ template <>
393
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src)
394
+ {
395
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
396
+ #ifdef PTX_AVAILABLE
397
+ asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
398
+ #else
399
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
400
+ data[0] = src_cast[0];
401
+ #endif
402
+ }
403
+
404
+ template <>
405
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst,
406
+ const void* src,
407
+ bool do_access)
408
+ {
409
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
410
+ #ifdef PTX_AVAILABLE
411
+ asm volatile(
412
+ "{\n"
413
+ "\t.reg .pred p;\n"
414
+ "\tsetp.ne.b32 p, %2, 0;\n"
415
+ "\tmov.b32 %0, 0;\n"
416
+ "\t@p ld.global.cg.u32 {%0}, [%1];\n"
417
+ "}\n"
418
+ : "=r"(data[0])
419
+ : "l"(src), "r"((int)do_access));
420
+ #else
421
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
422
+ if (do_access) {
423
+ data[0] = src_cast[0];
424
+ } else {
425
+ data[0] = 0;
426
+ }
427
+ #endif
428
+ }
429
+
430
+ template <>
431
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
432
+ const void* src)
433
+ {
434
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
435
+ #ifdef PTX_AVAILABLE
436
+ asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
437
+ #else
438
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
439
+ data[0] = src_cast[0];
440
+ #endif
441
+ }
442
+
443
+ template <>
444
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
445
+ const void* src,
446
+ bool do_access)
447
+ {
448
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
449
+ #ifdef PTX_AVAILABLE
450
+ asm volatile(
451
+ "{\n"
452
+ "\t.reg .pred p;\n"
453
+ "\tsetp.ne.b32 p, %2, 0;\n"
454
+ "\tmov.b32 %0, 0;\n"
455
+ "\t@p ld.global.cs.u32 {%0}, [%1];\n"
456
+ "}\n"
457
+ : "=r"(data[0])
458
+ : "l"(src), "r"((int)do_access));
459
+ #else
460
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
461
+ if (do_access) {
462
+ data[0] = src_cast[0];
463
+ } else {
464
+ data[0] = 0;
465
+ }
466
+ #endif
467
+ }
468
+
469
+ template <>
470
+ __device__ __forceinline__ void load_global<2>(void* dst, const void* src)
471
+ {
472
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
473
+ #ifdef PTX_AVAILABLE
474
+ asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
475
+ #else
476
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
477
+ data[0] = src_cast[0];
478
+ #endif
479
+ }
480
+
481
+ template <>
482
+ __device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access)
483
+ {
484
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
485
+ #ifdef PTX_AVAILABLE
486
+ asm volatile(
487
+ "{\n"
488
+ "\t.reg .pred p;\n"
489
+ "\tsetp.ne.b32 p, %2, 0;\n"
490
+ "\tmov.u16 %0, 0;\n"
491
+ "\t@p ld.global.u16 {%0}, [%1];\n"
492
+ "}\n"
493
+ : "=h"(*data)
494
+ : "l"(src), "r"((int)do_access));
495
+ #else
496
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
497
+ if (do_access) {
498
+ data[0] = src_cast[0];
499
+ } else {
500
+ data[0] = 0;
501
+ }
502
+ #endif
503
+ }
504
+
505
+ template <>
506
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src)
507
+ {
508
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
509
+ #ifdef PTX_AVAILABLE
510
+ asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
511
+ #else
512
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
513
+ data[0] = src_cast[0];
514
+ #endif
515
+ }
516
+
517
+ template <>
518
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst,
519
+ const void* src,
520
+ bool do_access)
521
+ {
522
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
523
+ #ifdef PTX_AVAILABLE
524
+ asm volatile(
525
+ "{\n"
526
+ "\t.reg .pred p;\n"
527
+ "\tsetp.ne.b32 p, %2, 0;\n"
528
+ "\tmov.u16 %0, 0;\n"
529
+ "\t@p ld.global.cg.u16 {%0}, [%1];\n"
530
+ "}\n"
531
+ : "=h"(*data)
532
+ : "l"(src), "r"((int)do_access));
533
+ #else
534
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
535
+ if (do_access) {
536
+ data[0] = src_cast[0];
537
+ } else {
538
+ data[0] = 0;
539
+ }
540
+ #endif
541
+ }
542
+
543
+ template <>
544
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
545
+ const void* src)
546
+ {
547
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
548
+ #ifdef PTX_AVAILABLE
549
+ asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
550
+ #else
551
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
552
+ data[0] = src_cast[0];
553
+ #endif
554
+ }
555
+
556
+ template <>
557
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
558
+ const void* src,
559
+ bool do_access)
560
+ {
561
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
562
+ #ifdef PTX_AVAILABLE
563
+ asm volatile(
564
+ "{\n"
565
+ "\t.reg .pred p;\n"
566
+ "\tsetp.ne.b32 p, %2, 0;\n"
567
+ "\tmov.u16 %0, 0;\n"
568
+ "\t@p ld.global.cs.u16 {%0}, [%1];\n"
569
+ "}\n"
570
+ : "=h"(*data)
571
+ : "l"(src), "r"((int)do_access));
572
+ #else
573
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
574
+ if (do_access) {
575
+ data[0] = src_cast[0];
576
+ } else {
577
+ data[0] = 0;
578
+ }
579
+ #endif
580
+ }
581
+
582
+ /////////// Load Shared ///////////
583
+ namespace internal {
584
+
585
+ #ifdef PTX_AVAILABLE
586
+ __device__ __forceinline__ unsigned convert_to_shared(const void* ptr)
587
+ {
588
+ #if __CUDACC_VER_MAJOR__ >= 11
589
+ // In CUDA 11 we have a builtin intrinsic
590
+ return __cvta_generic_to_shared(ptr);
591
+ #else
592
+ unsigned ret_val;
593
+ asm volatile(
594
+ "{\n"
595
+ "\t.reg .u64 p1;\n"
596
+ "\tcvta.to.shared.u64 p1, %1\n"
597
+ "\tcvt.u32.u64 %0, p1;\n"
598
+ "}\n"
599
+ : "=r"(ret_val)
600
+ : "l"(ptr));
601
+ return ret_val;
602
+ #endif
603
+ }
604
+ #endif
605
+
606
+ } // namespace internal
607
+
608
+ template <>
609
+ __device__ __forceinline__ void load_shared<16>(void* dst, const void* src)
610
+ {
611
+ uint4* data = reinterpret_cast<uint4*>(dst);
612
+ #ifdef PTX_AVAILABLE
613
+ unsigned src_shr = internal::convert_to_shared(src);
614
+
615
+ asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
616
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
617
+ : "r"(src_shr));
618
+ #else
619
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
620
+ data[0] = src_cast[0];
621
+ #endif
622
+ }
623
+
624
+ template <>
625
+ __device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access)
626
+ {
627
+ uint4* data = reinterpret_cast<uint4*>(dst);
628
+ #ifdef PTX_AVAILABLE
629
+ unsigned src_shr = internal::convert_to_shared(src);
630
+
631
+ asm volatile(
632
+ "{\n"
633
+ "\t.reg .pred p;\n"
634
+ "\tsetp.ne.b32 p, %5, 0;\n"
635
+ "\tmov.b32 %0, 0;\n"
636
+ "\tmov.b32 %1, 0;\n"
637
+ "\tmov.b32 %2, 0;\n"
638
+ "\tmov.b32 %3, 0;\n"
639
+ "\t@p ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
640
+ "}\n"
641
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
642
+ : "r"(src_shr), "r"((int)do_access));
643
+ #else
644
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
645
+ if (do_access) {
646
+ data[0] = src_cast[0];
647
+ } else {
648
+ data[0].x = 0;
649
+ data[0].y = 0;
650
+ data[0].z = 0;
651
+ data[0].w = 0;
652
+ }
653
+ #endif
654
+ }
655
+
656
+ template <>
657
+ __device__ __forceinline__ void load_shared<8>(void* dst, const void* src)
658
+ {
659
+ uint2* data = reinterpret_cast<uint2*>(dst);
660
+ #ifdef PTX_AVAILABLE
661
+ unsigned src_shr = internal::convert_to_shared(src);
662
+
663
+ asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
664
+ : "=r"(data[0].x), "=r"(data[0].y)
665
+ : "r"(src_shr));
666
+ #else
667
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
668
+ data[0] = src_cast[0];
669
+ #endif
670
+ }
671
+
672
+ template <>
673
+ __device__ __forceinline__ void load_shared<8>(void* dst, const void* src, bool do_access)
674
+ {
675
+ uint2* data = reinterpret_cast<uint2*>(dst);
676
+ #ifdef PTX_AVAILABLE
677
+ unsigned src_shr = internal::convert_to_shared(src);
678
+
679
+ asm volatile(
680
+ "{\n"
681
+ "\t.reg .pred p;\n"
682
+ "\tsetp.ne.b32 p, %3, 0;\n"
683
+ "\tmov.b32 %0, 0;\n"
684
+ "\tmov.b32 %1, 0;\n"
685
+ "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n"
686
+ "}\n"
687
+ : "=r"(data[0].x), "=r"(data[0].y)
688
+ : "r"(src_shr), "r"((int)do_access));
689
+ #else
690
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
691
+ if (do_access) {
692
+ data[0] = src_cast[0];
693
+ } else {
694
+ data[0].x = 0;
695
+ data[0].y = 0;
696
+ }
697
+ #endif
698
+ }
699
+
700
+ template <>
701
+ __device__ __forceinline__ void load_shared<4>(void* dst, const void* src)
702
+ {
703
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
704
+ #ifdef PTX_AVAILABLE
705
+ unsigned src_shr = internal::convert_to_shared(src);
706
+
707
+ asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr));
708
+ #else
709
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
710
+ data[0] = src_cast[0];
711
+ #endif
712
+ }
713
+
714
+ template <>
715
+ __device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access)
716
+ {
717
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
718
+ #ifdef PTX_AVAILABLE
719
+ unsigned src_shr = internal::convert_to_shared(src);
720
+
721
+ asm volatile(
722
+ "{\n"
723
+ "\t.reg .pred p;\n"
724
+ "\tsetp.ne.b32 p, %2, 0;\n"
725
+ "\tmov.b32 %0, 0;\n"
726
+ "\t@p ld.shared.u32 %0, [%1];\n"
727
+ "}\n"
728
+ : "=r"(data[0])
729
+ : "r"(src_shr), "r"((int)do_access));
730
+ #else
731
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
732
+ if (do_access) {
733
+ data[0] = src_cast[0];
734
+ } else {
735
+ data[0] = 0;
736
+ }
737
+ #endif
738
+ }
739
+
740
+ /////////// Store Global ///////////
741
+
742
+ template <>
743
+ __device__ __forceinline__ void store_global<16>(void* dst, const void* src)
744
+ {
745
+ const uint4* data = reinterpret_cast<const uint4*>(src);
746
+ #ifdef PTX_AVAILABLE
747
+ asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n"
748
+ :
749
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
750
+ : "memory");
751
+ #else
752
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
753
+ dst_cast[0] = data[0];
754
+ #endif
755
+ }
756
+
757
+ template <>
758
+ __device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst,
759
+ const void* src)
760
+ {
761
+ const uint4* data = reinterpret_cast<const uint4*>(src);
762
+ #ifdef PTX_AVAILABLE
763
+ asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n"
764
+ :
765
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
766
+ : "memory");
767
+ #else
768
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
769
+ dst_cast[0] = data[0];
770
+ #endif
771
+ }
772
+
773
+ template <>
774
+ __device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst,
775
+ const void* src)
776
+ {
777
+ const uint4* data = reinterpret_cast<const uint4*>(src);
778
+ #ifdef PTX_AVAILABLE
779
+ asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n"
780
+ :
781
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
782
+ : "memory");
783
+ #else
784
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
785
+ dst_cast[0] = data[0];
786
+ #endif
787
+ }
788
+
789
+ template <>
790
+ __device__ __forceinline__ void store_global<8>(void* dst, const void* src)
791
+ {
792
+ const uint2* data = reinterpret_cast<const uint2*>(src);
793
+ #ifdef PTX_AVAILABLE
794
+ asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n"
795
+ :
796
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
797
+ #else
798
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
799
+ dst_cast[0] = data[0];
800
+ #endif
801
+ }
802
+
803
+ template <>
804
+ __device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst,
805
+ const void* src)
806
+ {
807
+ const uint2* data = reinterpret_cast<const uint2*>(src);
808
+ #ifdef PTX_AVAILABLE
809
+ asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n"
810
+ :
811
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
812
+ #else
813
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
814
+ dst_cast[0] = data[0];
815
+ #endif
816
+ }
817
+
818
+ template <>
819
+ __device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst,
820
+ const void* src)
821
+ {
822
+ const uint2* data = reinterpret_cast<const uint2*>(src);
823
+ #ifdef PTX_AVAILABLE
824
+ asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n"
825
+ :
826
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
827
+ #else
828
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
829
+ dst_cast[0] = data[0];
830
+ #endif
831
+ }
832
+
833
+ template <>
834
+ __device__ __forceinline__ void store_global<4>(void* dst, const void* src)
835
+ {
836
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
837
+ #ifdef PTX_AVAILABLE
838
+ asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
839
+ #else
840
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
841
+ dst_cast[0] = data[0];
842
+ #endif
843
+ }
844
+
845
+ template <>
846
+ __device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst,
847
+ const void* src)
848
+ {
849
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
850
+ #ifdef PTX_AVAILABLE
851
+ asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
852
+ #else
853
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
854
+ dst_cast[0] = data[0];
855
+ #endif
856
+ }
857
+
858
+ template <>
859
+ __device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst,
860
+ const void* src)
861
+ {
862
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
863
+ #ifdef PTX_AVAILABLE
864
+ asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
865
+ #else
866
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
867
+ dst_cast[0] = data[0];
868
+ #endif
869
+ }
870
+
871
+ /////////// Store Shared ///////////
872
+
873
+ template <>
874
+ __device__ __forceinline__ void store_shared<16>(void* dst, const void* src)
875
+ {
876
+ const uint4* data = reinterpret_cast<const uint4*>(src);
877
+ #ifdef PTX_AVAILABLE
878
+ unsigned dst_int = internal::convert_to_shared(dst);
879
+
880
+ asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
881
+ :
882
+ : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w));
883
+ #else
884
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
885
+ dst_cast[0] = data[0];
886
+ #endif
887
+ }
888
+
889
+ template <>
890
+ __device__ __forceinline__ void store_shared<8>(void* dst, const void* src)
891
+ {
892
+ const uint2* data = reinterpret_cast<const uint2*>(src);
893
+ #ifdef PTX_AVAILABLE
894
+ unsigned dst_int = internal::convert_to_shared(dst);
895
+
896
+ asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
897
+ :
898
+ : "r"(dst_int), "r"(data[0].x), "r"(data[0].y));
899
+ #else
900
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
901
+ dst_cast[0] = data[0];
902
+ #endif
903
+ }
904
+
905
+ template <>
906
+ __device__ __forceinline__ void store_shared<4>(void* dst, const void* src)
907
+ {
908
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
909
+ #ifdef PTX_AVAILABLE
910
+ unsigned dst_int = internal::convert_to_shared(dst);
911
+
912
+ asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data));
913
+ #else
914
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
915
+ dst_cast[0] = data[0];
916
+ #endif
917
+ }
918
+
919
+ /////////// Asynchronous Memory Copy ///////////
920
+
921
+ #ifdef ASYNC_COPY_AVAILABLE
922
+ template <int AccessSize>
923
+ __device__ __forceinline__ void memcpy_async(void* shr, const void* gbl)
924
+ {
925
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
926
+ unsigned shr_int = internal::convert_to_shared(shr);
927
+
928
+ asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n"
929
+ :
930
+ : "r"(shr_int), "l"(gbl), "n"(AccessSize));
931
+ }
932
+
933
+ template <int AccessSize>
934
+ __device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate)
935
+ {
936
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
937
+ unsigned shr_int = internal::convert_to_shared(shr);
938
+
939
+ asm volatile(
940
+ "{\n"
941
+ " .reg .pred p;\n"
942
+ " setp.ne.b32 p, %0, 0;\n"
943
+ " @p cp.async.ca.shared.global [%1], [%2], %3;\n"
944
+ "}\n"
945
+ :
946
+ : "r"((int)predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize));
947
+ }
948
+
949
+ template <int AccessSize>
950
+ __device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate)
951
+ {
952
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
953
+ unsigned shr_int = internal::convert_to_shared(shr);
954
+ int bytes_to_copy = (predicate ? AccessSize : 0);
955
+
956
+ asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n"
957
+ :
958
+ : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
959
+ }
960
+
961
+ template <int AccessSize>
962
+ __device__ __forceinline__ void memcpy_async_zero_nop(void* shr,
963
+ const void* gbl,
964
+ bool zero_predicate,
965
+ bool nop_predicate)
966
+ {
967
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
968
+ unsigned shr_int = internal::convert_to_shared(shr);
969
+ int bytes_to_copy = (zero_predicate ? AccessSize : 0);
970
+
971
+ asm volatile(
972
+ "{\n"
973
+ " .reg .pred p;\n"
974
+ " setp.ne.b32 p, %0, 0;\n"
975
+ " @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n"
976
+ "}\n"
977
+ :
978
+ : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
979
+ }
980
+
981
+ // Cache global variants. Separate interface to require deliberate use of them.
982
+ __device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl)
983
+ {
984
+ unsigned shr_int = internal::convert_to_shared(shr);
985
+
986
+ asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl));
987
+ }
988
+
989
+ __device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate)
990
+ {
991
+ unsigned shr_int = internal::convert_to_shared(shr);
992
+
993
+ asm volatile(
994
+ "{\n"
995
+ " .reg .pred p;\n"
996
+ " setp.ne.b32 p, %0, 0;\n"
997
+ " @p cp.async.cg.shared.global [%1], [%2], 16;\n"
998
+ "}\n"
999
+ :
1000
+ : "r"((int)predicate), "r"(shr_int), "l"(gbl));
1001
+ }
1002
+
1003
+ __device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate)
1004
+ {
1005
+ unsigned shr_int = internal::convert_to_shared(shr);
1006
+ int bytes_to_copy = (predicate ? 16 : 0);
1007
+
1008
+ asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n"
1009
+ :
1010
+ : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
1011
+ }
1012
+
1013
+ __device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr,
1014
+ const void* gbl,
1015
+ bool zero_predicate,
1016
+ bool nop_predicate)
1017
+ {
1018
+ unsigned shr_int = internal::convert_to_shared(shr);
1019
+ int bytes_to_copy = (zero_predicate ? 16 : 0);
1020
+
1021
+ asm volatile(
1022
+ "{\n"
1023
+ " .reg .pred p;\n"
1024
+ " setp.ne.b32 p, %0, 0;\n"
1025
+ " @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n"
1026
+ "}\n"
1027
+ :
1028
+ : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
1029
+ }
1030
+
1031
+ __device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); }
1032
+
1033
+ template <int stages>
1034
+ __device__ __forceinline__ void memcpy_async_wait()
1035
+ {
1036
+ static_assert(stages <= 8);
1037
+
1038
+ asm volatile("cp.async.wait_group %0;\n" : : "n"(stages));
1039
+ }
1040
+
1041
+ // TODO: The tail complete should be a known compile time artifact, should try and induce this
1042
+ // without all of the branches from the call-site. This is a hacky solution.
1043
+ template <>
1044
+ __device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages)
1045
+ {
1046
+ if (remaining_stages == 0) memcpy_async_wait<0>();
1047
+ }
1048
+
1049
+ template <>
1050
+ __device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages)
1051
+ {
1052
+ if (remaining_stages == 1)
1053
+ memcpy_async_wait<1>();
1054
+ else if (remaining_stages == 0)
1055
+ memcpy_async_wait<0>();
1056
+ }
1057
+
1058
+ template <>
1059
+ __device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages)
1060
+ {
1061
+ if (remaining_stages == 2)
1062
+ memcpy_async_wait<2>();
1063
+ else if (remaining_stages == 1)
1064
+ memcpy_async_wait<1>();
1065
+ else if (remaining_stages == 0)
1066
+ memcpy_async_wait<0>();
1067
+ }
1068
+
1069
+ template <>
1070
+ __device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages)
1071
+ {
1072
+ if (remaining_stages == 3)
1073
+ memcpy_async_wait<3>();
1074
+ else if (remaining_stages == 2)
1075
+ memcpy_async_wait<2>();
1076
+ else if (remaining_stages == 1)
1077
+ memcpy_async_wait<1>();
1078
+ else if (remaining_stages == 0)
1079
+ memcpy_async_wait<0>();
1080
+ }
1081
+
1082
+ template <>
1083
+ __device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages)
1084
+ {
1085
+ if (remaining_stages == 4)
1086
+ memcpy_async_wait<4>();
1087
+ else if (remaining_stages == 3)
1088
+ memcpy_async_wait<3>();
1089
+ else if (remaining_stages == 2)
1090
+ memcpy_async_wait<2>();
1091
+ else if (remaining_stages == 1)
1092
+ memcpy_async_wait<1>();
1093
+ else if (remaining_stages == 0)
1094
+ memcpy_async_wait<0>();
1095
+ }
1096
+
1097
+ template <>
1098
+ __device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages)
1099
+ {
1100
+ if (remaining_stages == 5)
1101
+ memcpy_async_wait<5>();
1102
+ else if (remaining_stages == 4)
1103
+ memcpy_async_wait<4>();
1104
+ else if (remaining_stages == 3)
1105
+ memcpy_async_wait<3>();
1106
+ else if (remaining_stages == 2)
1107
+ memcpy_async_wait<2>();
1108
+ else if (remaining_stages == 1)
1109
+ memcpy_async_wait<1>();
1110
+ else if (remaining_stages == 0)
1111
+ memcpy_async_wait<0>();
1112
+ }
1113
+ #endif
1114
+
1115
+ } // namespace mem_access
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h ADDED
@@ -0,0 +1,778 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "conversion_utils.h"
9
+ #include "ds_kernel_utils.h"
10
+ #include "memory_access_utils.h"
11
+
12
+ namespace cg = cooperative_groups;
13
+
14
+ namespace reduce {
15
+
16
+ enum class ROpType {
17
+ // Addition
18
+ Add,
19
+
20
+ // Maximum reduction
21
+ Max,
22
+
23
+ // Minimum reduction
24
+ Min,
25
+ };
26
+
27
+ constexpr int max_threads = 1024;
28
+ constexpr int max_warps = max_threads / hw_warp_size;
29
+
30
+ /*
31
+ High level API. The API takes in a set of operations and variables
32
+ and performs that reduction operation on that variable. The reductions
33
+ of each of the arguments are completely independent of each other (
34
+ i.e., the val1-op1 combination has no impact on val2-op2).
35
+
36
+ Example usage:
37
+ ``` cpp
38
+ float max_val;
39
+ float min_val;
40
+ reduce::block<rop::Max, rop::Min>(tb, warp, max_val, min_val);
41
+ ```
42
+
43
+ TODO(cmikeh2): In theory, we might be able to do this sequentially with
44
+ device functions and rely on the assembler correctly behaving. My initial
45
+ instinct is this won't work, but if it does it would reduce implementation
46
+ cost significantly.
47
+
48
+ TODO(cmikeh2): We need to support sub-block reductions. The warp intrinsic
49
+ currently supports this (more incidentally than anything else). It is not
50
+ uncommon in something like softmax or a fused attention kernel to map multiple
51
+ reductions to a thread block, but each reduction itself is only scoped
52
+ to part of the threads (i.e block size = 512, 128 threads per reduction).
53
+ */
54
+ template <ROpType Op, int warp_bound = max_warps>
55
+ DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val);
56
+
57
+ template <ROpType Op1, ROpType Op2, int warp_bound = max_warps>
58
+ DS_D_INLINE void block(cg::thread_block& tb,
59
+ cg::thread_block_tile<hw_warp_size>& warp,
60
+ float& val1,
61
+ float& val2);
62
+
63
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound = max_warps>
64
+ DS_D_INLINE void block(cg::thread_block& tb,
65
+ cg::thread_block_tile<hw_warp_size>& warp,
66
+ float& val1,
67
+ float& val2,
68
+ float& val3);
69
+
70
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound = max_warps>
71
+ DS_D_INLINE void block(cg::thread_block& tb,
72
+ cg::thread_block_tile<hw_warp_size>& warp,
73
+ float& val1,
74
+ float& val2,
75
+ float& val3,
76
+ float& val4);
77
+
78
+ /*
79
+ The partitioned block is a special case of the above where in the warps of a threadblock are
80
+ partitioned into separate independent reductions. For example, I might have an 8 warp thread block
81
+ in which each pair of warps is processing an independent piece of data. I would then reduce that
82
+ data with the something like the following:
83
+ ``` cpp
84
+ float max_val;
85
+ reduce::partitioned_block<rop::Max, 2>(tb, warp, max_val);
86
+ ```
87
+ After which, each pair of warps would have coherent data with each other. Note, this API will not
88
+ provide correct results if the number of warps per partition is not a power of 2.
89
+ */
90
+ template <ROpType Op, int num_threads>
91
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
92
+ cg::thread_block_tile<hw_warp_size>& warp,
93
+ float& val);
94
+
95
+ template <ROpType Op1, ROpType Op2, int num_threads>
96
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
97
+ cg::thread_block_tile<hw_warp_size>& warp,
98
+ float& val1,
99
+ float& val2);
100
+
101
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
102
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
103
+ cg::thread_block_tile<hw_warp_size>& warp,
104
+ float& val1,
105
+ float& val2,
106
+ float& val3);
107
+
108
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
109
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
110
+ cg::thread_block_tile<hw_warp_size>& warp,
111
+ float& val1,
112
+ float& val2,
113
+ float& val3,
114
+ float& val4);
115
+
116
+ /*
117
+ Single element reduction primitives. Used inside serial collection
118
+ loops.
119
+
120
+ Example usage:
121
+ using rop = reduce::OpType;
122
+ float min = init<rop::Min>();
123
+ for (int i = 0; i < 4; i++) {
124
+ min = reduce::element<rop::Min>(min, data[i]);
125
+ }
126
+ */
127
+
128
+ template <ROpType Op, typename T>
129
+ DS_D_INLINE T element(const T lhs, const T rhs);
130
+
131
+ template <ROpType OType, typename T = float>
132
+ DS_D_INLINE T init();
133
+
134
+ /********************** Internal reduction APIs **********************/
135
+
136
+ /*
137
+ Single element "reductions". TODO(cmikeh2): this sort of "op" concept
138
+ should be refactored into its own implementation at some point. This interface
139
+ may be easily expanded for new types/operations, but the typical reductions
140
+ we need are covered with min/max/add on float.
141
+
142
+ NOTE: there is no mean reduction because that relies on knowledge of how
143
+ many values were already reduced into each scalar. Implementing this on top
144
+ of reduce should be straightforward (can just wrap the sum reduction) and
145
+ would be a good extension of the header.
146
+ */
147
+
148
// Index of the calling thread's warp within its (possibly 3D) thread block.
DS_D_INLINE int _warp_rank()
{
    // Flatten the 3D thread index, then divide into warp-sized groups.
    const int tid = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return tid / hw_warp_size;
}
154
+
155
+ /* Float element reduce implementations */
156
/* Float element reduce implementations */
template <>
DS_D_INLINE float element<ROpType::Add>(const float a, const float b)
{
    return a + b;
}

template <>
DS_D_INLINE float element<ROpType::Max>(const float a, const float b)
{
    return fmaxf(a, b);
}

template <>
DS_D_INLINE float element<ROpType::Min>(const float a, const float b)
{
    return fminf(a, b);
}

/* __half element reduce implementation */
template <>
DS_D_INLINE __half element<ROpType::Add>(const __half a, const __half b)
{
    return a + b;
}

template <>
DS_D_INLINE __half element<ROpType::Max>(const __half a, const __half b)
{
#if __CUDA_ARCH__ >= 800
    // Intrinsic limited to Ampere + newer
    return __hmax(a, b);
#else
    return (a > b) ? a : b;
#endif
}

template <>
DS_D_INLINE __half element<ROpType::Min>(const __half a, const __half b)
{
#if __CUDA_ARCH__ >= 800
    // Intrinsic limited to Ampere + newer
    return __hmin(a, b);
#else
    return (a < b) ? a : b;
#endif
}

/* __half2 element reduce implementation (component-wise) */
template <>
DS_D_INLINE __half2 element<ROpType::Add>(const __half2 a, const __half2 b)
{
    return a + b;
}

template <>
DS_D_INLINE __half2 element<ROpType::Max>(const __half2 a, const __half2 b)
{
#if __CUDA_ARCH__ >= 800
    return __hmax2(a, b);
#else
    __half2 out;
    out.x = (a.x > b.x) ? a.x : b.x;
    out.y = (a.y > b.y) ? a.y : b.y;
    return out;
#endif
}

template <>
DS_D_INLINE __half2 element<ROpType::Min>(const __half2 a, const __half2 b)
{
#if __CUDA_ARCH__ >= 800
    return __hmin2(a, b);
#else
    __half2 out;
    out.x = (a.x < b.x) ? a.x : b.x;
    out.y = (a.y < b.y) ? a.y : b.y;
    return out;
#endif
}

/* Integer element reduce implementations */
template <>
DS_D_INLINE int32_t element<ROpType::Add>(const int32_t a, const int32_t b)
{
    return a + b;
}

template <>
DS_D_INLINE int32_t element<ROpType::Max>(const int32_t a, const int32_t b)
{
    return (a > b) ? a : b;
}

template <>
DS_D_INLINE int32_t element<ROpType::Min>(const int32_t a, const int32_t b)
{
    return (a < b) ? a : b;
}

template <>
DS_D_INLINE uint32_t element<ROpType::Add>(const uint32_t a, const uint32_t b)
{
    return a + b;
}

template <>
DS_D_INLINE uint32_t element<ROpType::Max>(const uint32_t a, const uint32_t b)
{
    return (a > b) ? a : b;
}

template <>
DS_D_INLINE uint32_t element<ROpType::Min>(const uint32_t a, const uint32_t b)
{
    return (a < b) ? a : b;
}

template <>
DS_D_INLINE int64_t element<ROpType::Add>(const int64_t a, const int64_t b)
{
    return a + b;
}

template <>
DS_D_INLINE int64_t element<ROpType::Max>(const int64_t a, const int64_t b)
{
    return (a > b) ? a : b;
}

template <>
DS_D_INLINE int64_t element<ROpType::Min>(const int64_t a, const int64_t b)
{
    return (a < b) ? a : b;
}
289
+
290
+ /*
291
+ Reduction initialization primitives
292
+ */
293
// Identity value for a float Add reduction.
template <>
DS_D_INLINE float init<ROpType::Add>()
{
    return 0.0f;
}

template <>
DS_D_INLINE float init<ROpType::Min>()
{
    // Positive infinity
    return INFINITY;
}

template <>
DS_D_INLINE float init<ROpType::Max>()
{
    // Negative infinity
    return -INFINITY;
}

// fp16 identities are built from raw bit patterns so they can be constexpr.
template <>
DS_D_INLINE __half init<ROpType::Add>()
{
    constexpr __half_raw zero = {0x0000};
    return __half(zero);
}

template <>
DS_D_INLINE __half init<ROpType::Min>()
{
    // 0x7C00 is the fp16 bit pattern for +inf
    constexpr __half_raw inf = {0x7C00};
    return __half(inf);
}

template <>
DS_D_INLINE __half init<ROpType::Max>()
{
    // 0xFC00 is the fp16 bit pattern for -inf
    constexpr __half_raw neg_inf = {0xFC00};
    return __half(neg_inf);
}

// __half2 identities: both lanes initialized to the scalar identity. ROCm lacks
// a constexpr __half2_raw constructor, hence the platform split.
template <>
DS_D_INLINE __half2 init<ROpType::Add>()
{
#ifdef __HIP_PLATFORM_AMD__
    return __half2{_Float16_2{0x0000, 0x0000}};
#else
    constexpr __half2_raw zero = {0x0000, 0x0000};
    return __half2(zero);
#endif
}

template <>
DS_D_INLINE __half2 init<ROpType::Min>()
{
#ifdef __HIP_PLATFORM_AMD__
    return __half2{_Float16_2{0x7C00, 0x7C00}};
#else
    constexpr __half2_raw inf = {0x7C00, 0x7C00};
    return __half2(inf);
#endif
}

template <>
DS_D_INLINE __half2 init<ROpType::Max>()
{
#ifdef __HIP_PLATFORM_AMD__
    return __half2{_Float16_2{0xFC00, 0xFC00}};
#else
    constexpr __half2_raw neg_inf = {0xFC00, 0xFC00};
    return __half2(neg_inf);
#endif
}

template <>
DS_D_INLINE int32_t init<ROpType::Add>()
{
    return 0;
}

template <>
DS_D_INLINE int32_t init<ROpType::Min>()
{
    // INT32_MAX
    return 0x7FFFFFFF;
}

template <>
DS_D_INLINE int32_t init<ROpType::Max>()
{
    // INT32_MIN bit pattern
    return 0x80000000;
}

template <>
DS_D_INLINE uint32_t init<ROpType::Add>()
{
    return 0;
}

template <>
DS_D_INLINE uint32_t init<ROpType::Min>()
{
    // UINT32_MAX
    return 0xFFFFFFFF;
}

template <>
DS_D_INLINE uint32_t init<ROpType::Max>()
{
    // Unsigned minimum
    return 0;
}

template <>
DS_D_INLINE int64_t init<ROpType::Add>()
{
    return 0;
}

template <>
DS_D_INLINE int64_t init<ROpType::Min>()
{
    // INT64_MAX
    return 0x7FFFFFFFFFFFFFFF;
}

template <>
DS_D_INLINE int64_t init<ROpType::Max>()
{
    // INT64_MIN bit pattern
    return 0x8000000000000000;
}

template <>
DS_D_INLINE uint64_t init<ROpType::Add>()
{
    return 0;
}

template <>
DS_D_INLINE uint64_t init<ROpType::Min>()
{
    // UINT64_MAX
    return 0xFFFFFFFFFFFFFFFF;
}

template <>
DS_D_INLINE uint64_t init<ROpType::Max>()
{
    // Unsigned minimum
    return 0;
}

// Array-of-identities initializers: populate data[0..k-1] with the identity of
// each reduction op, one overload per arity (mirrors the multi-op reductions).
template <ROpType Op, typename T>
DS_D_INLINE void init(T* data)
{
    data[0] = init<Op, T>();
}

template <ROpType Op1, ROpType Op2, typename T>
DS_D_INLINE void init(T* data)
{
    data[0] = init<Op1, T>();
    data[1] = init<Op2, T>();
}

template <ROpType Op1, ROpType Op2, ROpType Op3, typename T>
DS_D_INLINE void init(T* data)
{
    data[0] = init<Op1, T>();
    data[1] = init<Op2, T>();
    data[2] = init<Op3, T>();
}

template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, typename T>
DS_D_INLINE void init(T* data)
{
    data[0] = init<Op1, T>();
    data[1] = init<Op2, T>();
    data[2] = init<Op3, T>();
    data[3] = init<Op4, T>();
}
468
+
469
+ /*
470
+ Warp reduction primitives
471
+
472
+ `reduction_width` is an unsafe template parameter, that is that
473
+ when using `reduction_width` < hw_warp_size the warp is partitioned
474
+ into `hw_warp_size` / `reduction_width` groups of partial sums.
475
+
476
+ If someone can figure out how to use variadic templates in a reasonable way
477
+ here (fold is C++17 only and I don't think helps and recursion feels like
478
+ huge overkill that harms readability) that would be wonderful.
479
+ */
480
+
481
/*
XOR-shuffle (butterfly) warp reduction over `reduce_width` lanes: after
log2(reduce_width) exchange steps, every participating lane holds the fully
reduced value in data[0]. With reduce_width < hw_warp_size the warp splits
into independent lane groups (power-of-2 widths only; see note above).
*/
template <typename T, ROpType Op, int reduce_width = hw_warp_size>
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
{
#pragma unroll
    for (int i = 1; i < reduce_width; i *= 2) {
        data[0] = element<Op>(data[0], warp.shfl_xor(data[0], i));
    }
}

// Two independent reductions performed in lockstep within a single shuffle loop.
template <typename T, ROpType Op1, ROpType Op2, int reduce_width = hw_warp_size>
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
{
#pragma unroll
    for (int i = 1; i < reduce_width; i *= 2) {
        data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
        data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
    }
}

// Three independent reductions performed in lockstep.
template <typename T, ROpType Op1, ROpType Op2, ROpType Op3, int reduce_width = hw_warp_size>
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
{
#pragma unroll
    for (int i = 1; i < reduce_width; i *= 2) {
        data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
        data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
        data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
    }
}

// Four independent reductions performed in lockstep.
template <typename T,
          ROpType Op1,
          ROpType Op2,
          ROpType Op3,
          ROpType Op4,
          int reduce_width = hw_warp_size>
DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
{
#pragma unroll
    for (int i = 1; i < reduce_width; i *= 2) {
        data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
        data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
        data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
        data[3] = element<Op4>(data[3], warp.shfl_xor(data[3], i));
    }
}
527
+
528
+ /*
529
+ Implementation for primary block reduction that serves both `block` and
530
+ `partitioned_block`.
531
+
532
+ Total warps refers to the reduction width of the reduction, not
533
+ the number of warps in the block (which may exceed that
534
+ if the block is partitioned or if we do a conservative bound at
535
+ compile time).
536
+ */
537
+ template <typename T, int total_warps, ROpType... Ops>
538
+ DS_D_INLINE void _block(cg::thread_block& tb,
539
+ cg::thread_block_tile<hw_warp_size>& warp_arg,
540
+ T* data)
541
+ {
542
+ constexpr int elems = sizeof...(Ops);
543
+ constexpr int bytes = sizeof(T);
544
+ // Unused when `partition_size == 1` or total_warps == 1
545
+ __shared__ T reduce_buffer[max_warps * elems];
546
+
547
+ #ifdef __HIP_PLATFORM_AMD__
548
+ const int total_threads = blockDim.x * blockDim.y * blockDim.z;
549
+ const int running_warps = total_threads / hw_warp_size;
550
+ #else
551
+ const int running_warps = warp_arg.meta_group_size();
552
+ #endif
553
+
554
+ // Always perform warp-scope reduction
555
+ _warp<T, Ops...>(warp_arg, data);
556
+
557
+ // If max_warps == 1 let's skip the runtime check
558
+ if (total_warps != 1) {
559
+ if (warp_arg.thread_rank() == 0) {
560
+ #pragma unroll
561
+ for (int i = 0; i < elems; i++) {
562
+ mem_access::store_shared<bytes>(reduce_buffer + elems * _warp_rank() + i, data + i);
563
+ }
564
+ }
565
+
566
+ // Synchronization inside block-uniform conditional is safe
567
+ tb.sync();
568
+
569
+ if (_warp_rank() == 0) {
570
+ if (warp_arg.thread_rank() < running_warps) {
571
+ #pragma unroll
572
+ for (int i = 0; i < elems; i++) {
573
+ mem_access::load_shared<bytes>(
574
+ data + i, reduce_buffer + elems * warp_arg.thread_rank() + i);
575
+ }
576
+ } else {
577
+ init<Ops...>(data);
578
+ }
579
+
580
+ _warp<T, Ops..., total_warps>(warp_arg, data);
581
+
582
+ #pragma unroll
583
+ for (int i = 0; i < elems; i++) {
584
+ mem_access::store_shared<bytes>(reduce_buffer + elems * warp_arg.thread_rank() + i,
585
+ data + i);
586
+ }
587
+ }
588
+
589
+ // Synchronization inside block-uniform conditional is safe
590
+ tb.sync();
591
+
592
+ #pragma unroll
593
+ for (int i = 0; i < elems; i++) {
594
+ mem_access::load_shared<bytes>(data + i, reduce_buffer + _warp_rank() * elems + i);
595
+ }
596
+ }
597
+ }
598
+
599
+ /*
600
+ Main API implementations. For the most part, they just convert the individual
601
+ variables into arrays, which makes working with them easier with a single
602
+ implementation. In theory, we could use the `_block` implementation as another
603
+ option, but the nature of using a pointer is a little less safe and this allows
604
+ us to obfuscate the details of the partitioned implementation.
605
+ */
606
/*
Public block-scope reduction wrappers. Each overload stages its scalar
arguments in a local array, runs the shared `_block` implementation, and
unpacks the results — same contract for one through four simultaneous ops.
*/
template <ROpType Op, int warp_bound>
DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val)
{
    float staged[1] = {val};
    _block<float, warp_bound, Op>(tb, warp, staged);
    val = staged[0];
}

template <ROpType Op1, ROpType Op2, int warp_bound>
DS_D_INLINE void block(cg::thread_block& tb,
                       cg::thread_block_tile<hw_warp_size>& warp,
                       float& val1,
                       float& val2)
{
    float staged[2] = {val1, val2};
    _block<float, warp_bound, Op1, Op2>(tb, warp, staged);
    val1 = staged[0];
    val2 = staged[1];
}

template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound>
DS_D_INLINE void block(cg::thread_block& tb,
                       cg::thread_block_tile<hw_warp_size>& warp,
                       float& val1,
                       float& val2,
                       float& val3)
{
    float staged[3] = {val1, val2, val3};
    _block<float, warp_bound, Op1, Op2, Op3>(tb, warp, staged);
    val1 = staged[0];
    val2 = staged[1];
    val3 = staged[2];
}

template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound>
DS_D_INLINE void block(cg::thread_block& tb,
                       cg::thread_block_tile<hw_warp_size>& warp,
                       float& val1,
                       float& val2,
                       float& val3,
                       float& val4)
{
    float staged[4] = {val1, val2, val3, val4};
    _block<float, warp_bound, Op1, Op2, Op3, Op4>(tb, warp, staged);
    val1 = staged[0];
    val2 = staged[1];
    val3 = staged[2];
    val4 = staged[3];
}
653
+
654
+ /*
655
+ Note: for the partitioned blocks, the implementation does not support non-power of 2 blocks in order
656
+ to shorten block scale reduction length.
657
+ */
658
/*
Note: for the partitioned blocks, the implementation does not support non-power of 2 blocks
in order to shorten block scale reduction length.

Each overload selects between a sub-warp shuffle reduction (partition fits in
one warp) and the shared-memory `_block` path (partition spans warps). The
branch is on the compile-time `num_threads`, so only one side survives codegen.
*/
template <ROpType Op, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val)
{
    if (num_threads <= hw_warp_size) {
        _warp<float, Op, num_threads>(warp, &val);
    } else {
        constexpr int num_warps = num_threads / hw_warp_size;
        _block<float, num_warps, Op>(tb, warp, &val);
    }
}

template <ROpType Op1, ROpType Op2, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val1,
                                   float& val2)
{
    float data[2] = {val1, val2};

    if (num_threads <= hw_warp_size) {
        _warp<float, Op1, Op2, num_threads>(warp, data);
    } else {
        constexpr int num_warps = num_threads / hw_warp_size;
        _block<float, num_warps, Op1, Op2>(tb, warp, data);
    }

    val1 = data[0];
    val2 = data[1];
}

template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val1,
                                   float& val2,
                                   float& val3)
{
    float data[3] = {val1, val2, val3};

    if (num_threads <= hw_warp_size) {
        _warp<float, Op1, Op2, Op3, num_threads>(warp, data);
    } else {
        constexpr int num_warps = num_threads / hw_warp_size;
        _block<float, num_warps, Op1, Op2, Op3>(tb, warp, data);
    }

    val1 = data[0];
    val2 = data[1];
    val3 = data[2];
}

template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
DS_D_INLINE void partitioned_block(cg::thread_block& tb,
                                   cg::thread_block_tile<hw_warp_size>& warp,
                                   float& val1,
                                   float& val2,
                                   float& val3,
                                   float& val4)
{
    float data[4] = {val1, val2, val3, val4};

    if (num_threads <= hw_warp_size) {
        _warp<float, Op1, Op2, Op3, Op4, num_threads>(warp, data);
    } else {
        constexpr int num_warps = num_threads / hw_warp_size;
        _block<float, num_warps, Op1, Op2, Op3, Op4>(tb, warp, data);
    }

    val1 = data[0];
    val2 = data[1];
    val3 = data[2];
    val4 = data[3];
}
733
+
734
+ /*
735
+ Arg-reduce is a specialization of the above. We only support this with a single reduction
736
+ parameter. This only works for max/min reductions.
737
+ */
738
+
739
// Result of an argmax/argmin reduction: the winning value and its index.
// 8-byte aligned so the pair can be reinterpreted as a single int64_t.
__align__(8) struct IdxReduceResult {
    /*
    NOTE: ORDERING MATTERS HERE! The idx is the least significant set of bits
    and the val is the most significant. Changing the order of this declaration
    will break the code.
    NOTE(review): this layout relies on little-endian byte order (true for all
    CUDA/ROCm GPU targets) so that `idx` occupies the low 32 bits of the
    reinterpreted int64_t — confirm if ever ported elsewhere.
    */
    int idx;
    float val;
};
748
+
749
/*
Block-wide arg-reduction (argmax/argmin): returns the winning value together
with the index of the thread's candidate that produced it. Only valid for
ROpType::Max / ROpType::Min. `warpBound` plays the same role as in `_block`.
*/
template <ROpType Op, int warpBound>
DS_D_INLINE IdxReduceResult
idx_reduce(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float val, int idx)
{
    IdxReduceResult res = {idx, val};

    // Clear out the nan. This shouldn't be an issue for our initial applications
    if (isnan(val)) res.val = init<Op>();

    // Can do float compares as integers. By packing the index into the lower bits
    // we can just do a single int64 rather than a branch, compare, and select.
    // One side benefit of this is that it is by nature a stable algorithm and
    // will always bias ties to the higher index.
    int64_t* res_as_int = reinterpret_cast<int64_t*>(&res);

    // The way floating point compare works is normally to perform a sign comparison
    // and if they match, then do a comparison of the rest of the bits as unsigned
    // integers. Since we are bundling these, that means for negative values we need
    // to reverse the sort order, which we can do with an XOR. The mask flips every
    // bit of the packed float except its sign bit; the index bits are untouched.
    if (val < 0) { *res_as_int ^= 0x7fffffff00000000; }

    // Reduce the packed (val, idx) pairs as plain int64 max/min.
    _block<int64_t, warpBound, Op>(tb, warp, res_as_int);

    // Sign bit is preserved, so we can check if we need to invert the mantissa back
    if (res.val < 0) { *res_as_int ^= 0x7fffffff00000000; }

    return res;
}
777
+
778
+ } // namespace reduce
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .atom_builder import *
7
+ from .blocked_flash import *
8
+ from .embed import *
9
+ from .linear_blocked_kv_rotary import *
10
+ from .logits_gather import *
11
+ from .moe_gather import *
12
+ from .moe_scatter import *
13
+ from .top_k_gating import *
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (396 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .atom_builder import *
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (237 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
#include <algorithm>

#include "atom_builder.h"
#include "attention_atom.h"
#include "ragged_dtypes.h"
9
+
10
/*
Populate `atoms_ten` with one AttentionAtom per q_block_size-sized chunk of
each in-flight sequence, and return the number of atoms written.

Args:
  atoms_ten: pre-allocated host tensor reinterpreted as AttentionAtom[].
  batch_metadata: host buffer reinterpreted as a RaggedBatchDescriptor.
  seq_metadata: host buffer reinterpreted as InflightSeqDescriptor[].
  kv_ptrs: host buffer of per-sequence pointers to KV block-index lists.
  q_block_size / kv_block_size: tile sizes chosen by the attention kernel.

NOTE: all tensors must be CPU-resident and laid out exactly as the structs
above describe — the raw reinterpret_casts perform no validation, and the
caller is responsible for sizing `atoms_ten` large enough for every atom.
*/
int32_t build_atoms(torch::Tensor& atoms_ten,
                    torch::Tensor& batch_metadata,
                    torch::Tensor& seq_metadata,
                    torch::Tensor& kv_ptrs,
                    const int32_t q_block_size,
                    const int32_t kv_block_size)
{
    const RaggedBatchDescriptor* batch_desc =
        reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr());

    const InflightSeqDescriptor* seq_desc =
        reinterpret_cast<const InflightSeqDescriptor*>(seq_metadata.data_ptr());

    int32_t** kv_ptr_list = reinterpret_cast<int32_t**>(kv_ptrs.data_ptr());

    AttentionAtom* atoms = reinterpret_cast<AttentionAtom*>(atoms_ten.data_ptr());

    int32_t n_atoms = 0;
    for (int i = 0; i < batch_desc->n_sequences; i++) {
        // Ceil-divide the sequence's new tokens into query tiles.
        const int seq_atoms = (seq_desc[i].n_tokens + q_block_size - 1) / q_block_size;
        int32_t cur_start_idx = seq_desc[i].start_idx;        // offset into the ragged batch
        int32_t global_start_idx = seq_desc[i].seen_tokens;   // tokens already processed
        int32_t remaining_toks = seq_desc[i].n_tokens;

        for (int j = 0; j < seq_atoms; j++) {
            atoms[n_atoms].block_idx_list = kv_ptr_list[i];
            atoms[n_atoms].q_start_idx = cur_start_idx;
            atoms[n_atoms].q_len = std::min(remaining_toks, q_block_size);
            atoms[n_atoms].global_q_idx = global_start_idx;

            // Causal attention: this tile attends to every token up to and
            // including its own last query token.
            const int32_t end_toks = global_start_idx + atoms[n_atoms].q_len;
            // TODO(cmikeh2): This logic needs to be changed for sparse implementations
            atoms[n_atoms].kv_blocks = (end_toks + kv_block_size - 1) / kv_block_size;
            atoms[n_atoms].total_extent = end_toks;

            cur_start_idx += atoms[n_atoms].q_len;
            global_start_idx += atoms[n_atoms].q_len;
            remaining_toks -= atoms[n_atoms].q_len;
            n_atoms++;
        }
    }

    return n_atoms;
}
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <torch/extension.h>
9
+
10
+ /*
11
+ Construct the attention atoms given the ragged metadata for the current batch.
12
+ This could largely be done at the Python level, but since we pack the KV ptr
13
+ alongside the int32_t metadata, it gets very ugly to handle the mixed-width
14
+ data structures (since we're packing them in a single tensor).
15
+ */
16
+ int32_t build_atoms(torch::Tensor& atoms_ten,
17
+ torch::Tensor& batch_metadata,
18
+ torch::Tensor& seq_metadata,
19
+ torch::Tensor& kv_ptrs,
20
+ const int32_t q_block_size,
21
+ const int32_t kv_block_size);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Tuple
7
+
8
+ import torch
9
+
10
+ from ... import DSKernelBase
11
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
12
+ from ....ragged import RaggedBatchWrapper
13
+
14
+
15
class AtomBuilder(DSKernelBase):
    """
    C++ implementation to populate the attention atoms for the blocked attention
    kernel.
    """

    def __init__(self) -> None:
        """
        Triggers compilation of the C++ implementation.
        """
        inf_module = RaggedOpsBuilder().load()
        # Bound C++ entry point; returns the number of atoms it populated.
        self.kernel = inf_module.build_atoms

    def __call__(self, atoms: torch.Tensor, ragged_batch: RaggedBatchWrapper, q_block_size: int,
                 kv_block_size: int) -> Tuple[torch.Tensor, int]:
        """
        Populates the attention atoms for the blocked attention kernel.

        Args:
            atoms (torch.Tensor): Pre-allocated int32 tensor of shape [max_atoms, 8],
                resident on the CPU (the kernel walks host memory directly).
            ragged_batch (RaggedBatchWrapper): Wrapper for the ragged batch.
            q_block_size (int): The block size for the queries (as determined by the
                attention implementation)
            kv_block_size (int): The block size for the keys/values (as determined by the
                attention implementation)

        Returns:
            Tuple[torch.Tensor, int]: The populated ``atoms`` tensor and the number
                of atoms actually written for this batch.

        Raises:
            RuntimeError: If ``atoms`` is not a CPU tensor.
        """
        if atoms.device != torch.device("cpu"):
            raise RuntimeError("AtomBuilder must be called on CPU tensors")

        n_atoms = self.kernel(atoms, ragged_batch.batch_metadata_buffer(on_device=False),
                              ragged_batch.inflight_seq_descriptors(on_device=False),
                              ragged_batch.kv_ptrs(on_device=False), q_block_size, kv_block_size)
        return atoms, n_atoms
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .blocked_flash import *
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (239 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc ADDED
Binary file (3.61 kB). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cstdint>
9
+ #include "cuda.h"
10
+
11
struct AttentionAtom {
    /*
    The attention atom describes the workload of a particular query. The attention
    kernel will execute each ``AttentionAtom`` for each head of the model.

    NOTE(review): the Python side builds these as an int32 tensor of shape
    [num_atoms, 8] (a 64-bit pointer plus six int32 fields) — confirm the
    layouts stay in sync before reordering or adding members.
    */

    // Pointer to a list of KV block indices.
    int32_t* block_idx_list;

    // Index of first token in the ragged batch associated with this atom.
    int32_t q_start_idx;

    // Number of tokens in the ragged batch associated with this atom.
    int32_t q_len;

    // Number of key/value blocks associated with this atom. All but the last are
    // assumed to be fully dense.
    int32_t kv_blocks;

    // Number of tokens in the last key/value block.
    int32_t total_extent;

    // Global index of the first token in the atom. For example, in a prompt continuation
    // in which we have already processed 768 tokens, this would be 768.
    int32_t global_q_idx;

    // Unused padding field.
    int32_t unused;
};
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /******************************************************************************
7
+ * Copyright (c) 2023, Tri Dao.
8
+ ******************************************************************************/
9
+
10
+ #include <ATen/cuda/CUDAContext.h>
11
+ #include <c10/cuda/CUDAGuard.h>
12
+ #include <torch/extension.h>
13
+
14
+ #include "blocked_flash.h"
15
+ #include "flash.h"
16
+
17
+ #define CHECK_SHAPE(x, ...) \
18
+ TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), \
19
+ #x " must have shape (" #__VA_ARGS__ ")")
20
+
21
+ void flash_attn_by_atoms(at::Tensor& out,
22
+ at::Tensor& q,
23
+ at::Tensor& k,
24
+ at::Tensor& v,
25
+ at::Tensor& attention_atoms,
26
+ const float softmax_scale,
27
+ const bool is_causal)
28
+ {
29
+ auto dprops = at::cuda::getCurrentDeviceProperties();
30
+
31
+ bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
32
+ bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
33
+ TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
34
+
35
+ auto q_dtype = q.dtype();
36
+ TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
37
+ "FlashAttention only support fp16 and bf16 data type");
38
+ if (q_dtype == torch::kBFloat16) {
39
+ TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
40
+ }
41
+ TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
42
+ TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
43
+
44
+ TORCH_CHECK(q.is_cuda(), "Input tensor must be on CUDA device");
45
+ TORCH_CHECK(k.is_cuda(), "Input tensor must be on CUDA device");
46
+ TORCH_CHECK(v.is_cuda(), "Input tensor must be on CUDA device");
47
+
48
+ TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
49
+ TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
50
+ TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
51
+
52
+ const int total_q = q.size(0);
53
+ const int head_size = k.size(-1);
54
+ const int num_heads_kv = k.size(-2);
55
+ const int num_heads_q = q.size(-1) / head_size;
56
+
57
+ TORCH_CHECK(head_size <= 256, "head_size must be <= 256");
58
+ TORCH_CHECK(head_size % 8 == 0, "head_size must be divisible by 8");
59
+ TORCH_CHECK(num_heads_q % num_heads_kv == 0, "num_heads_q must be divisible by num_heads_kv");
60
+
61
+ Flash_fwd_params params;
62
+
63
+ params.is_bf16 = q.dtype() == torch::kBFloat16;
64
+
65
+ // Set the pointers and strides.
66
+ params.q_ptr = q.data_ptr();
67
+ params.k_ptr = k.data_ptr();
68
+ params.v_ptr = v.data_ptr();
69
+ params.o_ptr = out.data_ptr();
70
+ params.atoms = reinterpret_cast<AttentionAtom*>(attention_atoms.data_ptr());
71
+
72
+ // All stride are in elements, not bytes.
73
+ params.q_row_stride = q.stride(0);
74
+ params.k_row_stride = k.stride(1);
75
+ params.v_row_stride = v.stride(1);
76
+ params.o_row_stride = out.stride(0);
77
+
78
+ // Assume heads are contiguous.
79
+ params.q_head_stride = head_size;
80
+ params.k_head_stride = head_size;
81
+ params.v_head_stride = head_size;
82
+ params.o_head_stride = head_size;
83
+
84
+ // Head params
85
+ params.h = num_heads_q;
86
+ params.h_k = num_heads_kv;
87
+ params.h_h_k_ratio = num_heads_q / num_heads_kv;
88
+ params.d = head_size;
89
+ auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
90
+ params.d_rounded = round_multiple(head_size, 32);
91
+ params.num_atoms = attention_atoms.size(0);
92
+
93
+ // Set the different scale values.
94
+ params.scale_softmax = softmax_scale;
95
+ params.scale_softmax_log2 = softmax_scale * M_LOG2E;
96
+
97
+ params.is_causal = is_causal;
98
+
99
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
100
+ run_mha_fwd(params, stream);
101
+ }
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <torch/extension.h>
9
+
10
// Launches the blocked flash attention kernel over a set of pre-built attention
// atoms (see attention_atom.h). Implemented in blocked_flash.cpp.
void flash_attn_by_atoms(at::Tensor& out,
                         at::Tensor& q,
                         at::Tensor& k,
                         at::Tensor& v,
                         at::Tensor& attention_atoms,
                         const float softmax_scale,
                         const bool is_causal);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from deepspeed.accelerator import get_accelerator
9
+ from ....inference_utils import DtypeEnum
10
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
11
+
12
+ from ... import DSKernelBase
13
+
14
+
15
def get_q_block_size(head_size: int) -> int:
    """
    Returns the query block size required by the kernel given a head size.

    Raises:
        RuntimeError: If the current device's compute capability is below 8.0.
    """
    cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device())  #ignore-cuda

    if cc_major < 8:
        raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0")

    # NOTE(review): the cc_minor == 0 split presumably tracks the larger shared
    # memory on ".0" revision parts — confirm against the kernel tuning tables.
    minor_is_zero = cc_minor == 0

    if head_size <= 64:
        return 128
    if head_size <= 160:
        return 128 if minor_is_zero else 64
    if head_size == 192:
        return 128
    if head_size == 224:
        return 128 if minor_is_zero else 64
    # All remaining head sizes (161-191 excluding those above, and > 224).
    return 128 if (cc_major == 8 and cc_minor == 0) else 64
43
+
44
+
45
def get_kv_block_size(head_size: int) -> int:
    """
    Return preferred granularity for the blocked KV-cache implementation.

    Raises:
        RuntimeError: If the current device's compute capability is below 8.0.
    """
    cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device())  #ignore-cuda

    if cc_major < 8:
        raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0")

    if head_size <= 64:
        return 128
    # The only reduced-granularity case: head size 160 on a ".0" revision part.
    if head_size == 160 and cc_minor == 0:
        return 32
    return 64
60
+
61
+
62
class BlockedFlashAttn(DSKernelBase):
    """
    Modified implementation of flash-attn-2 tuned for inference on blocked KV-cache and wider
    range of input sequence lengths.
    """

    supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]

    def __init__(self, head_size: int, dtype: DtypeEnum) -> None:
        """
        Triggers any compilation of the kernels.

        Args:
            head_size (int): Attention head size. Must be divisible by 16.
            dtype (DtypeEnum): Data type for Q/K/V. Must be fp16 or bf16.

        Raises:
            ValueError: If the data type or head size is unsupported.
        """
        if not isinstance(dtype, DtypeEnum):
            dtype = DtypeEnum(dtype)

        if dtype not in BlockedFlashAttn.supported_dtypes:
            raise ValueError("Unsupported data type: {}, supported data types are {}".format(
                dtype, BlockedFlashAttn.supported_dtypes))

        # Error message kept consistent with the actual % 16 divisibility check.
        if head_size % 16 != 0:
            raise ValueError("Head size must be divisible by 16 (configured with {})".format(head_size))

        inf_module = RaggedOpsBuilder().load()
        self.kernel = inf_module.flash_attn_by_atoms

    def __call__(self, out: torch.Tensor, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, atoms: torch.Tensor,
                 softmax_scale: float) -> torch.Tensor:
        """
        Flash attention implementation atop a blocked KV-cache. Atoms should be pre-populated.
        See attention_atom.h for further details on the structure of the information.

        Arguments:
            out (torch.Tensor): Output tensor of shape [tokens, hidden_size]
            q (torch.Tensor): Query tensor of shape [tokens, hidden_size]
            k (torch.Tensor): Key cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This Tensor only needs to be contiguous on the final dimension.
            v (torch.Tensor): Value cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This Tensor only needs to be contiguous on the final dimension.
            atoms (torch.Tensor): Atom information tensor of shape [num_atoms, 8] and type int32.
                Not all data is readable in this format. See attention_atom.h for further details.
            softmax_scale (float): Softmax scale factor.

        Returns:
            out (torch.Tensor): Output tensor of shape [tokens, hidden_size]
        """
        # Attention is always causal in this inference path.
        self.kernel(out, q, k, v, atoms, softmax_scale, True)
        return out
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /******************************************************************************
7
+ Copyright (c) 2023, Tri Dao.
8
+ ******************************************************************************/
9
+
10
+ #pragma once
11
+
12
+ #include <cuda.h>
13
+ #include <vector>
14
+
15
+ #include "attention_atom.h"
16
+
17
+ constexpr int TOTAL_DIM = 0;
18
+ constexpr int H_DIM = 1;
19
+ constexpr int D_DIM = 2;
20
+
21
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
22
+
23
struct Qkv_params {
    // Index type used for all strides below.
    using index_t = uint32_t;
    // The QKV matrices.
    void* __restrict__ q_ptr;
    void* __restrict__ k_ptr;
    void* __restrict__ v_ptr;

    // The stride between rows of the Q, K and V matrices (in elements, not bytes).
    index_t q_row_stride;
    index_t k_row_stride;
    index_t v_row_stride;
    index_t q_head_stride;
    index_t k_head_stride;
    index_t v_head_stride;

    // The number of heads.
    int h, h_k;
    // In the case of multi-query and grouped-query attention (MQA/GQA), nheads_k could be
    // different from nheads (query).
    int h_h_k_ratio;  // precompute h / h_k,
};
44
+
45
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
46
+
47
struct Flash_fwd_params : public Qkv_params {
    // The O matrix (output).
    void* __restrict__ o_ptr;

    // The attention metadata: one AttentionAtom per unit of work
    // (see attention_atom.h).
    AttentionAtom* __restrict__ atoms;

    // Total attention atoms
    int num_atoms;

    // The stride between rows of O (in elements).
    index_t o_row_stride;
    index_t o_head_stride;

    // The dimensions: head size and head size rounded for the kernel tiles.
    int d, d_rounded;

    // The scaling factors for the kernel. scale_softmax_log2 is the
    // log2-scaled variant used with exp2-based softmax evaluation.
    float scale_softmax;
    float scale_softmax_log2;

    // Input dtype selector (bf16 vs fp16) and causal-masking flag.
    bool is_bf16;
    bool is_causal;
};
71
+
72
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
73
+
74
// Dispatches the forward attention kernel described by `params` on `stream`.
void run_mha_fwd(Flash_fwd_params& params, cudaStream_t stream);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (252 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc ADDED
Binary file (2.88 kB). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "embed.h"
7
+ #include "ragged_kernel_helpers.h"
8
+
9
// Maps a torch floating-point dtype to a local `float_t` alias and invokes the
// trailing lambda. bf16 is only dispatched when the build defines BF16_AVAILABLE.
// NOTE(review): `float_t` shadows the C <math.h> type of the same name inside
// the lambda — confirm this is intentional before reusing the alias elsewhere.
#ifdef BF16_AVAILABLE
#define DISPATCH_FOR_FLOAT(DTYPE, ...)                     \
    [&] {                                                  \
        if (DTYPE == torch::kFloat32) {                    \
            using float_t = float;                         \
            return __VA_ARGS__();                          \
        } else if (DTYPE == torch::kFloat16) {             \
            using float_t = __half;                        \
            return __VA_ARGS__();                          \
        } else if (DTYPE == torch::kBFloat16) {            \
            using float_t = __nv_bfloat16;                 \
            return __VA_ARGS__();                          \
        } else {                                           \
            TORCH_CHECK(false, "Unsupported dispatch type"); \
        }                                                  \
    }()
#else
// Same dispatch without the bf16 branch.
#define DISPATCH_FOR_FLOAT(DTYPE, ...)                     \
    [&] {                                                  \
        if (DTYPE == torch::kFloat32) {                    \
            using float_t = float;                         \
            return __VA_ARGS__();                          \
        } else if (DTYPE == torch::kFloat16) {             \
            using float_t = __half;                        \
            return __VA_ARGS__();                          \
        } else {                                           \
            TORCH_CHECK(false, "Unsupported dispatch type"); \
        }                                                  \
    }()
#endif

// Maps a torch integer dtype (int32/int64) to a local `int_t` alias and invokes
// the trailing lambda.
#define DISPATCH_FOR_INT(DTYPE, ...)                       \
    [&] {                                                  \
        if (DTYPE == torch::kInt32) {                      \
            using int_t = int32_t;                         \
            return __VA_ARGS__();                          \
        } else if (DTYPE == torch::kInt64) {               \
            using int_t = int64_t;                         \
            return __VA_ARGS__();                          \
        } else {                                           \
            TORCH_CHECK(false, "Unsupported dispatch type"); \
        }                                                  \
    }()
52
+
53
/*
Embeddings kernel aware of ragged batch structure. Performs the token-embedding
lookup for every live token in the ragged batch and, when a position-embedding
table is provided, adds the corresponding position embedding. Launches on the
current CUDA stream.
*/
void ragged_embed(torch::Tensor& embedded_tokens,
                  torch::Tensor& input_ids,
                  torch::Tensor& embedding_weight,
                  c10::optional<torch::Tensor>& position_embedding_weight,
                  int32_t pos_embed_offset,
                  torch::Tensor& batch_metadata,
                  torch::Tensor& seq_metadata,
                  torch::Tensor& tokens_to_seq,
                  torch::Tensor& kv_ptrs)
{
    // We don't care about KV cache here, so just hardcoding 0s for block_size/num_blocks
    BatchWrapperCPP batch_wrapper =
        make_cpp_batch_wrapper(batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, 0, 0);

    const int32_t n_tokens = input_ids.numel();
    const int32_t embed_dim = embedding_weight.size(1);
    const int32_t vocab_size = embedding_weight.size(0);

    // Dispatch on (token dtype) x (embedding dtype); both aliases are
    // introduced by the macros above.
    DISPATCH_FOR_INT(input_ids.scalar_type(), [&] {
        DISPATCH_FOR_FLOAT(embedding_weight.scalar_type(), [&] {
            float_t* pos_embed_ptr = nullptr;
            int32_t max_position_embed_idx = 0;
            if (position_embedding_weight.has_value()) {
                TORCH_CHECK(
                    position_embedding_weight.value().options().dtype() ==
                        embedding_weight.options().dtype(),
                    "position_embedding_weight and embedding_weight must have the same dtype");
                pos_embed_ptr =
                    reinterpret_cast<float_t*>(position_embedding_weight.value().data_ptr());
                // Largest valid row index into the position table; the kernel
                // clamps position indices to this value.
                max_position_embed_idx = position_embedding_weight.value().size(0) - 1;
            }

            launch_ragged_embed_kernel((float_t*)embedded_tokens.data_ptr(),
                                       (const int_t*)input_ids.data_ptr(),
                                       (const float_t*)embedding_weight.data_ptr(),
                                       pos_embed_ptr,
                                       batch_wrapper,
                                       n_tokens,
                                       embed_dim,
                                       vocab_size,
                                       max_position_embed_idx,
                                       pos_embed_offset,
                                       at::cuda::getCurrentCUDAStream());
        });
    });
}
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cuh ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "ds_kernel_utils.h"
9
+ #include "ragged_dtypes.h"
10
+
11
+ #ifdef BF16_AVAILABLE
12
+ #include <cuda_bf16.h>
13
+ #endif
14
+
15
// Host-side launcher for the ragged embedding kernel (defined in embed_cuda.cu).
// Performs the token-embedding lookup for `n_tokens` tokens, optionally adding
// position embeddings when `position_weight` is non-null.
template <typename TokenType, typename EmbedType>
void launch_ragged_embed_kernel(EmbedType* embedded_tokens,
                                const TokenType* input_ids,
                                const EmbedType* embedding_weight,
                                const EmbedType* position_weight,
                                const BatchWrapperCPP batch_desc,
                                const int32_t n_tokens,
                                const int32_t embed_dim,
                                const int32_t vocab_size,
                                const int32_t max_position_embed_idx,
                                const int32_t position_embed_offset,
                                cudaStream_t stream);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Optional
7
+
8
+ import torch
9
+
10
+ from ... import DSKernelBase
11
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
12
+ from ....inference_utils import elem_size
13
+ from ....ragged import RaggedBatchWrapper
14
+
15
+
16
class RaggedEmbeddingKernel(DSKernelBase):
    """
    Ragged-aware CUDA kernel implementation for an embedding lookup. This will only lookup
    the necessary tokens for a padded batch (i.e. if we are CGed and running with a slightly
    larger batch size than the actual tokens).
    """

    # Embedding-table/output dtypes the kernel supports.
    supported_dtypes = [torch.float16, torch.bfloat16, torch.float32]
    # Token-id dtypes the kernel supports.
    supported_token_dtypes = [torch.int32, torch.int64]

    def __init__(self, embed_dtype: torch.dtype, token_dtype: torch.dtype, embed_dim: int) -> None:
        """
        Args:
            embed_dtype (torch.dtype): Data type of the embedding table and output dtype.
                Supported values are torch.float16, torch.bfloat16, and torch.float32.
            token_dtype (torch.dtype): Data type of the token ids. Supported values are
                torch.int32 and torch.int64.
            embed_dim (int): Embedding dimension. Must be aligned to 16 bytes.

        Raises:
            ValueError: If a dtype is unsupported or the alignment requirement is not met.
        """
        if embed_dtype not in RaggedEmbeddingKernel.supported_dtypes:
            raise ValueError("Unsupported embedding data type: {}, supported_dtypes are {}".format(
                embed_dtype, RaggedEmbeddingKernel.supported_dtypes))

        if token_dtype not in RaggedEmbeddingKernel.supported_token_dtypes:
            raise ValueError("Unsupported token data type: {}, supported_dtypes are {}".format(
                token_dtype, RaggedEmbeddingKernel.supported_token_dtypes))

        # The CUDA kernel loads/stores 16 bytes per access, so each row must be
        # 16-byte aligned.
        if elem_size(embed_dtype) * embed_dim % 16 != 0:
            raise ValueError("Embedding dimension must be aligned to 16 bytes, got {}".format(embed_dim))

        inf_module = RaggedOpsBuilder().load()
        self.kernel = inf_module.ragged_embed

    def __call__(self,
                 embedded_tokens: torch.Tensor,
                 ragged_wrapper: RaggedBatchWrapper,
                 embedding_weight: torch.Tensor,
                 position_embed_weight: Optional[torch.Tensor] = None,
                 position_embed_offset: int = 0) -> torch.Tensor:
        """
        Ragged aware embedding lookup.

        Args:
            embedded_tokens (torch.Tensor): Output tensor of shape [num_tokens, embed_dim]
            ragged_wrapper (RaggedBatchWrapper): Wrapper for the ragged batch.
            embedding_weight (torch.Tensor): Embedding table of shape [vocab_size, embed_dim]
            position_embed_weight (Optional[torch.Tensor]): Optional position-embedding
                table; when provided, the position embedding is added to each lookup.
            position_embed_offset (int): Offset added to each position index before the
                position-embedding lookup.

        Returns:
            torch.Tensor: The populated ``embedded_tokens`` tensor.
        """
        self.kernel(embedded_tokens, ragged_wrapper.input_ids(),
                    embedding_weight, position_embed_weight, position_embed_offset,
                    ragged_wrapper.batch_metadata_buffer(), ragged_wrapper.inflight_seq_descriptors(),
                    ragged_wrapper.tokens_to_seq(), ragged_wrapper.kv_ptrs())
        return embedded_tokens
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "ds_kernel_utils.h"
7
+ #include "embed.cuh"
8
+ #include "memory_access_utils.h"
9
+ #include "ragged_dtypes.h"
10
+
11
namespace embed {

// Bytes moved per thread per vectorized load/store.
constexpr int granularity = 16;
// Threads per block for the embedding kernel.
constexpr int threads = 512;

}  // namespace embed
17
+
18
// Grid layout: blockIdx.y selects the token, blockIdx.x tiles the embedding
// dimension; each thread handles one 16-byte vector of the embedding row.
template <typename TokenType, typename EmbedType>
__global__ void ragged_embed_kernel(EmbedType* embedded_tokens,
                                    const TokenType* input_ids,
                                    const EmbedType* embedding_weight,
                                    const EmbedType* position_weight,
                                    const BatchWrapperCPP batch_desc,
                                    const int32_t embed_dim,
                                    const int32_t vocab_size,
                                    const int32_t max_position_embed_idx,
                                    const int32_t position_embed_offset)
{
    // Elements per thread for a 16-byte vectorized access.
    constexpr int T_vector = embed::granularity / sizeof(EmbedType);

    const int32_t token_idx = blockIdx.y;

    // It's possible our batch is padded (under CG conditions typically)
    if (token_idx >= batch_desc.batch_metadata->n_tokens) return;

    TokenType token_value = input_ids[token_idx];

    if (token_value >= vocab_size || token_value < 0) {
        // TODO(cmikeh2): This is invalid, but not sure how we want to handle it being invalid
        // yet. For now out-of-vocab tokens silently skip the write.
        return;
    }

    const EmbedType* embedding_row = embedding_weight + token_value * embed_dim;
    EmbedType* dest_row = embedded_tokens + token_idx * embed_dim;

    const int channel_offset = (threadIdx.x + embed::threads * blockIdx.x) * T_vector;

    if (channel_offset < embed_dim) {
        EmbedType reg_buf[T_vector];

        mem_access::load_global<embed::granularity>(reg_buf, embedding_row + channel_offset);

        if (position_weight != nullptr) {
            // Map the token to its global idx (indirect memory accesses aren't great but whatever)
            const int32_t seq_idx = batch_desc.tokens_to_seq[token_idx];
            const InflightSeqDescriptor seq_desc = batch_desc.seq_metadata[seq_idx];
            int32_t pos_emb_idx = seq_desc.seen_tokens + (token_idx - seq_desc.start_idx);

            // Position embed offset is an OPT-specific feature I think?
            pos_emb_idx = pos_emb_idx + position_embed_offset;

            // Clamp the index into [0, max_position_embed_idx] so out-of-range
            // positions reuse the boundary rows rather than reading out of bounds.
            pos_emb_idx = (pos_emb_idx < 0) ? 0 : pos_emb_idx;
            pos_emb_idx = (pos_emb_idx >= max_position_embed_idx) ? max_position_embed_idx
                                                                  : pos_emb_idx;

            const EmbedType* position_embedding_row = position_weight + pos_emb_idx * embed_dim;

            EmbedType pos_buf[T_vector];
            mem_access::load_global<embed::granularity>(pos_buf,
                                                        position_embedding_row + channel_offset);

#pragma unroll
            for (int i = 0; i < T_vector; i++) { reg_buf[i] += pos_buf[i]; }
        }

        mem_access::store_global<embed::granularity>(dest_row + channel_offset, reg_buf);
    }
}
81
+
82
// Host-side launcher: computes a (ceil(embed_dim / elems_per_block), n_tokens)
// grid and enqueues the embedding kernel on `stream`.
template <typename TokenType, typename EmbedType>
void launch_ragged_embed_kernel(EmbedType* embedded_tokens,
                                const TokenType* input_ids,
                                const EmbedType* embedding_weight,
                                const EmbedType* position_weight,
                                const BatchWrapperCPP batch_desc,
                                const int32_t n_tokens,
                                const int32_t embed_dim,
                                const int32_t vocab_size,
                                const int32_t max_position_embed_idx,
                                const int32_t position_embed_offset,
                                cudaStream_t stream)
{
    // Elements each thread handles with a single 16-byte access.
    constexpr int T_vector = embed::granularity / sizeof(EmbedType);
    constexpr int elems_per_block = embed::threads * T_vector;
    // Ceil-divide so the tail of the embedding dimension is covered.
    const int parallel_blocks = (embed_dim + elems_per_block - 1) / elems_per_block;

    const dim3 grid_dim(parallel_blocks, n_tokens, 1);
    const dim3 block_dim(embed::threads, 1, 1);

    ragged_embed_kernel<TokenType, EmbedType>
        <<<grid_dim, block_dim, 0, stream>>>(embedded_tokens,
                                             input_ids,
                                             embedding_weight,
                                             position_weight,
                                             batch_desc,
                                             embed_dim,
                                             vocab_size,
                                             max_position_embed_idx,
                                             position_embed_offset);
}
113
+
114
// Explicit instantiations for every supported (token dtype, embedding dtype)
// pair; bf16 variants are compiled only when BF16_AVAILABLE is defined.
#define INSTANTIATE_EMBED_FOR_TYPES(TOKEN_TYPE, EMBED_TYPE)          \
    template void launch_ragged_embed_kernel<TOKEN_TYPE, EMBED_TYPE>( \
        EMBED_TYPE * embedded_tokens,                                 \
        const TOKEN_TYPE* input_ids,                                  \
        const EMBED_TYPE* embedding_weight,                           \
        const EMBED_TYPE* position_weight,                            \
        const BatchWrapperCPP batch_descriptor,                       \
        const int32_t n_tokens,                                       \
        const int32_t embed_dim,                                      \
        const int32_t vocab_size,                                     \
        const int32_t max_position_embed_idx,                         \
        const int32_t position_embed_offset,                          \
        cudaStream_t stream);

INSTANTIATE_EMBED_FOR_TYPES(int32_t, float)
INSTANTIATE_EMBED_FOR_TYPES(int64_t, float)

INSTANTIATE_EMBED_FOR_TYPES(int32_t, __half)
INSTANTIATE_EMBED_FOR_TYPES(int64_t, __half)

#ifdef BF16_AVAILABLE
INSTANTIATE_EMBED_FOR_TYPES(int32_t, __nv_bfloat16)
INSTANTIATE_EMBED_FOR_TYPES(int64_t, __nv_bfloat16)
#endif
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
// Dispatches a runtime top-k value (1, 2, 4 or 8) to the compile-time constant
// CONST_TOP_K visible inside the trailing lambda body.
// NOTE(review): any other N_TOP_K silently executes nothing — callers must
// validate the value beforehand.
#define TOP_K_SWITCH(N_TOP_K, ...)        \
    [&] {                                 \
        if (1 == N_TOP_K) {               \
            constexpr int CONST_TOP_K = 1; \
            __VA_ARGS__();                \
        } else if (2 == N_TOP_K) {        \
            constexpr int CONST_TOP_K = 2; \
            __VA_ARGS__();                \
        } else if (4 == N_TOP_K) {        \
            constexpr int CONST_TOP_K = 4; \
            __VA_ARGS__();                \
        } else if (8 == N_TOP_K) {        \
            constexpr int CONST_TOP_K = 8; \
            __VA_ARGS__();                \
        }                                 \
    }()
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .blocked_kv_rotary import *
7
+ from .blocked_trained_kv_rotary import *
8
+ from .linear_blocked_kv_copy import *
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <torch/extension.h>
10
+ #include "blocked_kv_rotary.cuh"
11
+
12
/*
Rotary position embeddings + copy into KV cache. This implementation assumes
that the inverse frequencies should be ready from global memory rather than
synthesized in the kernel.

Arguments:
    kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
    q: [n_tokens, n_q_heads * head_size]
    k: [n_tokens, n_kv_heads * head_size]
    v: [n_tokens, n_kv_heads * head_size]
    inv_freq: [max_seq_len, head_size // 2]
*/
void kv_trained_rotary_embeddings(torch::Tensor& kv_cache,
                                  torch::Tensor& q,
                                  torch::Tensor& k,
                                  torch::Tensor& v,
                                  torch::Tensor& inv_freq,
                                  torch::Tensor& batch_metadata,
                                  torch::Tensor& seq_metadata,
                                  torch::Tensor& tokens_to_seq,
                                  torch::Tensor& kv_ptrs);

/*
Rotary position embeddings + copy into KV cache. This implementation assumes
that the inverse frequencies should be synthesized in the kernel (from
rotary_dim and theta_base).

Arguments:
    kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
    q: [n_tokens, n_q_heads * head_size]
    k: [n_tokens, n_kv_heads * head_size]
    v: [n_tokens, n_kv_heads * head_size]
*/
void kv_rotary_embeddings(torch::Tensor& kv_cache,
                          torch::Tensor& q,
                          torch::Tensor& k,
                          torch::Tensor& v,
                          const int32_t rotary_dim,
                          const float theta_base,
                          torch::Tensor& batch_metadata,
                          torch::Tensor& seq_metadata,
                          torch::Tensor& tokens_to_seq,
                          torch::Tensor& kv_ptrs);

/*
Copy into linear KV cache (no rotary embedding applied). Tensor arguments
follow the same shapes as the rotary variants above.
*/
void linear_kv_copy(torch::Tensor& kv_cache,
                    torch::Tensor& q,
                    torch::Tensor& k,
                    torch::Tensor& v,
                    torch::Tensor& batch_metadata,
                    torch::Tensor& seq_metadata,
                    torch::Tensor& tokens_to_seq,
                    torch::Tensor& kv_ptrs);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ....inference_utils import DtypeEnum
9
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
10
+ from ....ragged import RaggedBatchWrapper
11
+ from ... import DSKernelBase
12
+
13
+
14
class BlockedRotaryEmbeddings(DSKernelBase):
    """
    CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
    before copying into a blocked KV cache.
    """

    supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
    supported_head_sizes = [64, 80, 96, 128]
    # Ratios of query heads to KV heads (GQA/MQA) the kernel is compiled for.
    supported_q_ratios = [1, 2, 4, 5, 6, 7, 8, 16, 29, 35, 36, 71]

    def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype, rotary_dim: int,
                 theta_base: float) -> None:
        """
        Args:
            head_size (int): The size of the attention head.
            n_q_heads (int): Number of query heads.
            n_kv_heads (int): Number of key/value heads. n_q_heads // n_kv_heads
                must be one of the supported q_ratios.
            dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
            rotary_dim (int): Number of channels per head the rotary embedding is applied to.
            theta_base (float): Base used to synthesize the rotary inverse frequencies.

        Raises:
            ValueError: If the head size, q_ratio, or dtype is unsupported.
        """

        q_ratio = n_q_heads // n_kv_heads

        if head_size not in BlockedRotaryEmbeddings.supported_head_sizes:
            raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
                head_size, BlockedRotaryEmbeddings.supported_head_sizes))

        if q_ratio not in BlockedRotaryEmbeddings.supported_q_ratios:
            raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
                q_ratio, BlockedRotaryEmbeddings.supported_q_ratios))

        if not isinstance(dtype, DtypeEnum):
            dtype = DtypeEnum(dtype)

        if dtype not in BlockedRotaryEmbeddings.supported_dtypes:
            raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
                dtype, BlockedRotaryEmbeddings.supported_dtypes))

        inf_module = RaggedOpsBuilder().load()
        self.kernel = inf_module.kv_rotary_embeddings
        self.head_size = head_size
        self.n_q_heads = n_q_heads
        self.n_kv_heads = n_kv_heads
        self.rotary_dim = rotary_dim
        self.theta_base = theta_base

    def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper) -> None:
        """
        Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.

        Args:
            kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
            qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
            ragged_batch: Wrapper for the ragged batch.
        """

        # Split the fused QKV projection into its Q, K and V regions by column.
        q = qkv[:, :self.head_size * self.n_q_heads]
        k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
        v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]

        self.kernel(kv_cache, q, k, v, self.rotary_dim, self.theta_base, ragged_batch.batch_metadata_buffer(),
                    ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
10
+
11
+ import torch
12
+
13
+ from ....inference_utils import DtypeEnum
14
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
15
+ from ....ragged import RaggedBatchWrapper
16
+ from ... import DSKernelBase
17
+
18
+
19
+ class BlockedTrainedRotaryEmbeddings(DSKernelBase):
20
+ """
21
+ CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
22
+ before copying into a blocked KV cache.
23
+ """
24
+
25
+ supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
26
+ supported_head_sizes = [64, 80, 96, 128]
27
+ supported_q_ratios = [1, 2, 4, 5, 8]
28
+
29
+ def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype) -> None:
30
+ """
31
+ Args:
32
+ head_size: The size of the attention head.
33
+ dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
34
+ """
35
+
36
+ q_ratio = n_q_heads // n_kv_heads
37
+
38
+ if head_size not in BlockedTrainedRotaryEmbeddings.supported_head_sizes:
39
+ raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
40
+ head_size, BlockedTrainedRotaryEmbeddings.supported_head_sizes))
41
+
42
+ if q_ratio not in BlockedTrainedRotaryEmbeddings.supported_q_ratios:
43
+ raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
44
+ q_ratio, BlockedTrainedRotaryEmbeddings.supported_q_ratios))
45
+
46
+ if not isinstance(dtype, DtypeEnum):
47
+ dtype = DtypeEnum(dtype)
48
+
49
+ if dtype not in BlockedTrainedRotaryEmbeddings.supported_dtypes:
50
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
51
+ dtype, BlockedTrainedRotaryEmbeddings.supported_dtypes))
52
+
53
+ inf_module = RaggedOpsBuilder().load()
54
+ self.kernel = inf_module.kv_trained_rotary_embeddings
55
+ self.head_size = head_size
56
+ self.n_q_heads = n_q_heads
57
+ self.n_kv_heads = n_kv_heads
58
+
59
+ def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper,
60
+ inverse_freqs: torch.Tensor) -> None:
61
+ """
62
+ Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.
63
+
64
+ Args:
65
+ kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
66
+ qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
67
+ ragged_batch: Wrapper for the ragged batch.
68
+ inverse_freqs: Inverse frequencies for the rotary embeddings. Shape [max_seq_len, rotary_dim // 2]
69
+ """
70
+
71
+ q = qkv[:, :self.head_size * self.n_q_heads]
72
+ k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
73
+ v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]
74
+
75
+ self.kernel(kv_cache, q, k, v, inverse_freqs, ragged_batch.batch_metadata_buffer(),
76
+ ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .logits_gather import *
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (239 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc ADDED
Binary file (2.3 kB). View file
 
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "logits_gather.h"
7
+
8
+ #define DISPATCH_TO_LOGITS_GATHER(T_TYPE, C_TYPE) \
9
+ if (all_acts.options().dtype() == torch::T_TYPE) { \
10
+ launch_logits_gather((C_TYPE*)final_token_acts.data_ptr(), \
11
+ (const C_TYPE*)all_acts.data_ptr(), \
12
+ batch_metadata_raw, \
13
+ seq_metadata_raw, \
14
+ n_seqs, \
15
+ embed_dim, \
16
+ at::cuda::getCurrentCUDAStream()); \
17
+ }
18
+
19
+ /*
20
+ Logits gather will parse the ragged batch data structure and gather only the logits that
21
+ will be used for token sampling.
22
+ */
23
+ void gather_for_logits(torch::Tensor& final_token_acts,
24
+ torch::Tensor& all_acts,
25
+ torch::Tensor& batch_metadata,
26
+ torch::Tensor& seq_metadata)
27
+ {
28
+ const RaggedBatchDescriptor* batch_metadata_raw =
29
+ reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr());
30
+
31
+ const InflightSeqDescriptor* seq_metadata_raw =
32
+ reinterpret_cast<const InflightSeqDescriptor*>(seq_metadata.data_ptr());
33
+
34
+ const int n_seqs = final_token_acts.size(0);
35
+ const int embed_dim = final_token_acts.size(1);
36
+
37
+ TORCH_CHECK(all_acts.scalar_type() == final_token_acts.scalar_type(),
38
+ "all_acts and final_token_acts must have the same scalar type");
39
+
40
+ DISPATCH_TO_LOGITS_GATHER(kFloat, float)
41
+ DISPATCH_TO_LOGITS_GATHER(kHalf, half)
42
+ #ifdef BF16_AVAILABLE
43
+ DISPATCH_TO_LOGITS_GATHER(kBFloat16, __nv_bfloat16)
44
+ #endif
45
+ }
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "ds_kernel_utils.h"
9
+ #include "ragged_dtypes.h"
10
+
11
+ #ifdef BF16_AVAILABLE
12
+ #include <cuda_bf16.h>
13
+ #endif
14
+
15
+ template <typename T>
16
+ void launch_logits_gather(T* final_token_acts,
17
+ const T* all_acts,
18
+ const RaggedBatchDescriptor* batch_metadata,
19
+ const InflightSeqDescriptor* seq_metadata,
20
+ const int32_t n_seqs,
21
+ const int32_t embed_dim,
22
+ cudaStream_t stream);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <torch/extension.h>
10
+ #include "logits_gather.cuh"
11
+ #include "ragged_dtypes.h"
12
+
13
+ /*
14
+ Logits gather will parse the ragged batch data structure and gather only the logits that
15
+ will be used for token sampling.
16
+ */
17
+ void gather_for_logits(torch::Tensor& final_token_acts,
18
+ torch::Tensor& all_acts,
19
+ torch::Tensor& batch_metadata,
20
+ torch::Tensor& seq_metadata);
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ... import DSKernelBase
9
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
10
+ from ....inference_utils import elem_size
11
+ from ....ragged import RaggedBatchWrapper
12
+
13
+
14
class RaggedLogitsGather(DSKernelBase):
    """
    CUDA kernel wrapper that gathers the hidden state of the final token of each
    sequence in a ragged batch. Running the unembedding only on these gathered rows
    reduces the cost of producing logits.
    """

    supported_dtypes = [torch.float16, torch.bfloat16, torch.float32]

    def __init__(self, model_dim: int, fp_dtype: torch.dtype):
        """
        Parameters:
            model_dim (int): Hidden dimension of the activations to gather.
            fp_dtype (torch.dtype): Data type for the input/output. Supported values
                are torch.float16, torch.bfloat16, and torch.float32.

        Raises:
            ValueError: If the dtype is unsupported or the embedding row is not
                16-byte aligned (required by the vectorized kernel).
        """
        if fp_dtype not in RaggedLogitsGather.supported_dtypes:
            raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
                fp_dtype, RaggedLogitsGather.supported_dtypes))

        # The kernel moves data in 16-byte vectors, so each row must be 16-byte aligned.
        if elem_size(fp_dtype) * model_dim % 16 != 0:
            raise ValueError("Embedding dimension must be aligned to 16 bytes, got {}".format(model_dim))

        inf_module = RaggedOpsBuilder().load()
        self.kernel = inf_module.gather_for_logits

    def __call__(self, final_token_activations: torch.Tensor, all_activations: torch.Tensor,
                 ragged_wrapper: RaggedBatchWrapper) -> torch.Tensor:
        """
        Gather the hidden states of the final token of each sequence from `all_activations` into
        `final_token_activations`.

        Args:
            final_token_activations (torch.Tensor): Output tensor of shape [num_seqs, model_dim]
            all_activations (torch.Tensor): Input tensor of shape [num_tokens, model_dim]
            ragged_wrapper (RaggedBatchWrapper): Wrapper for the ragged batch.
        """
        batch_metadata = ragged_wrapper.batch_metadata_buffer()
        seq_descriptors = ragged_wrapper.inflight_seq_descriptors()
        self.kernel(final_token_activations, all_activations, batch_metadata, seq_descriptors)
        return final_token_activations
vlmpy310/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "ds_kernel_utils.h"
7
+ #include "logits_gather.cuh"
8
+ #include "memory_access_utils.h"
9
+ #include "ragged_dtypes.h"
10
+
11
+ namespace logits_gather {
12
+
13
+ constexpr int granularity = 16;
14
+ constexpr int threads = 512;
15
+
16
+ } // namespace logits_gather
17
+
18
/*
Copy the activations of the final token of one sequence into the gathered output.

Launch layout: grid = (ceil(embed_dim / (threads * T_vector)), n_seqs); each thread
moves one 16-byte vector. Bounds are guarded for both padded sequence slots and the
tail of the embedding dimension.
*/
template <typename T>
__global__ void logits_gather_kernel(T* final_token_acts,
                                     const T* token_acts,
                                     const RaggedBatchDescriptor* ragged_batch,
                                     const InflightSeqDescriptor* inflight_batch,
                                     const int32_t embed_dim)
{
    // Elements carried per 16-byte vectorized access.
    constexpr int T_vector = logits_gather::granularity / sizeof(T);

    const int32_t seq_id = blockIdx.y;

    // It's possible we've padded the output Tensor (under CG conditions)
    if (seq_id >= ragged_batch->n_sequences) return;

    const InflightSeqDescriptor seq = inflight_batch[seq_id];
    const int final_token_idx = seq.start_idx + seq.n_tokens - 1;

    // Compute row offsets in 64-bit: final_token_idx * embed_dim (and, in principle,
    // seq_id * embed_dim) can overflow a 32-bit int for large batches x large models.
    const int64_t token_offset = static_cast<int64_t>(final_token_idx) * embed_dim;
    const int thread_offset = threadIdx.x * T_vector + blockIdx.x * logits_gather::threads * T_vector;

    const int64_t final_token_offset = static_cast<int64_t>(seq_id) * embed_dim;

    T reg_buf[T_vector];

    if (thread_offset < embed_dim) {
        mem_access::load_global<logits_gather::granularity>(
            reg_buf, token_acts + token_offset + thread_offset);

        mem_access::store_global<logits_gather::granularity>(
            final_token_acts + final_token_offset + thread_offset, reg_buf);
    }
}
51
+
52
/*
Host-side launcher for logits_gather_kernel.

Grid layout: grid.x tiles the embedding dimension (one block moves
threads * T_vector elements), grid.y indexes the sequence. Assumes embed_dim is
16-byte aligned for type T (enforced by the Python wrapper).
*/
template <typename T>
void launch_logits_gather(T* final_token_acts,
                          const T* all_acts,
                          const RaggedBatchDescriptor* ragged_batch,
                          const InflightSeqDescriptor* inflight_batch,
                          const int32_t n_seqs,
                          const int32_t embed_dim,
                          cudaStream_t stream)
{
    // Each thread copies one 16-byte vector; a block covers elems_per_block elements.
    constexpr int vector_width = logits_gather::granularity / sizeof(T);
    constexpr int elems_per_block = logits_gather::threads * vector_width;

    // Ceil-divide so the tail of the embedding dimension is covered.
    const int blocks_per_seq = (embed_dim + elems_per_block - 1) / elems_per_block;

    const dim3 grid(blocks_per_seq, n_seqs, 1);
    const dim3 block(logits_gather::threads, 1, 1);

    logits_gather_kernel<T><<<grid, block, 0, stream>>>(
        final_token_acts, all_acts, ragged_batch, inflight_batch, embed_dim);
}

#define INSTANTIATE_FOR_TYPE(T)                                                      \
    template void launch_logits_gather<T>(T * final_token_acts,                      \
                                          const T* all_acts,                         \
                                          const RaggedBatchDescriptor* ragged_batch, \
                                          const InflightSeqDescriptor* inflight_batch, \
                                          const int32_t n_seqs,                      \
                                          const int32_t embed_dim,                   \
                                          cudaStream_t stream);

INSTANTIATE_FOR_TYPE(float)
INSTANTIATE_FOR_TYPE(__half)

#ifdef BF16_AVAILABLE
INSTANTIATE_FOR_TYPE(__nv_bfloat16)
#endif