cyd0806 commited on
Commit
97fbeab
·
verified ·
1 Parent(s): 282d392

Upload apex-master/csrc/multi_tensor_l2norm_scale_kernel.cu with huggingface_hub

Browse files
apex-master/csrc/multi_tensor_l2norm_scale_kernel.cu ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/ATen.h>
2
+ #include <ATen/AccumulateType.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/cuda/Exceptions.h>
5
+ #include <c10/cuda/CUDAGuard.h>
6
+ // Another possibility:
7
+ // #include <torch/all.h>
8
+
9
+ #include <assert.h>
10
+
11
+ #include "type_shim.h"
12
+ #include "multi_tensor_apply.cuh"
13
+
14
+ #define BLOCK_SIZE 512
15
+ #define ILP 4
16
+
17
// Returns true when `p` is aligned to an ILP-wide vector of T, i.e. it is
// safe to move data through the vectorized load_store() fast path.
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
  const uint64_t addr      = (uint64_t)p;
  const uint64_t vec_bytes = ILP * sizeof(T);
  return addr % vec_bytes == 0;
}
21
+
22
// Copies one ILP-wide vector of T from src[src_offset] to dst[dst_offset]
// in a single wide memory transaction. Offsets are in units of ILP elements;
// both pointers must satisfy is_aligned().
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
  using vec_t = typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type;
  reinterpret_cast<vec_t*>(dst)[dst_offset] = reinterpret_cast<vec_t*>(src)[src_offset];
}
27
+
28
// Fused multi-tensor functor: for each chunk it writes out = in * scale and
// accumulates the sum of squares of the *unscaled* inputs. Per-block partial
// sums land in `output[blockIdx.x]` (and, optionally, one slot per
// (tensor, chunk) in `output_per_tensor`); cleanup_v3 later reduces them into
// the final L2 norm. Invoked through multi_tensor_apply with depth 2:
// addresses[0] = inputs, addresses[1] = outputs.
template<typename in_t, typename out_t>
struct L2NormScaleFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,           // set to 1 if a non-finite value is seen
    TensorListMetadata<2>& tl,
    float* output,                     // per-block partial sums of squares
    float* output_per_tensor,          // [ntensors * max_chunks_per_tensor] or nullptr
    float scale,
    bool per_tensor,
    int max_chunks_per_tensor)
  {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    // Locate this block's (tensor, chunk) assignment.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    in_t* in = (in_t*)tl.addresses[0][tensor_loc];
    in += chunk_idx*chunk_size;

    out_t* out = (out_t*)tl.addresses[1][tensor_loc];
    out += chunk_idx*chunk_size;

    // Remaining elements from the start of this chunk to the tensor's end.
    n -= chunk_idx*chunk_size;

    // Scratch for the block reduction (sized for BLOCK_SIZE = 512 threads).
    __shared__ float s_vals[512];

    float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
    in_t r_in[ILP];
    for(int i = 0; i < ILP; i++)
    {
      vals[i] = 0.f;
      r_in[i] = 0;
    }
    //bool finite = true;
    out_t r_out[ILP];

    // to make things simple, we put aligned case in a different code path
    if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(in) && is_aligned(out))
    {
      // Fast path: vectorized ILP-wide loads/stores, one vector per thread
      // per iteration.
      for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
      {
        // load
        load_store(r_in, in, 0 , i_start);
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          float next = static_cast<float>(r_in[ii]);
          r_out[ii] = next*scale;     // scaled copy (cast back to out_t)
          vals[ii] += next*next;      // accumulate squares of the raw input
          //finite = finite && isfinite(r_in[ii]);
        }
        load_store(out, r_out, i_start, 0);
      }
    }
    else
    {
      // Slow path: scalar accesses with per-element bounds checks; threads
      // stride so consecutive lanes still touch consecutive addresses.
      for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
      {
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_in[ii] = 0;
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            r_in[ii] = in[i];
            float next = static_cast<float>(in[i]);
            vals[ii] += next*next;
          }
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_out[ii] = static_cast<float>(r_in[ii]) * scale;
          // finite = finite && isfinite(r_in[ii]);
        }
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
            out[i] = r_out[ii];
        }
      }
    }

    // Fold this thread's ILP partials together, then reduce across the block.
    float val = 0.f;
    for(int i = 0; i < ILP; i++)
      val += vals[i];

    float final = reduce_block_into_lanes(s_vals, val);

    if(threadIdx.x == 0)
    {
      if(!isfinite(final))
        *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
      output[blockIdx.x] += final;
      if(per_tensor)
        output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
    }
  }
};
135
// Probably better to template, but since we are not likely to support other norm
// types, a dedicated max-norm functor keeps things simple.
// Computes the per-chunk max absolute value (L-inf norm) of addresses[0];
// per-block partials go to output[blockIdx.x] (merged with fmaxf), and
// optionally one slot per (tensor, chunk) in output_per_tensor.
template<typename x_t>
struct MaxNormFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,          // set to 1 if a non-finite value is seen
    TensorListMetadata<1>& tl,
    float* output,                    // per-block partial max-abs values
    float* output_per_tensor,         // [ntensors * max_chunks_per_tensor] or nullptr
    bool per_tensor,
    int max_chunks_per_tensor)
  {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    // Locate this block's (tensor, chunk) assignment.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    x_t* x = (x_t*)tl.addresses[0][tensor_loc];
    x += chunk_idx*chunk_size;

    // Remaining elements from the start of this chunk to the tensor's end.
    n -= chunk_idx*chunk_size;

    // Scratch for the block reduction (sized for BLOCK_SIZE = 512 threads).
    __shared__ float s_vals[512];

    float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
    x_t r_x[ILP];
    for(int i = 0; i < ILP; i++)
    {
      vals[i] = 0.f;
      r_x[i] = 0;
    }

    // to make things simple, we put aligned case in a different code path
    if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x))
    {
      // Fast path: vectorized ILP-wide loads.
      for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
      {
        // load
        load_store(r_x, x, 0 , i_start);
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          float next = static_cast<float>(r_x[ii]);
          vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next));
        }
      }
    }
    else
    {
      // Slow path: scalar accesses with per-element bounds checks.
      for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
      {
#pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            float next = static_cast<float>(x[i]);
            vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next));
          }
        }
      }
    }

    // Fold this thread's ILP partials together, then max-reduce the block.
    float val = 0.f;
    for(int i = 0; i < ILP; i++)
      val = fmaxf(fabsf(val), fabsf(vals[i]));

    float final = reduce_block_into_lanes_max_op(s_vals, val);

    if(threadIdx.x == 0)
    {
      if(!isfinite(final))
        *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
      output[blockIdx.x] = fmaxf(fabsf(output[blockIdx.x]), fabsf(final));
      if(per_tensor)
        output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
    }
  }
};
219
+
220
// Second-pass kernel: collapses the per-block partials produced by
// L2NormScaleFunctor into final L2 norms.
//   - Block 0 sums the 320-slot `output` buffer (the host allocates exactly
//     320 entries) and writes sqrt of the total to *ret.
//   - When `per_tensor` is set, block b additionally sums row b of
//     `output_per_tensor` (max_chunks_per_tensor entries) and writes that
//     tensor's norm to ret_per_tensor[b].
// Launch configuration (see host launcher): 512 threads per block,
// grid = ntensors when per_tensor else 1.
__global__ void cleanup_v3(
  float* output,
  float* output_per_tensor,
  float* ret,
  float* ret_per_tensor,
  bool per_tensor,
  int max_chunks_per_tensor)
{
  // Scratch for reduce_block_into_lanes; must match blockDim.x == 512.
  __shared__ float vals[512];

  if(blockIdx.x == 0)
  {
    float val = 0.f;
    // `output` has exactly 320 entries; the remaining threads of the
    // 512-wide block contribute the 0 identity.
    if(threadIdx.x < 320)
      val = output[threadIdx.x];

    float final = reduce_block_into_lanes(vals, val);

    if(threadIdx.x == 0)
      *ret = sqrtf(final); // sqrtf: keep the computation in single precision
  }

  if(per_tensor)
  {
    float* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;

    // Grid-stride over this tensor's chunk partials.
    float val = 0.f;
    for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
      val += output_this_tensor[i];

    float final = reduce_block_into_lanes(vals, val);

    if(threadIdx.x == 0)
      ret_per_tensor[blockIdx.x] = sqrtf(final);
  }
}
256
+
257
+
258
// Host launcher: computes the global L2 norm of tensor_lists[0] while
// simultaneously writing scale * tensor_lists[0] into tensor_lists[1]
// (element type may differ between the two lists). Returns
// (global_norm, per_tensor_norms); the second tensor is empty unless
// per_tensor_python is true. noop_flag is set to 1 on the device if any
// non-finite value is encountered.
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_scale_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  float scale,
  at::optional<bool> per_tensor_python)
{
  bool per_tensor = per_tensor_python.has_value() ? per_tensor_python.value() : false;

  auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
  // 320 partial-sum slots; cleanup_v3 hard-codes this size when reducing.
  auto output = at::zeros({320}, float_options);

  at::Tensor output_per_tensor;
  at::Tensor ret_per_tensor;

  int ntensors = tensor_lists[0].size();
  int max_chunks_per_tensor = -1;

  if(per_tensor)
  {
    // Size the per-tensor scratch by the largest chunk count (ceil-div).
    for(int t = 0; t < ntensors; t++)
    {
      int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;
      if(max_chunks_this_tensor > max_chunks_per_tensor)
        max_chunks_per_tensor = max_chunks_this_tensor;
    }
    output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, float_options);
    ret_per_tensor = at::empty({ntensors}, float_options);
  }
  else
  {
    ret_per_tensor = at::empty({0}, float_options);
  }

  // Dispatch over (input dtype, output dtype); scalar_t_0 / scalar_t_1 are
  // bound by the nested DISPATCH_FLOAT_AND_HALF macros (type_shim.h).
  DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_scale_cuda",
    DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_l2norm_scale_cuda",
      multi_tensor_apply<2>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        tensor_lists,
        L2NormScaleFunctor<scalar_t_0, scalar_t_1>(),
        output.DATA_PTR<float>(),
        per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr,
        scale,
        per_tensor,
        max_chunks_per_tensor);))

  AT_CUDA_CHECK(cudaGetLastError());
  // AT_CUDA_CHECK(cudaDeviceSynchronize());

  // This involves one more small kernel launches, but will be negligible end to end.
  // I could get rid of these by hacking the functor + multi tensor harness with persistence
  // logic, but keeping it simple for now
  auto ret = at::empty({1}, output.options());
  const at::cuda::OptionalCUDAGuard device_guard(device_of(output));
  auto stream = at::cuda::getCurrentCUDAStream();
  // One block per tensor when per-tensor norms are requested; 512 threads
  // matches the __shared__ scratch size inside cleanup_v3.
  cleanup_v3<<<per_tensor ? ntensors : 1, 512, 0, stream>>>(
    output.DATA_PTR<float>(),
    per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr,
    ret.DATA_PTR<float>(),
    per_tensor ? ret_per_tensor.DATA_PTR<float>() : nullptr,
    per_tensor,
    max_chunks_per_tensor);

  return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor);
}
325
+
326
+