cyd0806 committed on
Commit
6732c7c
·
verified ·
1 Parent(s): fbc9e95

Upload apex-master/csrc/multi_tensor_lamb_mp.cu with huggingface_hub

Browse files
apex-master/csrc/multi_tensor_lamb_mp.cu ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/ATen.h>
2
+ #include <ATen/AccumulateType.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/cuda/Exceptions.h>
5
+ // Another possibility:
6
+ // #include <torch/all.h>
7
+
8
+ #include <assert.h>
9
+
10
+ #include "type_shim.h"
11
+ #include "multi_tensor_apply.cuh"
12
+
13
+ #define BLOCK_SIZE 512
14
+ #define ILP 4
15
+
16
+ template<typename T>
17
+ __device__ __forceinline__ bool is_aligned(T* p){
18
+ return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
19
+ }
20
+
21
+ template<typename T>
22
+ __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
23
+ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
24
+ ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
25
+ }
26
+
27
+ typedef enum{
28
+ MOMENT_MODE_0 =0, // L2 regularization mode
29
+ MOMENT_MODE_1 =1 // Decoupled weight decay mode
30
+ } adamMode_t;
31
+
32
// Forward declaration -- presumably defined in a sibling l2norm kernel file
// (TODO confirm).  Returns a (total_norm, per_tensor_norms) tensor pair; the
// per-tensor norms are used below for the LAMB trust-ratio computation.
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_mp_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::optional<bool> per_tensor_python);

// All per-element math is performed in fp32 regardless of the storage dtype.
using MATH_T = float;
39
+
40
// LAMB stage 1: computes the Adam-style update direction for every chunk.
// Grad dtype T and optimizer-state dtype param_t may differ (mixed precision).
// The computed update is written back IN-PLACE into the grad tensors (slot 0),
// and the m/v moments (slots 2/3) are updated in-place as well.  Parameters
// (slot 1) are only read here; they are modified in stage 2 once the
// per-tensor update norms are known.
template<typename T, typename param_t>
struct LAMBStage1Functor
{
  // Invoked once per (tensor, chunk) pair by multi_tensor_apply.
  // Tensor-list slots: [0]=grad (T), [1]=param (param_t),
  // [2]=exp_avg (param_t), [3]=exp_avg_sq (param_t).
  // NOTE(review): `found_inf` is accepted but never read in this body;
  // non-finite handling appears to rely on `noop_gmem` instead -- confirm.
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<4>& tl,
    const float beta1,
    const float beta2,
    const float beta3,
    const int* step_ptr,
    const int bias_correction,
    const float epsilon,
    adamMode_t mode,
    const float decay,
    const float* global_grad_norm,
    const float* max_global_grad_norm,
    const float* found_inf,
    const float* inv_scale)
  {
    // Early-out if a previous kernel flagged this step as a no-op.
    if (*noop_gmem) {
      return;
    }

    // Adam bias-correction factors (1 - beta^t).  The step count is read from
    // device memory (step_ptr) so the host never synchronizes on it.
    float beta1_correction = 1.0f;
    float beta2_correction = 1.0f;
    if (bias_correction == 1) {
      int step = *step_ptr;
      beta1_correction = 1 - std::pow(beta1, step);
      beta2_correction = 1 - std::pow(beta2, step);
    }

    // Map this CUDA block to its (tensor, chunk) assignment.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    // Gradient clipping factor: scale down only when the global grad norm
    // exceeds the configured maximum, otherwise leave grads unscaled.
    float clipped_global_grad_norm = (*global_grad_norm) > (*max_global_grad_norm) ? (*global_grad_norm) / (*max_global_grad_norm) : 1.0f;

    T* g = (T*)tl.addresses[0][tensor_loc];
    g += chunk_idx*chunk_size;

    param_t* p = (param_t*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    param_t* m = (param_t*)tl.addresses[2][tensor_loc];
    m += chunk_idx*chunk_size;

    param_t* v = (param_t*)tl.addresses[3][tensor_loc];
    v += chunk_idx*chunk_size;

    // Remaining element count for this chunk (may be < chunk_size at the tail).
    n -= chunk_idx*chunk_size;

    // fp32 math registers, ILP elements per thread.
    MATH_T r_g[ILP];
    MATH_T r_p[ILP];
    MATH_T r_m[ILP];
    MATH_T r_v[ILP];
    // to make things simple, we put aligned case in a different code path
    if(n % ILP == 0 &&
       chunk_size % ILP == 0 &&
       is_aligned(g) &&
       is_aligned(p) &&
       is_aligned(m) &&
       is_aligned(v))
    {
      // Staging buffers in the tensors' native dtypes for vectorized I/O.
      T l_g[ILP];
      param_t l_p[ILP];
      param_t l_m[ILP];
      param_t l_v[ILP];
      for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
      {
        // load (one ILP-wide vector transaction per array)
        load_store(l_g, g, 0, i_start);
        // params are only needed when weight decay is active; skip the load otherwise
        if (decay != 0)
          load_store(l_p, p, 0, i_start);
        load_store(l_m, m, 0, i_start);
        load_store(l_v, v, 0, i_start);
        // unpack into fp32 registers; grads are unscaled by inv_scale (AMP)
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_g[ii] = l_g[ii] * (*inv_scale);
          if (decay == 0) {
            r_p[ii] = MATH_T(0);
          }
          else {
            r_p[ii] = l_p[ii];
          }
          r_m[ii] = l_m[ii];
          r_v[ii] = l_v[ii];
        }
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          if (mode == MOMENT_MODE_0) {
            MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm;
            // L2 on scaled grad
            scaled_grad = scaled_grad + decay*r_p[ii];
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = next_m_unbiased / denom;
          }
          else {
            // Decoupled weight decay: decay term is added AFTER the Adam step.
            MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm;
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
          }
        }
        // Repack.  Note l_p is filled but never stored below -- params are
        // untouched by stage 1; the update lives in l_g.
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          l_p[ii] = r_p[ii];
          // Difference from APEX's LAMB kernel. `g` and `p` can be different dtypes.
          l_g[ii] = r_p[ii];
          l_m[ii] = r_m[ii];
          l_v[ii] = r_v[ii];
        }
        // store: update direction overwrites the grads; moments updated in place
        load_store(g, l_g, i_start, 0);
        load_store(m, l_m, i_start, 0);
        load_store(v, l_v, i_start, 0);
      }
    }
    else
    {
      // Scalar fallback for unaligned/ragged chunks.
      // see note in multi_tensor_scale_kernel.cu
      for(int i_start = 0;
          i_start < n && i_start < chunk_size;
          i_start += blockDim.x*ILP)
      {
        MATH_T r_g[ILP];
        MATH_T r_p[ILP];
        MATH_T r_m[ILP];
        MATH_T r_v[ILP];
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            r_g[ii] = g[i] * (*inv_scale);
            // special ?optimization? for lamb stage 1
            if (decay == 0) {
              r_p[ii] = MATH_T(0);
            }
            else {
              r_p[ii] = p[i];
            }
            r_m[ii] = m[i];
            r_v[ii] = v[i];
          } else {
            // Out-of-range lanes compute on zeros and are masked at store time.
            r_g[ii] = MATH_T(0);
            r_p[ii] = MATH_T(0);
            r_m[ii] = MATH_T(0);
            r_v[ii] = MATH_T(0);
          }
        }
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          if (mode == MOMENT_MODE_0) {
            MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm;
            // L2 on scaled grad
            scaled_grad = scaled_grad + decay*r_p[ii];
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = next_m_unbiased / denom;
          }
          else {
            // Decoupled weight decay: decay term is added AFTER the Adam step.
            MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm;
            r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
            r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
            MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
            MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
            MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
            r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
          }
        }
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            // Update direction overwrites the grads; moments updated in place.
            g[i] = r_p[ii];
            m[i] = r_m[ii];
            v[i] = r_v[ii];
          }
        }
      }
    }
  }
};
242
+
243
+ // Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
244
+ // It computes new parameter value.
245
+ // N == 2: FP32 params, no master params
246
+ // N == 3: FP16 params, FP32 master params.
247
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
// N == 2: FP32 params, no master params
// N == 3: FP16 params, FP32 master params.
// Tensor-list slots: [0]=update (T, produced by stage 1 in the grad storage),
// [1]=params (param_t), and when N == 3, [2]=model-dtype output params (T).
template<typename T, int N, typename param_t>
struct LAMBStage2Functor
{
  static_assert((N == 2 && std::is_same<T, param_t>::value) || (N == 3 && std::is_same<param_t, float>::value), "");
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<N>& tl,
    const float* per_tensor_param_norm,
    const float* per_tensor_update_norm,
    const float* learning_rate,
    const float decay,
    bool use_nvlamb)
  {
    // Early-out if a previous kernel flagged this step as a no-op.
    if (*noop_gmem) {
      return;
    }

    // Map this CUDA block to its (tensor, chunk) assignment; tensor_num
    // indexes the per-tensor norm arrays across multiple launches.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int tensor_num = tl.start_tensor_this_launch + tensor_loc;
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    // LAMB trust ratio: lr * (||param|| / ||update||), falling back to the
    // plain lr when either norm is zero.
    MATH_T ratio = *learning_rate;
    // nvlamb: apply adaptive learning rate to all parameters
    // otherwise, only apply to those with non-zero weight decay
    if (use_nvlamb || (decay != 0.0))
    {
      float param_norm = per_tensor_param_norm[tensor_num];
      float update_norm = per_tensor_update_norm[tensor_num];
      ratio = (update_norm != 0.0f && param_norm != 0.0f) ? *learning_rate * (param_norm / update_norm) : *learning_rate;
    }

    T* update = (T*)tl.addresses[0][tensor_loc];
    update += chunk_idx*chunk_size;

    param_t* p = (param_t*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    // out_p is only initialized/used on the N == 3 (master-param) path;
    // N is a compile-time constant, so the N == 2 path never touches it.
    T* out_p;
    if (N == 3) {
      out_p = (T*)tl.addresses[2][tensor_loc];
      out_p += chunk_idx*chunk_size;
    }

    // Remaining element count for this chunk (may be < chunk_size at the tail).
    n -= chunk_idx*chunk_size;

    // to make things simple, we put aligned case in a different code path
    bool can_use_aligned_path = n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(p) && is_aligned(update);
    if (N == 3) {
      can_use_aligned_path = can_use_aligned_path && is_aligned(out_p);
    }
    if(can_use_aligned_path)
    {
      // Staging buffers in the tensors' native dtypes for vectorized I/O.
      param_t r_p[ILP];
      T r_update[ILP];
      T r_out_p[ILP];
      for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
      {
        // load
        load_store(r_p, p, 0, i_start);
        load_store(r_update, update, 0, i_start);
        if (N == 3) {
          load_store(r_out_p, out_p, 0, i_start);
        }
        // p <- p - ratio * update, computed in fp32
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * static_cast<MATH_T>(r_update[ii]));
          if (N == 3) {
            // Mirror the new master-param value into the model-dtype copy.
            r_out_p[ii] = r_p[ii];
          }
        }
        load_store(p, r_p, i_start, 0);
        if (N == 3) {
          load_store(out_p, r_out_p, i_start, 0);
        }
      }
    }
    else
    {
      // Scalar fallback for unaligned/ragged chunks.
      for(int i_start = 0;
          i_start < n && i_start < chunk_size;
          i_start += blockDim.x*ILP)
      {
        MATH_T r_p[ILP];
        MATH_T r_update[ILP];
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            r_p[ii] = p[i];
            r_update[ii] = update[i];
          }
        }
        // p <- p - ratio * update
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
        }
        #pragma unroll
        for(int ii = 0; ii < ILP; ii++)
        {
          int i = i_start + threadIdx.x + ii*blockDim.x;
          if(i < n && i < chunk_size)
          {
            p[i] = r_p[ii];
            if (N == 3) {
              // Mirror the new master-param value into the model-dtype copy.
              out_p[i] = r_p[ii];
            }
          }
        }
      }
    }
  }
};
365
+
366
+
367
// Multi-tensor LAMB optimizer step (mixed-precision variant).
//
// tensor_lists layout:
//   4 lists: [grads, params, exp_avg, exp_avg_sq]              (FP32 params, no master params)
//   5 lists: [grads, params, exp_avg, exp_avg_sq, model_params] (FP32 master params + low-precision model params)
//
// Stage 1 writes the Adam-style update in-place into the grad tensors; the
// per-tensor param/update L2 norms are then fed to stage 2, which applies the
// LAMB trust-ratio-scaled update to the parameters (and, with 5 lists, mirrors
// the result into the model-dtype copies).  lr/step/norm/scale inputs are
// tensors so no host-device synchronization is required.
void multi_tensor_lamb_mp_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::Tensor lr,
  const float beta1,
  const float beta2,
  const float epsilon,
  at::Tensor step,
  const int bias_correction,
  const float weight_decay,
  const int grad_averaging,
  const int mode,
  at::Tensor global_grad_norm,
  at::Tensor max_grad_norm,
  at::optional<bool> use_nvlamb_python,
  at::Tensor found_inf,
  at::Tensor inv_scale)
{
  // n_tensors == 5: FP16 model params & FP32 master params
  // n_tensors == 4: FP32 model params & NO FP32 master params
  const auto n_tensors = tensor_lists.size();
  // TORCH_CHECK instead of assert(): assert() compiles away under NDEBUG
  // (typical release builds), which would let a wrong list count fall through
  // to out-of-range indexing (tensor_lists[4]) below.
  TORCH_CHECK(n_tensors == 4 || n_tensors == 5,
              "multi_tensor_lamb_mp_cuda expected 4 or 5 tensor lists, got ", n_tensors);
  using namespace at;

  bool use_nvlamb = use_nvlamb_python.has_value() ? use_nvlamb_python.value() : false;

  // Bias correction is computed inside the stage-1 functor from the
  // device-side step counter, so nothing to precompute here.

  // Handle grad averaging mode: beta3 multiplies the grad term of the
  // first-moment update (1-beta1 when averaging, 1 otherwise).
  float beta3 = 1.0f;
  if (grad_averaging == 1) beta3 = 1 - beta1;

  // Stage 1 only ever touches the first four lists.
  std::vector<std::vector<at::Tensor>> stage1_tensor_lists(tensor_lists.begin(), tensor_lists.begin() + 4);
  std::vector<std::vector<at::Tensor>> grad_list(tensor_lists.begin(), tensor_lists.begin()+1);
  std::vector<std::vector<at::Tensor>> param_list(tensor_lists.begin()+1, tensor_lists.begin()+2);

  // Compute per tensor param norm
  auto param_norm_tuple = multi_tensor_l2norm_mp_cuda(chunk_size, noop_flag, param_list, true);

  // We now in-place modify grad to store update before compute its norm
  // Generally this is not a issue since people modify grad in step() method all the time
  // We can also grab list of empty tensor to avoid this, but I'd like to save space/cpu code
  if (n_tensors == 4) {
    // No master params: optimizer state shares the param/grad dtype.
    DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1",
      multi_tensor_apply<4>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        stage1_tensor_lists,
        LAMBStage1Functor<scalar_t_0, scalar_t_0>(),
        beta1,
        beta2,
        beta3, // 1-beta1 or 1 depends on averaging mode
        step.data_ptr<int>(),
        bias_correction,
        epsilon,
        (adamMode_t) mode,
        weight_decay,
        global_grad_norm.data_ptr<float>(),
        max_grad_norm.data_ptr<float>(),
        found_inf.data_ptr<float>(),
        inv_scale.data_ptr<float>()); )
  } else {
    // Master params present: optimizer state is always fp32.
    DISPATCH_FLOAT_HALF_AND_BFLOAT(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1",
      multi_tensor_apply<4>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        stage1_tensor_lists,
        LAMBStage1Functor<scalar_t_0, float>(),
        beta1,
        beta2,
        beta3, // 1-beta1 or 1 depends on averaging mode
        step.data_ptr<int>(),
        bias_correction,
        epsilon,
        (adamMode_t) mode,
        weight_decay,
        global_grad_norm.data_ptr<float>(),
        max_grad_norm.data_ptr<float>(),
        found_inf.data_ptr<float>(),
        inv_scale.data_ptr<float>()); )
  }

  // Compute update norms (stage 1 stored the updates in the grad tensors)
  auto update_norm_tuple = multi_tensor_l2norm_mp_cuda(chunk_size, noop_flag, grad_list, true);

  std::vector<std::vector<at::Tensor>> grad_param_list(tensor_lists.begin(), tensor_lists.begin()+2);
  if (n_tensors == 4) {
    DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2",
      multi_tensor_apply<2>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        grad_param_list,
        LAMBStage2Functor<scalar_t_0, 2, scalar_t_0>(),
        std::get<1>(param_norm_tuple).data_ptr<float>(),
        std::get<1>(update_norm_tuple).data_ptr<float>(),
        lr.data_ptr<float>(),
        weight_decay,
        use_nvlamb); )
  } else {
    // Third slot: model-dtype params that stage 2 keeps in sync with master.
    grad_param_list.push_back(tensor_lists[4]);
    DISPATCH_FLOAT_HALF_AND_BFLOAT(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2",
      multi_tensor_apply<3>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        grad_param_list,
        LAMBStage2Functor<scalar_t_0, 3, float>(),
        std::get<1>(param_norm_tuple).data_ptr<float>(),
        std::get<1>(update_norm_tuple).data_ptr<float>(),
        lr.data_ptr<float>(),
        weight_decay,
        use_nvlamb); )
  }
  // Surface any kernel-launch error from the dispatches above.
  AT_CUDA_CHECK(cudaGetLastError());

}