cyd0806 committed
Commit 14ae36f · verified · 1 Parent(s): aad0d48

Upload apex-master/csrc/multi_tensor_adam.cu with huggingface_hub

Files changed (1):
  1. apex-master/csrc/multi_tensor_adam.cu +513 -0
apex-master/csrc/multi_tensor_adam.cu ADDED
@@ -0,0 +1,513 @@
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>

#include <assert.h>

#include "type_shim.h"
#include "multi_tensor_apply.cuh"

#define BLOCK_SIZE 512
#define ILP 4

typedef enum {
  ADAM_MODE_0 = 0, // L2 regularization mode
  ADAM_MODE_1 = 1  // Decoupled weight decay mode (AdamW)
} adamMode_t;

using MATH_T = float;

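// All three functors below implement the same per-element Adam step; in the
// notation of the code (bias corrections precomputed on the host or derived
// from *step on the device):
//   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
//   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
//   m_hat = m_t / (1 - beta1^t),  v_hat = v_t / (1 - beta2^t)
//   ADAM_MODE_0 (L2):    g_t is first replaced by g_t + decay * p_{t-1}, then
//                        p_t = p_{t-1} - lr * m_hat / (sqrt(v_hat) + eps)
//   ADAM_MODE_1 (AdamW): p_t = p_{t-1} - lr * (m_hat / (sqrt(v_hat) + eps) + decay * p_{t-1})
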
template<typename T, typename FULL_T, typename index_t>
struct AdamFunctor
{
  __device__ __forceinline__ void operator()(
    index_t chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<4>& tl,
    const float beta1,
    const float beta2,
    const float beta1_correction,
    const float beta2_correction,
    const float epsilon,
    const float lr,
    adamMode_t mode,
    const float decay)
  {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    // Each block handles one chunk of one tensor; the multi_tensor_apply
    // launcher records the mapping in block_to_tensor / block_to_chunk.
    index_t tensor_loc = tl.block_to_tensor[blockIdx.x];

    // potentially use to pass in list of scalar
    // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

    index_t chunk_idx = tl.block_to_chunk[blockIdx.x];
    index_t n = tl.sizes[tensor_loc];

    // Slot order in tl.addresses: 0 = grad, 1 = param, 2 = exp_avg, 3 = exp_avg_sq.
    T* g = (T*)tl.addresses[0][tensor_loc];
    g += chunk_idx*chunk_size;

    T* p = (T*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    FULL_T* m = (FULL_T*)tl.addresses[2][tensor_loc];
    m += chunk_idx*chunk_size;

    FULL_T* v = (FULL_T*)tl.addresses[3][tensor_loc];
    v += chunk_idx*chunk_size;

    n -= chunk_idx*chunk_size;

    // see note in multi_tensor_scale_kernel.cu
    for(index_t i_start = 0;
        i_start < n && i_start < chunk_size;
        i_start += blockDim.x*ILP)
    {
      MATH_T r_g[ILP];
      MATH_T r_p[ILP];
      MATH_T r_m[ILP];
      MATH_T r_v[ILP];
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
        {
          r_g[ii] = g[i];
          r_p[ii] = p[i];
          r_m[ii] = m[i];
          r_v[ii] = v[i];
        } else {
          r_g[ii] = MATH_T(0);
          r_p[ii] = MATH_T(0);
          r_m[ii] = MATH_T(0);
          r_v[ii] = MATH_T(0);
        }
      }
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        if(mode == ADAM_MODE_0) { // L2
          r_g[ii] = r_g[ii] + (decay * r_p[ii]);
          r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = next_m_unbiased / denom;
          r_p[ii] = r_p[ii] - (lr * update);
        }
        else { // weight decay
          r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
          r_p[ii] = r_p[ii] - (lr * update);
        }
      }
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
        {
          p[i] = r_p[ii];
          m[i] = r_m[ii];
          v[i] = r_v[ii];
        }
      }
    }
  }
};

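// Geometry note: with BLOCK_SIZE = 512 and ILP = 4, one iteration of the
// i_start loop covers 512 * 4 = 2048 elements, and thread tid touches element
// i_start + tid + ii*blockDim.x, so each of the ILP passes is a contiguous,
// coalesced sweep of blockDim.x elements.
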
template<typename T, typename FULL_T>
struct AdamCapturableFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<4>& tl,
    const float beta1,
    const float beta2,
    const int* step,
    const int bias_correction,
    const float epsilon,
    const float* lr,
    adamMode_t mode,
    const float decay,
    const float* inv_scale)
  {
    if(*noop_gmem == 1)
      return;

    float beta1_correction = 1.0f, beta2_correction = 1.0f;
    if (bias_correction == 1) {
      beta1_correction = 1 - pow(beta1, *step);
      beta2_correction = 1 - pow(beta2, *step);
    }

    int tensor_loc = tl.block_to_tensor[blockIdx.x];

    // potentially use to pass in list of scalar
    // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    T* g = (T*)tl.addresses[0][tensor_loc];
    g += chunk_idx*chunk_size;

    T* p = (T*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    FULL_T* m = (FULL_T*)tl.addresses[2][tensor_loc];
    m += chunk_idx*chunk_size;

    FULL_T* v = (FULL_T*)tl.addresses[3][tensor_loc];
    v += chunk_idx*chunk_size;

    n -= chunk_idx*chunk_size;

    // see note in multi_tensor_scale_kernel.cu
    for(int i_start = 0;
        i_start < n && i_start < chunk_size;
        i_start += blockDim.x*ILP)
    {
      MATH_T r_g[ILP];
      MATH_T r_p[ILP];
      MATH_T r_m[ILP];
      MATH_T r_v[ILP];
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
        {
          // Unscale the gradient and write the unscaled value back in place.
          r_g[ii] = static_cast<MATH_T>(g[i]) * (*inv_scale);
          g[i] = static_cast<T>(r_g[ii]);
          r_p[ii] = static_cast<MATH_T>(p[i]);
          r_m[ii] = static_cast<MATH_T>(m[i]);
          r_v[ii] = static_cast<MATH_T>(v[i]);
        } else {
          r_g[ii] = MATH_T(0);
          r_p[ii] = MATH_T(0);
          r_m[ii] = MATH_T(0);
          r_v[ii] = MATH_T(0);
        }
      }
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        if(mode == ADAM_MODE_0) { // L2
          r_g[ii] = r_g[ii] + (decay * r_p[ii]);
          r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = next_m_unbiased / denom;
          r_p[ii] = r_p[ii] - (*lr * update);
        }
        else { // weight decay
          r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
          r_p[ii] = r_p[ii] - (*lr * update);
        }
      }
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
        {
          p[i] = static_cast<T>(r_p[ii]);
          // m and v are FULL_T buffers; cast to FULL_T so the moments keep full
          // precision (the original cast to T would round-trip the FP32 state
          // through the low-precision type).
          m[i] = static_cast<FULL_T>(r_m[ii]);
          v[i] = static_cast<FULL_T>(r_v[ii]);
        }
      }
    }
  }
};

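// Note the contrast with AdamFunctor above: the capturable functors do honor
// *noop_gmem, so if the device-side flag was set to 1, e.g. by a gradient
// scaler that found an inf/nan, the entire fused step degrades to a no-op.
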
template<typename T, typename FULL_T>
struct AdamCapturableMasterFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<5>& tl,
    const float beta1,
    const float beta2,
    const int* step,
    const int bias_correction,
    const float epsilon,
    const float* lr,
    adamMode_t mode,
    const float decay,
    const float* inv_scale)
  {
    if(*noop_gmem == 1)
      return;

    float beta1_correction = 1.0f, beta2_correction = 1.0f;
    if (bias_correction == 1) {
      beta1_correction = 1 - pow(beta1, *step);
      beta2_correction = 1 - pow(beta2, *step);
    }

    int tensor_loc = tl.block_to_tensor[blockIdx.x];

    // potentially use to pass in list of scalar
    // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    T* g = (T*)tl.addresses[0][tensor_loc];
    g += chunk_idx*chunk_size;

    T* p = (T*)tl.addresses[1][tensor_loc];
    p += chunk_idx*chunk_size;

    FULL_T* m = (FULL_T*)tl.addresses[2][tensor_loc];
    m += chunk_idx*chunk_size;

    FULL_T* v = (FULL_T*)tl.addresses[3][tensor_loc];
    v += chunk_idx*chunk_size;

    // Slot 4 holds the FP32 master copy of the params.
    FULL_T* p_master = (FULL_T*)tl.addresses[4][tensor_loc];
    p_master += chunk_idx*chunk_size;

    n -= chunk_idx*chunk_size;

    // see note in multi_tensor_scale_kernel.cu
    for(int i_start = 0;
        i_start < n && i_start < chunk_size;
        i_start += blockDim.x*ILP)
    {
      MATH_T r_g[ILP];
      MATH_T r_p[ILP];
      MATH_T r_m[ILP];
      MATH_T r_v[ILP];
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
        {
          r_g[ii] = static_cast<MATH_T>(g[i]) * (*inv_scale);
          g[i] = static_cast<T>(r_g[ii]);
          r_p[ii] = static_cast<MATH_T>(p_master[i]);
          r_m[ii] = static_cast<MATH_T>(m[i]);
          r_v[ii] = static_cast<MATH_T>(v[i]);
        } else {
          r_g[ii] = MATH_T(0);
          r_p[ii] = MATH_T(0);
          r_m[ii] = MATH_T(0);
          r_v[ii] = MATH_T(0);
        }
      }
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        if(mode == ADAM_MODE_0) { // L2
          r_g[ii] = r_g[ii] + (decay * r_p[ii]);
          r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = next_m_unbiased / denom;
          r_p[ii] = r_p[ii] - (*lr * update);
        }
        else { // weight decay
          r_m[ii] = beta1 * r_m[ii] + (1-beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1-beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
          r_p[ii] = r_p[ii] - (*lr * update);
        }
      }
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
        {
          p[i] = static_cast<T>(r_p[ii]);
          p_master[i] = static_cast<FULL_T>(r_p[ii]);
          m[i] = static_cast<FULL_T>(r_m[ii]);
          v[i] = static_cast<FULL_T>(r_v[ii]);
        }
      }
    }
  }
};

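// The master variant runs the update math against the FP32 master params in
// slot 4 (r_p is loaded from p_master) and writes the result to both copies,
// casting down to T only for the model's working params, so low-precision
// rounding never feeds back into the optimizer state.
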
void multi_tensor_adam_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  const float lr,
  const float beta1,
  const float beta2,
  const float epsilon,
  const int step,
  const int mode,
  const int bias_correction,
  const float weight_decay)
{
  using namespace at;

  // Handle bias correction mode
  float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
  if (bias_correction == 1) {
    bias_correction1 = 1 - std::pow(beta1, step);
    bias_correction2 = 1 - std::pow(beta2, step);
  }

  // If any tensor is large enough that a 32-bit element index could overflow,
  // fall back to 64-bit indexing in the kernel.
  size_t max_size = 0;
  bool requires_64bit_indexing = false;
  for (auto it = tensor_lists.begin(); it != tensor_lists.end(); it++) {
    for (auto it2 = it->begin(); it2 != it->end(); it2++) {
      if (it2->numel() > max_size) {
        max_size = it2->numel();
        if (max_size >= INT_MAX) {
          requires_64bit_indexing = true;
          break;
        }
      }
    }
    if (requires_64bit_indexing) {
      break;
    }
  }

  if (requires_64bit_indexing) {
    // Assume single type across p,g,m1,m2 now
    DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
      tensor_lists[0][0].scalar_type(), 0, "adam",
      multi_tensor_apply<4>(
        (int64_t) BLOCK_SIZE,
        (int64_t) chunk_size,
        noop_flag,
        tensor_lists,
        AdamFunctor<scalar_t_0, float, int64_t>(),
        beta1,
        beta2,
        bias_correction1,
        bias_correction2,
        epsilon,
        lr,
        (adamMode_t) mode,
        weight_decay); )
  } else {
    // Assume single type across p,g,m1,m2 now
    DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
      tensor_lists[0][0].scalar_type(), 0, "adam",
      multi_tensor_apply<4>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        tensor_lists,
        AdamFunctor<scalar_t_0, float, int32_t>(),
        beta1,
        beta2,
        bias_correction1,
        bias_correction2,
        epsilon,
        lr,
        (adamMode_t) mode,
        weight_decay); )
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
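
// Hypothetical host-side sketch (not part of the original file) of how a
// caller might drive multi_tensor_adam_cuda for a single FP32 parameter.
// The name `example_adam_step` and the chosen hyperparameters are
// illustrative; the inner-list order must match the functor's slot layout:
// {grads}, {params}, {exp_avgs}, {exp_avg_sqs}.
//
// void example_adam_step() {
//   auto opts = at::TensorOptions().device(at::kCUDA).dtype(at::kFloat);
//   auto p = at::randn({1024}, opts);
//   auto g = at::randn({1024}, opts);
//   auto m = at::zeros({1024}, opts);
//   auto v = at::zeros({1024}, opts);
//   auto noop = at::zeros({1}, opts.dtype(at::kInt));
//   std::vector<std::vector<at::Tensor>> lists = {{g}, {p}, {m}, {v}};
//   multi_tensor_adam_cuda(
//       /*chunk_size=*/2048 * 32, noop, lists,
//       /*lr=*/1e-3f, /*beta1=*/0.9f, /*beta2=*/0.999f,
//       /*epsilon=*/1e-8f, /*step=*/1, /*mode=*/ADAM_MODE_1,
//       /*bias_correction=*/1, /*weight_decay=*/0.01f);
// }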

void multi_tensor_adam_capturable_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::Tensor lr,
  const float beta1,
  const float beta2,
  const float epsilon,
  at::Tensor step,
  const int mode,
  const int bias_correction,
  const float weight_decay,
  at::Tensor inv_scale)
{
  using namespace at;

  DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
    tensor_lists[0][0].scalar_type(), 0, "adam",
    multi_tensor_apply<4>(
      BLOCK_SIZE,
      chunk_size,
      noop_flag,
      tensor_lists,
      AdamCapturableFunctor<scalar_t_0, float>(),
      beta1,
      beta2,
      step.data_ptr<int>(),
      bias_correction,
      epsilon,
      lr.data_ptr<float>(),
      (adamMode_t) mode,
      weight_decay,
      inv_scale.data_ptr<float>()); )

  AT_CUDA_CHECK(cudaGetLastError());
}
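
// Design note: the capturable entry points take lr, step, and inv_scale as
// device tensors and pass raw device pointers into the kernel, so their
// current values are read at kernel execution time. That is what makes the op
// safe to record into a CUDA graph and replay as lr and step change, without
// re-capturing.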

void multi_tensor_adam_capturable_master_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::Tensor lr,
  const float beta1,
  const float beta2,
  const float epsilon,
  at::Tensor step,
  const int mode,
  const int bias_correction,
  const float weight_decay,
  at::Tensor inv_scale)
{
  using namespace at;

  DISPATCH_DOUBLE_FLOAT_HALF_AND_BFLOAT(
    tensor_lists[0][0].scalar_type(), 0, "adam",
    multi_tensor_apply<5>(
      BLOCK_SIZE,
      chunk_size,
      noop_flag,
      tensor_lists,
      AdamCapturableMasterFunctor<scalar_t_0, float>(),
      beta1,
      beta2,
      step.data_ptr<int>(),
      bias_correction,
      epsilon,
      lr.data_ptr<float>(),
      (adamMode_t) mode,
      weight_decay,
      inv_scale.data_ptr<float>()); )

  AT_CUDA_CHECK(cudaGetLastError());
}
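// The master entry point dispatches multi_tensor_apply<5>: the fifth tensor
// list carries the FP32 master params consumed by AdamCapturableMasterFunctor.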