cyd0806 committed on
Commit
45c961a
·
verified ·
1 Parent(s): 7efd67c

Upload apex-master/csrc/multi_tensor_lamb_stage_2.cu with huggingface_hub

Browse files
apex-master/csrc/multi_tensor_lamb_stage_2.cu ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/ATen.h>
2
+ #include <ATen/AccumulateType.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <ATen/cuda/Exceptions.h>
5
+ // Another possibility:
6
+ // #include <torch/all.h>
7
+
8
+ #include <assert.h>
9
+
10
+ #include "type_shim.h"
11
+ #include "multi_tensor_apply.cuh"
12
+
13
+ #define BLOCK_SIZE 512
14
+ #define ILP 4
15
+
16
+ using MATH_T = float;
17
+
18
// Step 2 of the two-stage LAMB optimizer: reads each parameter's 'update'
// value plus the per-tensor param/update norms produced by stage 1, and
// writes the new parameter value in place.
//
// Invoked through multi_tensor_apply<2>: tl.addresses[0] holds parameter
// chunks (type T), tl.addresses[1] holds update chunks (type UPD_T).
template<typename T, typename UPD_T>
struct LAMBStage2Functor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<2>& tl,
    const float* per_tensor_param_norm,
    const float* per_tensor_update_norm,
    const float learning_rate,
    const float decay,
    bool use_nvlamb)
  {
    // Deliberately no early-out on *noop_gmem: infs/nans in the update are
    // allowed to propagate into the parameters so the caller can detect them.
    // if(*noop_gmem == 1)
    //   return;

    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int tensor_num = tl.start_tensor_this_launch + tensor_loc;
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    // Trust ratio: scale the learning rate by ||param|| / ||update||.
    // nvlamb applies the adaptive rate to every tensor; classic LAMB only
    // where weight decay is non-zero. Fall back to the plain lr when either
    // norm is zero.
    MATH_T ratio = learning_rate;
    if (use_nvlamb || (decay != 0.0f))  // float literal; original compared against double 0.0
    {
      float param_norm = per_tensor_param_norm[tensor_num];
      float update_norm = per_tensor_update_norm[tensor_num];
      ratio = (update_norm != 0.0f && param_norm != 0.0f)
                  ? learning_rate * (param_norm / update_norm)
                  : learning_rate;
    }

    T* p = (T*)tl.addresses[0][tensor_loc];
    p += chunk_idx * chunk_size;

    UPD_T* update = (UPD_T*)tl.addresses[1][tensor_loc];
    update += chunk_idx * chunk_size;

    // Elements remaining from this chunk's start to the end of the tensor.
    n -= chunk_idx * chunk_size;

    for (int i_start = 0;
         i_start < n && i_start < chunk_size;
         i_start += blockDim.x * ILP)
    {
      // Stage registers in MATH_T (float). The original kept r_p as T and
      // computed ratio*(T)r_update[ii], which rounds the update to half
      // precision before the multiply when T is half; doing the arithmetic
      // in MATH_T preserves precision and only rounds once at the store.
      MATH_T r_p[ILP];
      MATH_T r_update[ILP];
#pragma unroll
      for (int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii * blockDim.x;
        if (i < n && i < chunk_size)
        {
          r_p[ii] = static_cast<MATH_T>(p[i]);
          r_update[ii] = static_cast<MATH_T>(update[i]);
        }
        else
        {
          // Zero the tail lanes so the unguarded math below never operates
          // on uninitialized registers (the original did, harmlessly but
          // visibly to initcheck tooling).
          r_p[ii] = MATH_T(0);
          r_update[ii] = MATH_T(0);
        }
      }
#pragma unroll
      for (int ii = 0; ii < ILP; ii++)
      {
        r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
      }
#pragma unroll
      for (int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii * blockDim.x;
        if (i < n && i < chunk_size)
        {
          p[i] = static_cast<T>(r_p[ii]);
        }
      }
    }
  }
};
93
+
94
// Host-side launcher for LAMB stage 2.
//
// Dispatches on the dtype pair (params = tensor_lists[0], updates =
// tensor_lists[1]) and runs LAMBStage2Functor over every chunk of every
// tensor via multi_tensor_apply. Launches on the current CUDA stream;
// only launch errors are checked here (execution errors surface at the
// caller's next synchronizing call).
void multi_tensor_lamb_stage2_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  at::Tensor per_tensor_param_norm,
  at::Tensor per_tensor_update_norm,
  const float lr,
  const float weight_decay,
  at::optional<bool> use_nvlamb_python)
{
  // Default to classic LAMB behavior (adaptive rate only where weight
  // decay is non-zero) when the Python side passes no explicit flag.
  const bool use_nvlamb = use_nvlamb_python.value_or(false);

  using namespace at;

  // Nested dispatch: scalar_t_0 = param dtype, scalar_t_1 = update dtype.
  DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2",
    DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "lamb_stage_2",
      multi_tensor_apply<2>(
        BLOCK_SIZE,
        chunk_size,
        noop_flag,
        tensor_lists,
        LAMBStage2Functor<scalar_t_0, scalar_t_1>(),
        per_tensor_param_norm.DATA_PTR<float>(),
        per_tensor_update_norm.DATA_PTR<float>(),
        lr,
        weight_decay,
        use_nvlamb);))

  // Catch bad launch configuration immediately.
  AT_CUDA_CHECK(cudaGetLastError());

  // AT_CUDA_CHECK(cudaDeviceSynchronize());
}