cyd0806 committed
Commit 9de55c2 · verified · 1 parent: 78aabc0

Upload apex-master/csrc/multi_tensor_scale_kernel.cu with huggingface_hub

apex-master/csrc/multi_tensor_scale_kernel.cu ADDED
@@ -0,0 +1,136 @@
+ #include <ATen/ATen.h>
+ #include <ATen/AccumulateType.h>
+ #include <ATen/cuda/CUDAContext.h>
+ #include <ATen/cuda/Exceptions.h>
+ // Another possibility:
+ // #include <torch/all.h>
+
+ #include <assert.h>
+ // Stringstream is a big hammer, but I want to rely on operator<< for dtype.
+ #include <sstream>
+
+ #include "type_shim.h"
+ #include "multi_tensor_apply.cuh"
+
+ #define BLOCK_SIZE 512
+ #define ILP 4
+
+ template<typename T>
+ __device__ __forceinline__ bool is_aligned(T* p){
+   return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
+ }
+
+ template<typename T>
+ __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
+   typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
+   ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
+ }
+
+ template<typename in_t, typename out_t>
+ struct ScaleFunctor
+ {
+   __device__ __forceinline__ void operator()(
+     int chunk_size,
+     volatile int* noop_gmem,
+     TensorListMetadata<2>& tl,
+     float scale)
+   {
+     // I'd like this kernel to propagate infs/nans.
+     // if(*noop_gmem == 1)
+     //   return;
+
+     int tensor_loc = tl.block_to_tensor[blockIdx.x];
+     int chunk_idx = tl.block_to_chunk[blockIdx.x];
+     int n = tl.sizes[tensor_loc];
+
+     in_t* in = (in_t*)tl.addresses[0][tensor_loc];
+     in += chunk_idx*chunk_size;
+
+     out_t* out = (out_t*)tl.addresses[1][tensor_loc];
+     out += chunk_idx*chunk_size;
+
+     n -= chunk_idx*chunk_size;
+
+     bool finite = true;
+     in_t r_in[ILP];
+     out_t r_out[ILP];
+
+     // to make things simple, we put aligned case in a different code path
+     if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(in) && is_aligned(out))
+     {
+       for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
+       {
+         // load
+         load_store(r_in, in, 0, i_start);
+ #pragma unroll
+         for(int ii = 0; ii < ILP; ii++)
+         {
+           r_out[ii] = static_cast<float>(r_in[ii]) * scale;
+           finite = finite && isfinite(r_in[ii]);
+         }
+         // store
+         load_store(out, r_out, i_start, 0);
+       }
+     }
+     else
+     {
+       // Non-divergent exit condition for __syncthreads, not necessary here
+       for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
+       {
+ #pragma unroll
+         for(int ii = 0; ii < ILP; ii++)
+         {
+           r_in[ii] = 0;
+           int i = i_start + threadIdx.x + ii*blockDim.x;
+           if(i < n && i < chunk_size)
+             r_in[ii] = in[i];
+         }
+         // note for clarification to future michael:
+         // From a pure memory dependency perspective, there's likely no point unrolling
+         // the write loop, since writes just fire off once their LDGs arrive.
+         // Put another way, the STGs are dependent on the LDGs, but not on each other.
+         // There is still compute ILP benefit from unrolling the loop though.
+ #pragma unroll
+         for(int ii = 0; ii < ILP; ii++)
+         {
+           r_out[ii] = static_cast<float>(r_in[ii]) * scale;
+           finite = finite && isfinite(r_in[ii]);
+         }
+ #pragma unroll
+         for(int ii = 0; ii < ILP; ii++)
+         {
+           int i = i_start + threadIdx.x + ii*blockDim.x;
+           if(i < n && i < chunk_size)
+             out[i] = r_out[ii];
+         }
+       }
+     }
+     if(!finite)
+       *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
+   }
+ };
+
+ void multi_tensor_scale_cuda(
+   int chunk_size,
+   at::Tensor noop_flag,
+   std::vector<std::vector<at::Tensor>> tensor_lists,
+   float scale)
+ {
+   using namespace at;
+   // The output (downscaled) type is always float.
+   // If build times suffer, think about where to put this dispatch,
+   // and what logic should be moved out of multi_tensor_apply.
+
+   DISPATCH_FLOAT_HALF_AND_BFLOAT(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_scale_cuda",
+     DISPATCH_FLOAT_HALF_AND_BFLOAT(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_scale_cuda",
+       multi_tensor_apply<2>(
+         BLOCK_SIZE,
+         chunk_size,
+         noop_flag,
+         tensor_lists,
+         ScaleFunctor<scalar_t_0, scalar_t_1>(),
+         scale); ))
+   AT_CUDA_CHECK(cudaGetLastError());
+
+   // AT_CUDA_CHECK(cudaDeviceSynchronize());
+ }
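
For context, the exported entry point multi_tensor_scale_cuda takes two parallel tensor lists (inputs at index 0, outputs at index 1), a GPU-resident noop flag, and a scale factor, and copies each input element into the corresponding output scaled by that factor. A minimal host-side caller sketch is below; it is not part of the uploaded file, and the function name scale_fp16_grads_into_fp32, the gradient-list names, and the chunk size are illustrative assumptions.

// Hypothetical caller sketch (not part of this commit). Assumes this .cu file
// is compiled into the same extension so multi_tensor_scale_cuda can be linked.
#include <ATen/ATen.h>
#include <vector>

void multi_tensor_scale_cuda(int chunk_size, at::Tensor noop_flag,
                             std::vector<std::vector<at::Tensor>> tensor_lists,
                             float scale);  // declaration repeated here for illustration

void scale_fp16_grads_into_fp32(const std::vector<at::Tensor>& fp16_grads,   // in_t tensors
                                const std::vector<at::Tensor>& fp32_grads,   // out_t tensors, same shapes
                                at::Tensor noop_flag,                        // int tensor on the same GPU, zeroed beforehand
                                float inv_loss_scale)
{
  // tensor_lists[0] feeds tl.addresses[0] (the reads in ScaleFunctor),
  // tensor_lists[1] feeds tl.addresses[1] (the writes).
  std::vector<std::vector<at::Tensor>> tensor_lists{fp16_grads, fp32_grads};

  // 2048*32 elements per chunk is an illustrative choice, not a value taken from this file.
  multi_tensor_scale_cuda(2048 * 32, noop_flag, tensor_lists, inv_loss_scale);

  // If any input element was inf/nan, the kernel wrote 1 into noop_flag;
  // the caller can read it back to decide whether to skip the optimizer step.
}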