Translation · English · Chinese · RWKV_V7 · English -> Chinese · 0.4B · 1.5B
Alic-Li committed · Commit dcb3289 · verified · 1 parent: 9a20473

Upload RWKV_pip_infer For ROCm Optimized version

rwkv_rocm/__init__.py ADDED
File without changes
rwkv_rocm/cuda/gemm_fp16_cublas.cu ADDED
@@ -0,0 +1,71 @@
1
+ #include <cublas_v2.h>
2
+ #include <cuda.h>
3
+ #include <cuda_fp16.h>
4
+ #include <cuda_runtime.h>
5
+ #include <torch/extension.h>
6
+ #include <c10/cuda/CUDAGuard.h>
7
+ #include <ATen/cuda/CUDAContext.h>
8
+
9
+ #define CUBLAS_CHECK(condition) \
10
+ for (cublasStatus_t _cublas_check_status = (condition); \
11
+ _cublas_check_status != CUBLAS_STATUS_SUCCESS;) \
12
+ throw std::runtime_error("cuBLAS error " + \
13
+ std::to_string(_cublas_check_status) + " at " + \
14
+ std::to_string(__LINE__));
15
+
16
+ #define CUDA_CHECK(condition) \
17
+ for (cudaError_t _cuda_check_status = (condition); \
18
+ _cuda_check_status != cudaSuccess;) \
19
+ throw std::runtime_error( \
20
+ "CUDA error " + std::string(cudaGetErrorString(_cuda_check_status)) + \
21
+ " at " + std::to_string(__LINE__));
22
+
23
+ /*
24
+ NOTE: blas gemm is column-major by default, but we need row-major output.
25
+ The data of row-major, transposed matrix is exactly the same as the
26
+ column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
27
+ */
28
+ void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
29
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
30
+ const auto cuda_data_type = CUDA_R_16F;
31
+ const auto cuda_c_data_type =
32
+ c.dtype() == torch::kFloat32 ? CUDA_R_32F : CUDA_R_16F;
33
+ const auto compute_type = HIPBLAS_COMPUTE_32F;
34
+ const float sp_alpha = 1.f;
35
+ // swap a and b, and use CUBLAS_OP_N. see the notes above
36
+ std::swap(a, b);
37
+ const cublasOperation_t cublas_trans_a = CUBLAS_OP_N;
38
+ const cublasOperation_t cublas_trans_b = CUBLAS_OP_N;
39
+ // m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
40
+ // negative axis is used because of the existence of batch matmul.
41
+ const int m = a.size(-1);
42
+ const int k = a.size(-2);
43
+ const int n = b.size(-2);
44
+ const int cublas_lda = m;
45
+ const int cublas_ldb = k;
46
+ const int cublas_ldc = m;
47
+ cublasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();
48
+
49
+ cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
50
+ const float sp_beta = 0.f;
51
+ if (a.sizes().size() == 2 && b.sizes().size() == 2) {
52
+ CUBLAS_CHECK(cublasGemmEx(
53
+ cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
54
+ a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
55
+ cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
56
+ compute_type, algo));
57
+ } else {
58
+ // batch matmul
59
+ assert(a.sizes().size() == 3 && b.sizes().size() == 3);
60
+
61
+ const long long int cublas_stride_a = m * k;
62
+ const long long int cublas_stride_b = k * n;
63
+ const long long int cublas_stride_c = m * n;
64
+ CUBLAS_CHECK(cublasGemmStridedBatchedEx(
65
+ cublas_handle, cublas_trans_a, cublas_trans_b, m,
66
+ n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
67
+ cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
68
+ &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
69
+ a.size(0), compute_type, algo));
70
+ }
71
+ }
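
The NOTE in gemm_fp16_cublas.cu leans on the identity (A·B)^T = B^T·A^T: a row-major C = A·B occupies memory exactly like the column-major C^T, so swapping the operands lets a column-major BLAS produce row-major output without an explicit transpose. A minimal NumPy sketch of that identity (illustration only, not part of the commit):

import numpy as np

A = np.random.rand(3, 5).astype(np.float32)
B = np.random.rand(5, 4).astype(np.float32)

C = A @ B            # row-major result the caller expects
C_T = B.T @ A.T      # what the swapped cublasGemmEx call computes (column-major view)
assert np.allclose(C, C_T.T)   # same values; row-major C and column-major C^T share one layout
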
rwkv_rocm/cuda/operators.cu ADDED
@@ -0,0 +1,246 @@
1
+ #include <stdio.h>
2
+ #include <assert.h>
3
+ #include "ATen/ATen.h"
4
+ #include <cuda_fp16.h>
5
+ #define MIN_VALUE (-1e38)
6
+ typedef at::Half fp16;
7
+ __half *cast(fp16 *ptr) {
8
+ return reinterpret_cast<__half *>(ptr);
9
+ }
10
+
11
+ template <typename F>
12
+ __global__ void kernel_wkv_forward(const int B, const int T, const int C,
13
+ const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
14
+ F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
15
+ const int idx = blockIdx.x * blockDim.x + threadIdx.x;
16
+ const int _b = idx / C;
17
+ const int _c = idx % C;
18
+ const int _offset = _b * T * C + _c;
19
+ const int _state_offset = _b * C + _c;
20
+
21
+ float u = _u[_c];
22
+ float w = _w[_c];
23
+ const F *__restrict__ const k = _k + _offset;
24
+ const F *__restrict__ const v = _v + _offset;
25
+ F *__restrict__ const y = _y + _offset;
26
+
27
+ float aa = _aa[_state_offset];
28
+ float bb = _bb[_state_offset];
29
+ float pp = _pp[_state_offset];
30
+ for (int i = 0; i < T; i++) {
31
+ const int ii = i * C;
32
+ const float kk = float(k[ii]);
33
+ const float vv = float(v[ii]);
34
+ float ww = u + kk;
35
+ float p = max(pp, ww);
36
+ float e1 = exp(pp - p);
37
+ float e2 = exp(ww - p);
38
+ y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
39
+ ww = w + pp;
40
+ p = max(ww, kk);
41
+ e1 = exp(ww - p);
42
+ e2 = exp(kk - p);
43
+ aa = e1 * aa + e2 * vv;
44
+ bb = e1 * bb + e2;
45
+ pp = p;
46
+ }
47
+ _aa[_state_offset] = aa;
48
+ _bb[_state_offset] = bb;
49
+ _pp[_state_offset] = pp;
50
+ }
51
+
52
+ template <typename F>
53
+ void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
54
+ dim3 threadsPerBlock( min(C, 32) );
55
+ assert(B * C % threadsPerBlock.x == 0);
56
+ dim3 numBlocks(B * C / threadsPerBlock.x);
57
+ kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
58
+ }
59
+
60
+ template void cuda_wkv_forward<fp16>(
61
+ int B, int T, int C,
62
+ float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
63
+ float *aa, float *bb, float *pp);
64
+ template void cuda_wkv_forward<float>(
65
+ int B, int T, int C,
66
+ float *w, float *u, float *k, float *v, float *y,
67
+ float *aa, float *bb, float *pp);
68
+
69
+ __global__ void kernel_mm_seq_fp32i8(
70
+ const int B, const int N, const int M,
71
+ const float *__restrict__ const x, const int x_stride,
72
+ const uint8_t *__restrict__ const w, const int w_stride,
73
+ const float *__restrict__ const mx,
74
+ const float *__restrict__ const rx,
75
+ const float *__restrict__ const my,
76
+ const float *__restrict__ const ry,
77
+ float *__restrict__ const y, const int y_stride) {
78
+
79
+ const int i = blockIdx.x * blockDim.x + threadIdx.x;
80
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
81
+
82
+ if (i < B && k < M) {
83
+ float y_local = 0;
84
+ for (int j = 0; j < N; ++j) {
85
+ y_local += x[i * x_stride + j] * (
86
+ (float(w[j * w_stride + k]) + 0.5f)
87
+ * rx[k] * ry[j] + mx[k] + my[j]
88
+ );
89
+ }
90
+ y[i * y_stride + k] = y_local;
91
+ }
92
+ }
93
+
94
+ template <typename F>
95
+ void cuda_mm8_seq(int B, int N, int M,
96
+ F *x, int x_stride,
97
+ uint8_t *w, int w_stride,
98
+ F *mx, F *rx,
99
+ F *my, F *ry,
100
+ F *y, int y_stride);
101
+
102
+ template <>
103
+ void cuda_mm8_seq<float>(int B, int N, int M,
104
+ float *x, int x_stride,
105
+ uint8_t *w, int w_stride,
106
+ float *mx, float *rx,
107
+ float *my, float *ry,
108
+ float *y, int y_stride) {
109
+ dim3 blockSize(1, 128);
110
+ dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
111
+ kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
112
+ B, N, M, x, x_stride, w, w_stride,
113
+ mx, rx, my, ry, y, y_stride);
114
+ }
115
+
116
+ __global__ void kernel_mm_seq_fp16i8(
117
+ const int B, const int N, const int M,
118
+ const __half *__restrict__ const x, const int x_stride,
119
+ const uint8_t *__restrict__ const w, const int w_stride,
120
+ const __half *__restrict__ const mx,
121
+ const __half *__restrict__ const rx,
122
+ const __half *__restrict__ const my,
123
+ const __half *__restrict__ const ry,
124
+ __half *__restrict__ const y, const int y_stride) {
125
+
126
+ const int i = blockIdx.x * blockDim.x + threadIdx.x;
127
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
128
+
129
+ if (i < B && k < M) {
130
+ float y_local = 0;
131
+ for (int j = 0; j < N; ++j) {
132
+ y_local += __half2float(x[i * x_stride + j]) * (
133
+ (float(w[j * w_stride + k]) + 0.5f)
134
+ * __half2float(rx[k]) * __half2float(ry[j])
135
+ + __half2float(mx[k]) + __half2float(my[j])
136
+ );
137
+ }
138
+ y[i * y_stride + k] = __float2half(y_local);
139
+ }
140
+ }
141
+
142
+ template <>
143
+ void cuda_mm8_seq<fp16>(int B, int N, int M,
144
+ fp16 *x, int x_stride,
145
+ uint8_t *w, int w_stride,
146
+ fp16 *mx, fp16 *rx,
147
+ fp16 *my, fp16 *ry,
148
+ fp16 *y, int y_stride) {
149
+ dim3 blockSize(1, 128);
150
+ dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
151
+ kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
152
+ B, N, M, cast(x), x_stride, w, w_stride,
153
+ cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
154
+ }
155
+
156
+ #define MM8_ONE_JSPLIT 24
157
+ #define MM8_ONE_TILE 1024
158
+
159
+ __global__ void kernel_mm_one_fp32i8(
160
+ const int N, const int M,
161
+ const float *__restrict__ const x,
162
+ const uint8_t *__restrict__ const w, const int w_stride,
163
+ const float *__restrict__ const mx,
164
+ const float *__restrict__ const rx,
165
+ const float *__restrict__ const my,
166
+ const float *__restrict__ const ry,
167
+ float *__restrict__ const y) {
168
+
169
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
170
+ const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
171
+ const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
172
+
173
+ if (k < M) {
174
+ float y_local = 0;
175
+ for (int j = j0; j < j1; ++j) {
176
+ y_local += x[j] * (
177
+ (float(w[j * w_stride + k]) + 0.5f)
178
+ * rx[k] * ry[j] + mx[k] + my[j]
179
+ );
180
+ }
181
+ atomicAdd(&y[k], y_local);
182
+ }
183
+ }
184
+
185
+ template <typename F>
186
+ void cuda_mm8_one(int N, int M,
187
+ F *x,
188
+ uint8_t *w, int w_stride,
189
+ F *mx, F *rx,
190
+ F *my, F *ry,
191
+ float *y);
192
+
193
+ template <>
194
+ void cuda_mm8_one<float>(int N, int M,
195
+ float *x,
196
+ uint8_t *w, int w_stride,
197
+ float *mx, float *rx,
198
+ float *my, float *ry,
199
+ float *y) {
200
+ dim3 blockSize(1, MM8_ONE_TILE);
201
+ dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
202
+ kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
203
+ N, M, x, w, w_stride,
204
+ mx, rx, my, ry, y);
205
+ }
206
+
207
+ __global__ void kernel_mm_one_fp16i8(
208
+ const int N, const int M,
209
+ const __half *__restrict__ const x,
210
+ const uint8_t *__restrict__ const w, const int w_stride,
211
+ const __half *__restrict__ const mx,
212
+ const __half *__restrict__ const rx,
213
+ const __half *__restrict__ const my,
214
+ const __half *__restrict__ const ry,
215
+ float *__restrict__ const y) {
216
+
217
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
218
+ const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
219
+ const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
220
+
221
+ if (k < M) {
222
+ float y_local = 0;
223
+ for (int j = j0; j < j1; ++j) {
224
+ y_local += __half2float(x[j]) * (
225
+ (float(w[j * w_stride + k]) + 0.5f)
226
+ * __half2float(rx[k]) * __half2float(ry[j])
227
+ + __half2float(mx[k]) + __half2float(my[j])
228
+ );
229
+ }
230
+ atomicAdd(&y[k], y_local);
231
+ }
232
+ }
233
+
234
+ template <>
235
+ void cuda_mm8_one<fp16>(int N, int M,
236
+ fp16 *x,
237
+ uint8_t *w, int w_stride,
238
+ fp16 *mx, fp16 *rx,
239
+ fp16 *my, fp16 *ry,
240
+ float *y) {
241
+ dim3 blockSize(1, MM8_ONE_TILE);
242
+ dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
243
+ kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
244
+ N, M, cast(x), w, w_stride,
245
+ cast(mx), cast(rx), cast(my), cast(ry), y);
246
+ }
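
For reference, the mm8 kernels above decode each uint8 weight on the fly as (w + 0.5) * rx[k] * ry[j] + mx[k] + my[j] before the multiply-accumulate. A rough NumPy equivalent of the single-vector path (shapes assumed from the kernel indexing; illustration only, not part of the commit):

import numpy as np

def mm8_one_reference(x, w_u8, mx, rx, my, ry):
    # x: (N,) activations; w_u8: (N, M) uint8 weights
    # rx, mx: (M,) per-output scale/offset; ry, my: (N,) per-input scale/offset
    w = (w_u8.astype(np.float32) + 0.5) * rx[None, :] * ry[:, None] \
        + mx[None, :] + my[:, None]
    return x.astype(np.float32) @ w   # (M,), matching kernel_mm_one_*'s accumulation into y
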
rwkv_rocm/cuda/rwkv5.cu ADDED
@@ -0,0 +1,88 @@
1
+ #include <stdio.h>
2
+ #include <assert.h>
3
+ #include "ATen/ATen.h"
4
+ typedef at::BFloat16 bf16;
5
+ typedef at::Half fp16;
6
+ typedef float fp32;
7
+
8
+ template <typename F>
9
+ __global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
10
+ const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
11
+ F *__restrict__ const _y)
12
+ {
13
+ const int b = blockIdx.x / H;
14
+ const int h = blockIdx.x % H;
15
+ const int i = threadIdx.x;
16
+ _w += h*_N_;
17
+ _u += h*_N_;
18
+ _state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
19
+
20
+ __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
21
+
22
+ float state[_N_];
23
+ #pragma unroll
24
+ for (int j = 0; j < _N_; j++)
25
+ state[j] = _state[j];
26
+
27
+ __syncthreads();
28
+ u[i] = float(_u[i]);
29
+ w[i] = _w[i];
30
+ __syncthreads();
31
+
32
+ for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
33
+ {
34
+ __syncthreads();
35
+ r[i] = float(_r[t]);
36
+ k[i] = float(_k[t]);
37
+ __syncthreads();
38
+
39
+ const float v = float(_v[t]);
40
+ float y = 0;
41
+
42
+ #pragma unroll
43
+ for (int j = 0; j < _N_; j+=4)
44
+ {
45
+ const float4& r_ = (float4&)(r[j]);
46
+ const float4& k_ = (float4&)(k[j]);
47
+ const float4& w_ = (float4&)(w[j]);
48
+ const float4& u_ = (float4&)(u[j]);
49
+ float4& s = (float4&)(state[j]);
50
+ float4 x;
51
+
52
+ x.x = k_.x * v;
53
+ x.y = k_.y * v;
54
+ x.z = k_.z * v;
55
+ x.w = k_.w * v;
56
+
57
+ y += r_.x * (u_.x * x.x + s.x);
58
+ y += r_.y * (u_.y * x.y + s.y);
59
+ y += r_.z * (u_.z * x.z + s.z);
60
+ y += r_.w * (u_.w * x.w + s.w);
61
+
62
+ s.x = s.x * w_.x + x.x;
63
+ s.y = s.y * w_.y + x.y;
64
+ s.z = s.z * w_.z + x.z;
65
+ s.w = s.w * w_.w + x.w;
66
+ }
67
+ _y[t] = F(y);
68
+ }
69
+ #pragma unroll
70
+ for (int j = 0; j < _N_; j++)
71
+ _state[j] = state[j];
72
+ }
73
+
74
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
75
+ {
76
+ assert(H*_N_ == C);
77
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
78
+ }
79
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
80
+ {
81
+ assert(H*_N_ == C);
82
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
83
+ }
84
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
85
+ {
86
+ assert(H*_N_ == C);
87
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
88
+ }
rwkv_rocm/cuda/rwkv5_op.cpp ADDED
@@ -0,0 +1,34 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/ATen.h"
3
+ #include <c10/cuda/CUDAGuard.h>
4
+ typedef at::BFloat16 bf16;
5
+ typedef at::Half fp16;
6
+ typedef float fp32;
7
+
8
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
9
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
10
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
11
+
12
+ void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
13
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
14
+ cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
15
+ }
16
+ void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
17
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
18
+ cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
19
+ }
20
+ void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
21
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
22
+ cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
23
+ }
24
+
25
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
26
+ m.def("forward_bf16", &forward_bf16, "rwkv5 forward_bf16");
27
+ m.def("forward_fp16", &forward_fp16, "rwkv5 forward_fp16");
28
+ m.def("forward_fp32", &forward_fp32, "rwkv5 forward_fp32");
29
+ }
30
+ TORCH_LIBRARY(rwkv5, m) {
31
+ m.def("forward_bf16", forward_bf16);
32
+ m.def("forward_fp16", forward_fp16);
33
+ m.def("forward_fp32", forward_fp32);
34
+ }
rwkv_rocm/cuda/rwkv6.cu ADDED
@@ -0,0 +1,87 @@
1
+ #include <stdio.h>
2
+ #include <assert.h>
3
+ #include "ATen/ATen.h"
4
+ typedef at::BFloat16 bf16;
5
+ typedef at::Half fp16;
6
+ typedef float fp32;
7
+
8
+ template <typename F>
9
+ __global__ void kernel_forward(const int B, const int T, const int C, const int H, float *__restrict__ _state,
10
+ const F *__restrict__ const _r, const F *__restrict__ const _k, const F *__restrict__ const _v, const float *__restrict__ _w, const F *__restrict__ _u,
11
+ F *__restrict__ const _y)
12
+ {
13
+ const int b = blockIdx.x / H;
14
+ const int h = blockIdx.x % H;
15
+ const int i = threadIdx.x;
16
+ _u += h*_N_;
17
+ _state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
18
+
19
+ __shared__ float r[_N_], k[_N_], u[_N_], w[_N_];
20
+
21
+ float state[_N_];
22
+ #pragma unroll
23
+ for (int j = 0; j < _N_; j++)
24
+ state[j] = _state[j];
25
+
26
+ __syncthreads();
27
+ u[i] = float(_u[i]);
28
+ __syncthreads();
29
+
30
+ for (int t = b*T*C + h*_N_ + i; t < (b+1)*T*C + h*_N_ + i; t += C)
31
+ {
32
+ __syncthreads();
33
+ w[i] = _w[t];
34
+ r[i] = float(_r[t]);
35
+ k[i] = float(_k[t]);
36
+ __syncthreads();
37
+
38
+ const float v = float(_v[t]);
39
+ float y = 0;
40
+
41
+ #pragma unroll
42
+ for (int j = 0; j < _N_; j+=4)
43
+ {
44
+ const float4& r_ = (float4&)(r[j]);
45
+ const float4& k_ = (float4&)(k[j]);
46
+ const float4& w_ = (float4&)(w[j]);
47
+ const float4& u_ = (float4&)(u[j]);
48
+ float4& s = (float4&)(state[j]);
49
+ float4 x;
50
+
51
+ x.x = k_.x * v;
52
+ x.y = k_.y * v;
53
+ x.z = k_.z * v;
54
+ x.w = k_.w * v;
55
+
56
+ y += r_.x * (u_.x * x.x + s.x);
57
+ y += r_.y * (u_.y * x.y + s.y);
58
+ y += r_.z * (u_.z * x.z + s.z);
59
+ y += r_.w * (u_.w * x.w + s.w);
60
+
61
+ s.x = s.x * w_.x + x.x;
62
+ s.y = s.y * w_.y + x.y;
63
+ s.z = s.z * w_.z + x.z;
64
+ s.w = s.w * w_.w + x.w;
65
+ }
66
+ _y[t] = F(y);
67
+ }
68
+ #pragma unroll
69
+ for (int j = 0; j < _N_; j++)
70
+ _state[j] = state[j];
71
+ }
72
+
73
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y)
74
+ {
75
+ assert(H*_N_ == C);
76
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
77
+ }
78
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y)
79
+ {
80
+ assert(H*_N_ == C);
81
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
82
+ }
83
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y)
84
+ {
85
+ assert(H*_N_ == C);
86
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, k, v, w, u, y);
87
+ }
rwkv_rocm/cuda/rwkv6_op.cpp ADDED
@@ -0,0 +1,34 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/ATen.h"
3
+ #include <c10/cuda/CUDAGuard.h>
4
+ typedef at::BFloat16 bf16;
5
+ typedef at::Half fp16;
6
+ typedef float fp32;
7
+
8
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *k, bf16 *v, float *w, bf16 *u, bf16 *y);
9
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *k, fp16 *v, float *w, fp16 *u, fp16 *y);
10
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *k, fp32 *v, float *w, fp32 *u, fp32 *y);
11
+
12
+ void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
13
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
14
+ cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), w.data_ptr<float>(), u.data_ptr<bf16>(), y.data_ptr<bf16>());
15
+ }
16
+ void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
17
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
18
+ cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), w.data_ptr<float>(), u.data_ptr<fp16>(), y.data_ptr<fp16>());
19
+ }
20
+ void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &k, torch::Tensor &v, torch::Tensor &w, torch::Tensor &u, torch::Tensor &y) {
21
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(state));
22
+ cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), w.data_ptr<float>(), u.data_ptr<fp32>(), y.data_ptr<fp32>());
23
+ }
24
+
25
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
26
+ m.def("forward_bf16", &forward_bf16, "rwkv6 forward_bf16");
27
+ m.def("forward_fp16", &forward_fp16, "rwkv6 forward_fp16");
28
+ m.def("forward_fp32", &forward_fp32, "rwkv6 forward_fp32");
29
+ }
30
+ TORCH_LIBRARY(rwkv6, m) {
31
+ m.def("forward_bf16", forward_bf16);
32
+ m.def("forward_fp16", forward_fp16);
33
+ m.def("forward_fp32", forward_fp32);
34
+ }
rwkv_rocm/cuda/rwkv7.cu ADDED
@@ -0,0 +1,77 @@
1
+ #include <stdio.h>
2
+ #include <assert.h>
3
+ #include "ATen/ATen.h"
4
+
5
+ typedef at::Half fp16;
6
+ typedef at::BFloat16 bf16;
7
+ typedef float fp32;
8
+
9
+ template <typename F>
10
+ __global__ void kernel_forward(const int B, const int T, const int C, const int H,
11
+ float *__restrict__ _state, const F *__restrict__ const _r, const F *__restrict__ const _w, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ const _a, const F *__restrict__ const _b,
12
+ F *__restrict__ const _y)
13
+ {
14
+ const int e = blockIdx.x / H;
15
+ const int h = blockIdx.x % H;
16
+ const int i = threadIdx.x;
17
+ _state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
18
+
19
+ float state[_N_];
20
+ #pragma unroll
21
+ for (int j = 0; j < _N_; j++)
22
+ state[j] = _state[j];
23
+
24
+ __shared__ float r[_N_], k[_N_], w[_N_], a[_N_], b[_N_];
25
+
26
+ for (int _t = 0; _t < T; _t++)
27
+ {
28
+ const int t = e*T*C + h*_N_ + i + _t * C;
29
+ __syncthreads();
30
+ r[i] = float(_r[t]);
31
+ w[i] = __expf(-__expf(float(_w[t])));
32
+ k[i] = float(_k[t]);
33
+ a[i] = float(_a[t]);
34
+ b[i] = float(_b[t]);
35
+ __syncthreads();
36
+
37
+ float sa = 0;
38
+ #pragma unroll
39
+ for (int j = 0; j < _N_; j++)
40
+ {
41
+ sa += a[j] * state[j];
42
+ }
43
+
44
+ float vv = float(_v[t]);
45
+ float y = 0;
46
+ #pragma unroll
47
+ for (int j = 0; j < _N_; j++)
48
+ {
49
+ float& s = state[j];
50
+ s = s * w[j] + k[j] * vv + sa * b[j];
51
+ y += s * r[j];
52
+ }
53
+ _y[t] = F(y);
54
+ }
55
+ #pragma unroll
56
+ for (int j = 0; j < _N_; j++)
57
+ _state[j] = state[j];
58
+ }
59
+
60
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16* w, bf16 *k, bf16 *v, bf16 *a, bf16 *b, bf16 *y)
61
+ {
62
+ assert(H*_N_ == C);
63
+ assert(B == 1); // only for B=1
64
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, w, k, v, a, b, y);
65
+ }
66
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16* w, fp16 *k, fp16 *v, fp16 *a, fp16 *b, fp16 *y)
67
+ {
68
+ assert(H*_N_ == C);
69
+ assert(B == 1); // only for B=1
70
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, w, k, v, a, b, y);
71
+ }
72
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32* w, fp32 *k, fp32 *v, fp32 *a, fp32 *b, fp32 *y)
73
+ {
74
+ assert(H*_N_ == C);
75
+ assert(B == 1); // only for B=1
76
+ kernel_forward<<<dim3(B * H), dim3(_N_)>>>(B, T, C, H, state, r, w, k, v, a, b, y);
77
+ }
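
The rwkv7 kernel keeps an N x N state per head (one row per thread) and, for every token, applies per row i the update s[i][j] = s[i][j]*w[j] + k[j]*v[i] + sa[i]*b[j], where sa[i] = sum_j a[j]*s[i][j], then reads out y[i] = sum_j s[i][j]*r[j]. A schematic single-head, single-token NumPy version (shapes assumed from the kernel indexing; illustration only, not part of the commit):

import numpy as np

def rwkv7_step(state, r, w, k, v, a, b):
    # state: (N, N) for one head; r, w, k, v, a, b: (N,) for one timestep.
    # w is already the decay exp(-exp(w_raw)), as computed inside the kernel.
    sa = state @ a                                     # sa[i] = sum_j a[j] * state[i, j]
    state = state * w[None, :] + np.outer(v, k) + np.outer(sa, b)
    y = state @ r                                      # y[i] = sum_j state[i, j] * r[j]
    return state, y
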
rwkv_rocm/cuda/rwkv7_op.cpp ADDED
@@ -0,0 +1,26 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/ATen.h"
3
+
4
+ typedef at::Half fp16;
5
+ typedef at::BFloat16 bf16;
6
+ typedef float fp32;
7
+
8
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *w, bf16 *k, bf16 *v, bf16 *a, bf16 *b, bf16 *y);
9
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *w, fp16 *k, fp16 *v, fp16 *a, fp16 *b, fp16 *y);
10
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *w, fp32 *k, fp32 *v, fp32 *a, fp32 *b, fp32 *y);
11
+
12
+ void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &w, torch::Tensor &k, torch::Tensor &v, torch::Tensor &a, torch::Tensor &b, torch::Tensor &y) {
13
+ cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), w.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), a.data_ptr<bf16>(), b.data_ptr<bf16>(), y.data_ptr<bf16>());
14
+ }
15
+ void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &w, torch::Tensor &k, torch::Tensor &v, torch::Tensor &a, torch::Tensor &b, torch::Tensor &y) {
16
+ cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), w.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), a.data_ptr<fp16>(), b.data_ptr<fp16>(), y.data_ptr<fp16>());
17
+ }
18
+ void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &w, torch::Tensor &k, torch::Tensor &v, torch::Tensor &a, torch::Tensor &b, torch::Tensor &y) {
19
+ cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), w.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), a.data_ptr<fp32>(), b.data_ptr<fp32>(), y.data_ptr<fp32>());
20
+ }
21
+
22
+ TORCH_LIBRARY(wkv7s, m) {
23
+ m.def("forward_bf16", forward_bf16);
24
+ m.def("forward_fp16", forward_fp16);
25
+ m.def("forward_fp32", forward_fp32);
26
+ }
rwkv_rocm/cuda/wrapper.cpp ADDED
@@ -0,0 +1,141 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/ATen.h"
3
+ #include <iostream>
4
+ #include <c10/cuda/CUDAGuard.h>
5
+
6
+ typedef at::Half fp16;
7
+
8
+ template <typename F>
9
+ void cuda_wkv_forward(int B, int T, int C,
10
+ float *w, float *u, F *k, F *v, F *y,
11
+ float *aa, float *bb, float *pp);
12
+ template <typename F>
13
+ void cuda_mm8_seq(int B, int N, int M,
14
+ F *x, int x_stride,
15
+ uint8_t *w, int w_stride,
16
+ F *mx, F *rx,
17
+ F *my, F *ry,
18
+ F *y, int y_stride);
19
+ template <typename F>
20
+ void cuda_mm8_one(int N, int M,
21
+ F *x,
22
+ uint8_t *w, int w_stride,
23
+ F *mx, F *rx,
24
+ F *my, F *ry,
25
+ float *y);
26
+
27
+ void wkv_forward(int64_t B, int64_t T, int64_t C,
28
+ torch::Tensor &w, torch::Tensor &u,
29
+ torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
30
+ torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
31
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
32
+ switch (k.scalar_type()) {
33
+ case c10::ScalarType::Half:
34
+ cuda_wkv_forward(B, T, C,
35
+ w.data_ptr<float>(), u.data_ptr<float>(),
36
+ k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
37
+ aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
38
+ break;
39
+ case c10::ScalarType::Float:
40
+ cuda_wkv_forward(B, T, C,
41
+ w.data_ptr<float>(), u.data_ptr<float>(),
42
+ k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
43
+ aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
44
+ break;
45
+ default:
46
+ assert(false && "Only FP16 and FP32 are currently supported");
47
+ }
48
+ }
49
+
50
+ void mm8_seq(int64_t B, int64_t N, int64_t M,
51
+ torch::Tensor &x, torch::Tensor &w,
52
+ torch::Tensor &mx, torch::Tensor &rx,
53
+ torch::Tensor &my, torch::Tensor &ry,
54
+ torch::Tensor &y) {
55
+ assert(x.stride(1) == 1);
56
+ assert(w.stride(1) == 1);
57
+ assert(mx.stride(0) == 1 && rx.stride(0) == 1);
58
+ assert(my.stride(0) == 1 && ry.stride(0) == 1);
59
+ assert(y.stride(1) == 1);
60
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
61
+ switch (x.scalar_type()) {
62
+ case c10::ScalarType::Half:
63
+ cuda_mm8_seq(
64
+ B, N, M,
65
+ x.data_ptr<fp16>(), x.stride(0),
66
+ w.data_ptr<uint8_t>(), w.stride(0),
67
+ mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
68
+ my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
69
+ y.data_ptr<fp16>(), y.stride(0));
70
+ break;
71
+ case c10::ScalarType::Float:
72
+ cuda_mm8_seq(
73
+ B, N, M,
74
+ x.data_ptr<float>(), x.stride(0),
75
+ w.data_ptr<uint8_t>(), w.stride(0),
76
+ mx.data_ptr<float>(), rx.data_ptr<float>(),
77
+ my.data_ptr<float>(), ry.data_ptr<float>(),
78
+ y.data_ptr<float>(), y.stride(0));
79
+ break;
80
+ default:
81
+ assert(false && "Only FP16 and FP32 are currently supported");
82
+ }
83
+ }
84
+ void mm8_one(int64_t N, int64_t M,
85
+ torch::Tensor &x, torch::Tensor &w,
86
+ torch::Tensor &mx, torch::Tensor &rx,
87
+ torch::Tensor &my, torch::Tensor &ry,
88
+ torch::Tensor &y) {
89
+ assert(x.stride(0) == 1);
90
+ assert(w.stride(1) == 1);
91
+ assert(mx.stride(0) == 1 && rx.stride(0) == 1);
92
+ assert(my.stride(0) == 1 && ry.stride(0) == 1);
93
+ assert(y.stride(0) == 1);
94
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(w));
95
+ switch (x.scalar_type()) {
96
+ case c10::ScalarType::Half:
97
+ cuda_mm8_one(
98
+ N, M,
99
+ x.data_ptr<fp16>(),
100
+ w.data_ptr<uint8_t>(), w.stride(0),
101
+ mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
102
+ my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
103
+ y.data_ptr<float>());
104
+ break;
105
+ case c10::ScalarType::Float:
106
+ cuda_mm8_one(
107
+ N, M,
108
+ x.data_ptr<float>(),
109
+ w.data_ptr<uint8_t>(), w.stride(0),
110
+ mx.data_ptr<float>(), rx.data_ptr<float>(),
111
+ my.data_ptr<float>(), ry.data_ptr<float>(),
112
+ y.data_ptr<float>());
113
+ break;
114
+ default:
115
+ assert(false && "Only FP16 and FP32 are currently supported");
116
+ }
117
+ }
118
+
119
+ using torch::Tensor;
120
+
121
+ #ifndef DISABLE_CUBLAS_GEMM
122
+ void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
123
+ #endif
124
+
125
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
126
+ m.def("wkv_forward", &wkv_forward, "wkv forward");
127
+ m.def("mm8_seq", &mm8_seq, "mm8 seq");
128
+ m.def("mm8_one", &mm8_one, "mm8 one");
129
+ #ifndef DISABLE_CUBLAS_GEMM
130
+ m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
131
+ #endif
132
+ }
133
+
134
+ TORCH_LIBRARY(rwkv, m) {
135
+ m.def("wkv_forward", wkv_forward);
136
+ m.def("mm8_seq", mm8_seq);
137
+ m.def("mm8_one", mm8_one);
138
+ #ifndef DISABLE_CUBLAS_GEMM
139
+ m.def("gemm_fp16_cublas", gemm_fp16_cublas);
140
+ #endif
141
+ }
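
Because wrapper.cpp registers the same functions both through pybind11 and through TORCH_LIBRARY(rwkv, ...), building it with is_python_module=False (as model.py does below) exposes the kernels as torch.ops.rwkv.*. A hedged usage sketch; the tensor shapes are inferred from kernel_wkv_forward rather than taken from the commit:

import torch

B, T, C = 1, 8, 768
dev = "cuda"   # ROCm builds of PyTorch surface HIP devices under the "cuda" name
w  = torch.randn(C, device=dev)                        # per-channel decay, fp32
u  = torch.randn(C, device=dev)
k  = torch.randn(B, T, C, device=dev, dtype=torch.half)
v  = torch.randn(B, T, C, device=dev, dtype=torch.half)
y  = torch.empty(B, T, C, device=dev, dtype=torch.half)
aa = torch.zeros(B, C, device=dev)                     # running numerator state
bb = torch.zeros(B, C, device=dev)                     # running denominator state
pp = torch.full((B, C), -1e38, device=dev)             # "minus infinity", cf. MIN_VALUE in operators.cu
torch.ops.rwkv.wkv_forward(B, T, C, w, u, k, v, y, aa, bb, pp)
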
rwkv_rocm/hip/gemm_fp16_cublas.hip ADDED
@@ -0,0 +1,71 @@
1
+ #include <hipblas/hipblas.h>
2
+ #include <hip/hip_runtime.h>
3
+ #include <hip/hip_fp16.h>
4
+ #include <hip/hip_runtime.h>
5
+ #include <torch/extension.h>
6
+ #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
7
+ #include <ATen/hip/HIPContext.h>
8
+
9
+ #define CUBLAS_CHECK(condition) \
10
+ for (hipblasStatus_t _cublas_check_status = (condition); \
11
+ _cublas_check_status != HIPBLAS_STATUS_SUCCESS;) \
12
+ throw std::runtime_error("cuBLAS error " + \
13
+ std::to_string(_cublas_check_status) + " at " + \
14
+ std::to_string(__LINE__));
15
+
16
+ #define CUDA_CHECK(condition) \
17
+ for (hipError_t _cuda_check_status = (condition); \
18
+ _cuda_check_status != hipSuccess;) \
19
+ throw std::runtime_error( \
20
+ "CUDA error " + std::string(hipGetErrorString(_cuda_check_status)) + \
21
+ " at " + std::to_string(__LINE__));
22
+
23
+ /*
24
+ NOTE: blas gemm is column-major by default, but we need row-major output.
25
+ The data of row-major, transposed matrix is exactly the same as the
26
+ column-major, non-transposed matrix, and C = A * B ---> C^T = B^T * A^T
27
+ */
28
+ void gemm_fp16_cublas(torch::Tensor a, torch::Tensor b, torch::Tensor c) {
29
+ const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(a));
30
+ const auto cuda_data_type = HIP_R_16F;
31
+ const auto cuda_c_data_type =
32
+ c.dtype() == torch::kFloat32 ? HIP_R_32F : HIP_R_16F;
33
+ const auto compute_type = HIPBLAS_COMPUTE_32F;
34
+ const float sp_alpha = 1.f;
35
+ // swap a and b, and use HIPBLAS_OP_N. see the notes above
36
+ std::swap(a, b);
37
+ const hipblasOperation_t cublas_trans_a = HIPBLAS_OP_N;
38
+ const hipblasOperation_t cublas_trans_b = HIPBLAS_OP_N;
39
+ // m = (B^T).size(0) = B.size(1), and = A.size(1) after swap,
40
+ // negative axis is used because of the existence of batch matmul.
41
+ const int m = a.size(-1);
42
+ const int k = a.size(-2);
43
+ const int n = b.size(-2);
44
+ const int cublas_lda = m;
45
+ const int cublas_ldb = k;
46
+ const int cublas_ldc = m;
47
+ hipblasHandle_t cublas_handle = at::cuda::getCurrentCUDABlasHandle();
48
+
49
+ hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
50
+ const float sp_beta = 0.f;
51
+ if (a.sizes().size() == 2 && b.sizes().size() == 2) {
52
+ CUBLAS_CHECK(hipblasGemmEx(
53
+ cublas_handle, cublas_trans_a, cublas_trans_b, m, n, k, &sp_alpha,
54
+ a.data_ptr(), cuda_data_type, cublas_lda, b.data_ptr(), cuda_data_type,
55
+ cublas_ldb, &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc,
56
+ compute_type, algo));
57
+ } else {
58
+ // batch matmul
59
+ assert(a.sizes().size() == 3 && b.sizes().size() == 3);
60
+
61
+ const long long int cublas_stride_a = m * k;
62
+ const long long int cublas_stride_b = k * n;
63
+ const long long int cublas_stride_c = m * n;
64
+ CUBLAS_CHECK(hipblasGemmStridedBatchedEx(
65
+ cublas_handle, cublas_trans_a, cublas_trans_b, m,
66
+ n, k, &sp_alpha, a.data_ptr(), cuda_data_type, cublas_lda,
67
+ cublas_stride_a, b.data_ptr(), cuda_data_type, cublas_ldb, cublas_stride_b,
68
+ &sp_beta, c.data_ptr(), cuda_c_data_type, cublas_ldc, cublas_stride_c,
69
+ a.size(0), compute_type, algo));
70
+ }
71
+ }
rwkv_rocm/hip/operators.hip ADDED
@@ -0,0 +1,247 @@
1
+ #include "hip/hip_runtime.h"
2
+ #include <stdio.h>
3
+ #include <assert.h>
4
+ #include "ATen/ATen.h"
5
+ #include <hip/hip_fp16.h>
6
+ #define MIN_VALUE (-1e38)
7
+ typedef at::Half fp16;
8
+ __half *cast(fp16 *ptr) {
9
+ return reinterpret_cast<__half *>(ptr);
10
+ }
11
+
12
+ template <typename F>
13
+ __global__ void kernel_wkv_forward(const int B, const int T, const int C,
14
+ const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
15
+ F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
16
+ const int idx = blockIdx.x * blockDim.x + threadIdx.x;
17
+ const int _b = idx / C;
18
+ const int _c = idx % C;
19
+ const int _offset = _b * T * C + _c;
20
+ const int _state_offset = _b * C + _c;
21
+
22
+ float u = _u[_c];
23
+ float w = _w[_c];
24
+ const F *__restrict__ const k = _k + _offset;
25
+ const F *__restrict__ const v = _v + _offset;
26
+ F *__restrict__ const y = _y + _offset;
27
+
28
+ float aa = _aa[_state_offset];
29
+ float bb = _bb[_state_offset];
30
+ float pp = _pp[_state_offset];
31
+ for (int i = 0; i < T; i++) {
32
+ const int ii = i * C;
33
+ const float kk = float(k[ii]);
34
+ const float vv = float(v[ii]);
35
+ float ww = u + kk;
36
+ float p = max(pp, ww);
37
+ float e1 = exp(pp - p);
38
+ float e2 = exp(ww - p);
39
+ y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
40
+ ww = w + pp;
41
+ p = max(ww, kk);
42
+ e1 = exp(ww - p);
43
+ e2 = exp(kk - p);
44
+ aa = e1 * aa + e2 * vv;
45
+ bb = e1 * bb + e2;
46
+ pp = p;
47
+ }
48
+ _aa[_state_offset] = aa;
49
+ _bb[_state_offset] = bb;
50
+ _pp[_state_offset] = pp;
51
+ }
52
+
53
+ template <typename F>
54
+ void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
55
+ dim3 threadsPerBlock( min(C, 32) );
56
+ assert(B * C % threadsPerBlock.x == 0);
57
+ dim3 numBlocks(B * C / threadsPerBlock.x);
58
+ hipLaunchKernelGGL(( kernel_wkv_forward), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, B, T, C, w, u, k, v, y, aa, bb, pp);
59
+ }
60
+
61
+ template void cuda_wkv_forward<fp16>(
62
+ int B, int T, int C,
63
+ float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
64
+ float *aa, float *bb, float *pp);
65
+ template void cuda_wkv_forward<float>(
66
+ int B, int T, int C,
67
+ float *w, float *u, float *k, float *v, float *y,
68
+ float *aa, float *bb, float *pp);
69
+
70
+ __global__ void kernel_mm_seq_fp32i8(
71
+ const int B, const int N, const int M,
72
+ const float *__restrict__ const x, const int x_stride,
73
+ const uint8_t *__restrict__ const w, const int w_stride,
74
+ const float *__restrict__ const mx,
75
+ const float *__restrict__ const rx,
76
+ const float *__restrict__ const my,
77
+ const float *__restrict__ const ry,
78
+ float *__restrict__ const y, const int y_stride) {
79
+
80
+ const int i = blockIdx.x * blockDim.x + threadIdx.x;
81
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
82
+
83
+ if (i < B && k < M) {
84
+ float y_local = 0;
85
+ for (int j = 0; j < N; ++j) {
86
+ y_local += x[i * x_stride + j] * (
87
+ (float(w[j * w_stride + k]) + 0.5f)
88
+ * rx[k] * ry[j] + mx[k] + my[j]
89
+ );
90
+ }
91
+ y[i * y_stride + k] = y_local;
92
+ }
93
+ }
94
+
95
+ template <typename F>
96
+ void cuda_mm8_seq(int B, int N, int M,
97
+ F *x, int x_stride,
98
+ uint8_t *w, int w_stride,
99
+ F *mx, F *rx,
100
+ F *my, F *ry,
101
+ F *y, int y_stride);
102
+
103
+ template <>
104
+ void cuda_mm8_seq<float>(int B, int N, int M,
105
+ float *x, int x_stride,
106
+ uint8_t *w, int w_stride,
107
+ float *mx, float *rx,
108
+ float *my, float *ry,
109
+ float *y, int y_stride) {
110
+ dim3 blockSize(1, 128);
111
+ dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
112
+ hipLaunchKernelGGL(( kernel_mm_seq_fp32i8), dim3(gridSize), dim3(blockSize), 0, 0,
113
+ B, N, M, x, x_stride, w, w_stride,
114
+ mx, rx, my, ry, y, y_stride);
115
+ }
116
+
117
+ __global__ void kernel_mm_seq_fp16i8(
118
+ const int B, const int N, const int M,
119
+ const __half *__restrict__ const x, const int x_stride,
120
+ const uint8_t *__restrict__ const w, const int w_stride,
121
+ const __half *__restrict__ const mx,
122
+ const __half *__restrict__ const rx,
123
+ const __half *__restrict__ const my,
124
+ const __half *__restrict__ const ry,
125
+ __half *__restrict__ const y, const int y_stride) {
126
+
127
+ const int i = blockIdx.x * blockDim.x + threadIdx.x;
128
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
129
+
130
+ if (i < B && k < M) {
131
+ float y_local = 0;
132
+ for (int j = 0; j < N; ++j) {
133
+ y_local += __half2float(x[i * x_stride + j]) * (
134
+ (float(w[j * w_stride + k]) + 0.5f)
135
+ * __half2float(rx[k]) * __half2float(ry[j])
136
+ + __half2float(mx[k]) + __half2float(my[j])
137
+ );
138
+ }
139
+ y[i * y_stride + k] = __float2half(y_local);
140
+ }
141
+ }
142
+
143
+ template <>
144
+ void cuda_mm8_seq<fp16>(int B, int N, int M,
145
+ fp16 *x, int x_stride,
146
+ uint8_t *w, int w_stride,
147
+ fp16 *mx, fp16 *rx,
148
+ fp16 *my, fp16 *ry,
149
+ fp16 *y, int y_stride) {
150
+ dim3 blockSize(1, 128);
151
+ dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
152
+ hipLaunchKernelGGL(( kernel_mm_seq_fp16i8), dim3(gridSize), dim3(blockSize), 0, 0,
153
+ B, N, M, cast(x), x_stride, w, w_stride,
154
+ cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
155
+ }
156
+
157
+ #define MM8_ONE_JSPLIT 24
158
+ #define MM8_ONE_TILE 1024
159
+
160
+ __global__ void kernel_mm_one_fp32i8(
161
+ const int N, const int M,
162
+ const float *__restrict__ const x,
163
+ const uint8_t *__restrict__ const w, const int w_stride,
164
+ const float *__restrict__ const mx,
165
+ const float *__restrict__ const rx,
166
+ const float *__restrict__ const my,
167
+ const float *__restrict__ const ry,
168
+ float *__restrict__ const y) {
169
+
170
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
171
+ const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
172
+ const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
173
+
174
+ if (k < M) {
175
+ float y_local = 0;
176
+ for (int j = j0; j < j1; ++j) {
177
+ y_local += x[j] * (
178
+ (float(w[j * w_stride + k]) + 0.5f)
179
+ * rx[k] * ry[j] + mx[k] + my[j]
180
+ );
181
+ }
182
+ atomicAdd(&y[k], y_local);
183
+ }
184
+ }
185
+
186
+ template <typename F>
187
+ void cuda_mm8_one(int N, int M,
188
+ F *x,
189
+ uint8_t *w, int w_stride,
190
+ F *mx, F *rx,
191
+ F *my, F *ry,
192
+ float *y);
193
+
194
+ template <>
195
+ void cuda_mm8_one<float>(int N, int M,
196
+ float *x,
197
+ uint8_t *w, int w_stride,
198
+ float *mx, float *rx,
199
+ float *my, float *ry,
200
+ float *y) {
201
+ dim3 blockSize(1, MM8_ONE_TILE);
202
+ dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
203
+ hipLaunchKernelGGL(( kernel_mm_one_fp32i8), dim3(gridSize), dim3(blockSize), 0, 0,
204
+ N, M, x, w, w_stride,
205
+ mx, rx, my, ry, y);
206
+ }
207
+
208
+ __global__ void kernel_mm_one_fp16i8(
209
+ const int N, const int M,
210
+ const __half *__restrict__ const x,
211
+ const uint8_t *__restrict__ const w, const int w_stride,
212
+ const __half *__restrict__ const mx,
213
+ const __half *__restrict__ const rx,
214
+ const __half *__restrict__ const my,
215
+ const __half *__restrict__ const ry,
216
+ float *__restrict__ const y) {
217
+
218
+ const int k = blockIdx.y * blockDim.y + threadIdx.y;
219
+ const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
220
+ const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
221
+
222
+ if (k < M) {
223
+ float y_local = 0;
224
+ for (int j = j0; j < j1; ++j) {
225
+ y_local += __half2float(x[j]) * (
226
+ (float(w[j * w_stride + k]) + 0.5f)
227
+ * __half2float(rx[k]) * __half2float(ry[j])
228
+ + __half2float(mx[k]) + __half2float(my[j])
229
+ );
230
+ }
231
+ atomicAdd(&y[k], y_local);
232
+ }
233
+ }
234
+
235
+ template <>
236
+ void cuda_mm8_one<fp16>(int N, int M,
237
+ fp16 *x,
238
+ uint8_t *w, int w_stride,
239
+ fp16 *mx, fp16 *rx,
240
+ fp16 *my, fp16 *ry,
241
+ float *y) {
242
+ dim3 blockSize(1, MM8_ONE_TILE);
243
+ dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
244
+ hipLaunchKernelGGL(( kernel_mm_one_fp16i8), dim3(gridSize), dim3(blockSize), 0, 0,
245
+ N, M, cast(x), w, w_stride,
246
+ cast(mx), cast(rx), cast(my), cast(ry), y);
247
+ }
rwkv_rocm/hip/rwkv7.hip ADDED
@@ -0,0 +1,78 @@
1
+ #include "hip/hip_runtime.h"
2
+ #include <stdio.h>
3
+ #include <assert.h>
4
+ #include "ATen/ATen.h"
5
+
6
+ typedef at::Half fp16;
7
+ typedef at::BFloat16 bf16;
8
+ typedef float fp32;
9
+
10
+ template <typename F>
11
+ __global__ void kernel_forward(const int B, const int T, const int C, const int H,
12
+ float *__restrict__ _state, const F *__restrict__ const _r, const F *__restrict__ const _w, const F *__restrict__ const _k, const F *__restrict__ const _v, const F *__restrict__ const _a, const F *__restrict__ const _b,
13
+ F *__restrict__ const _y)
14
+ {
15
+ const int e = blockIdx.x / H;
16
+ const int h = blockIdx.x % H;
17
+ const int i = threadIdx.x;
18
+ _state += h*_N_*_N_ + i*_N_; // wrong if B > 1 !!!
19
+
20
+ float state[_N_];
21
+ #pragma unroll
22
+ for (int j = 0; j < _N_; j++)
23
+ state[j] = _state[j];
24
+
25
+ __shared__ float r[_N_], k[_N_], w[_N_], a[_N_], b[_N_];
26
+
27
+ for (int _t = 0; _t < T; _t++)
28
+ {
29
+ const int t = e*T*C + h*_N_ + i + _t * C;
30
+ __syncthreads();
31
+ r[i] = float(_r[t]);
32
+ w[i] = __expf(-__expf(float(_w[t])));
33
+ k[i] = float(_k[t]);
34
+ a[i] = float(_a[t]);
35
+ b[i] = float(_b[t]);
36
+ __syncthreads();
37
+
38
+ float sa = 0;
39
+ #pragma unroll
40
+ for (int j = 0; j < _N_; j++)
41
+ {
42
+ sa += a[j] * state[j];
43
+ }
44
+
45
+ float vv = float(_v[t]);
46
+ float y = 0;
47
+ #pragma unroll
48
+ for (int j = 0; j < _N_; j++)
49
+ {
50
+ float& s = state[j];
51
+ s = s * w[j] + k[j] * vv + sa * b[j];
52
+ y += s * r[j];
53
+ }
54
+ _y[t] = F(y);
55
+ }
56
+ #pragma unroll
57
+ for (int j = 0; j < _N_; j++)
58
+ _state[j] = state[j];
59
+ }
60
+
61
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16* w, bf16 *k, bf16 *v, bf16 *a, bf16 *b, bf16 *y)
62
+ {
63
+ assert(H*_N_ == C);
64
+ assert(B == 1); // only for B=1
65
+ hipLaunchKernelGGL(( kernel_forward), dim3(dim3(B * H)), dim3(dim3(_N_)), 0, 0, B, T, C, H, state, r, w, k, v, a, b, y);
66
+ }
67
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16* w, fp16 *k, fp16 *v, fp16 *a, fp16 *b, fp16 *y)
68
+ {
69
+ assert(H*_N_ == C);
70
+ assert(B == 1); // only for B=1
71
+ hipLaunchKernelGGL(( kernel_forward), dim3(dim3(B * H)), dim3(dim3(_N_)), 0, 0, B, T, C, H, state, r, w, k, v, a, b, y);
72
+ }
73
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32* w, fp32 *k, fp32 *v, fp32 *a, fp32 *b, fp32 *y)
74
+ {
75
+ assert(H*_N_ == C);
76
+ assert(B == 1); // only for B=1
77
+ hipLaunchKernelGGL(( kernel_forward), dim3(dim3(B * H)), dim3(dim3(_N_)), 0, 0, B, T, C, H, state, r, w, k, v, a, b, y);
78
+ }
rwkv_rocm/hip/rwkv7_op.cpp ADDED
@@ -0,0 +1,26 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/ATen.h"
3
+
4
+ typedef at::Half fp16;
5
+ typedef at::BFloat16 bf16;
6
+ typedef float fp32;
7
+
8
+ void cuda_forward_bf16(int B, int T, int C, int H, float *state, bf16 *r, bf16 *w, bf16 *k, bf16 *v, bf16 *a, bf16 *b, bf16 *y);
9
+ void cuda_forward_fp16(int B, int T, int C, int H, float *state, fp16 *r, fp16 *w, fp16 *k, fp16 *v, fp16 *a, fp16 *b, fp16 *y);
10
+ void cuda_forward_fp32(int B, int T, int C, int H, float *state, fp32 *r, fp32 *w, fp32 *k, fp32 *v, fp32 *a, fp32 *b, fp32 *y);
11
+
12
+ void forward_bf16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &w, torch::Tensor &k, torch::Tensor &v, torch::Tensor &a, torch::Tensor &b, torch::Tensor &y) {
13
+ cuda_forward_bf16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<bf16>(), w.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), a.data_ptr<bf16>(), b.data_ptr<bf16>(), y.data_ptr<bf16>());
14
+ }
15
+ void forward_fp16(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &w, torch::Tensor &k, torch::Tensor &v, torch::Tensor &a, torch::Tensor &b, torch::Tensor &y) {
16
+ cuda_forward_fp16(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp16>(), w.data_ptr<fp16>(), k.data_ptr<fp16>(), v.data_ptr<fp16>(), a.data_ptr<fp16>(), b.data_ptr<fp16>(), y.data_ptr<fp16>());
17
+ }
18
+ void forward_fp32(int64_t B, int64_t T, int64_t C, int64_t H, torch::Tensor &state, torch::Tensor &r, torch::Tensor &w, torch::Tensor &k, torch::Tensor &v, torch::Tensor &a, torch::Tensor &b, torch::Tensor &y) {
19
+ cuda_forward_fp32(B, T, C, H, state.data_ptr<float>(), r.data_ptr<fp32>(), w.data_ptr<fp32>(), k.data_ptr<fp32>(), v.data_ptr<fp32>(), a.data_ptr<fp32>(), b.data_ptr<fp32>(), y.data_ptr<fp32>());
20
+ }
21
+
22
+ TORCH_LIBRARY(wkv7s, m) {
23
+ m.def("forward_bf16", forward_bf16);
24
+ m.def("forward_fp16", forward_fp16);
25
+ m.def("forward_fp32", forward_fp32);
26
+ }
rwkv_rocm/hip/wrapper.cpp ADDED
@@ -0,0 +1,141 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/ATen.h"
3
+ #include <iostream>
4
+ #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
5
+
6
+ typedef at::Half fp16;
7
+
8
+ template <typename F>
9
+ void cuda_wkv_forward(int B, int T, int C,
10
+ float *w, float *u, F *k, F *v, F *y,
11
+ float *aa, float *bb, float *pp);
12
+ template <typename F>
13
+ void cuda_mm8_seq(int B, int N, int M,
14
+ F *x, int x_stride,
15
+ uint8_t *w, int w_stride,
16
+ F *mx, F *rx,
17
+ F *my, F *ry,
18
+ F *y, int y_stride);
19
+ template <typename F>
20
+ void cuda_mm8_one(int N, int M,
21
+ F *x,
22
+ uint8_t *w, int w_stride,
23
+ F *mx, F *rx,
24
+ F *my, F *ry,
25
+ float *y);
26
+
27
+ void wkv_forward(int64_t B, int64_t T, int64_t C,
28
+ torch::Tensor &w, torch::Tensor &u,
29
+ torch::Tensor &k, torch::Tensor &v, torch::Tensor &y,
30
+ torch::Tensor &aa, torch::Tensor &bb, torch::Tensor &pp) {
31
+ const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(w));
32
+ switch (k.scalar_type()) {
33
+ case c10::ScalarType::Half:
34
+ cuda_wkv_forward(B, T, C,
35
+ w.data_ptr<float>(), u.data_ptr<float>(),
36
+ k.data_ptr<fp16>(), v.data_ptr<fp16>(), y.data_ptr<fp16>(),
37
+ aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
38
+ break;
39
+ case c10::ScalarType::Float:
40
+ cuda_wkv_forward(B, T, C,
41
+ w.data_ptr<float>(), u.data_ptr<float>(),
42
+ k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(),
43
+ aa.data_ptr<float>(), bb.data_ptr<float>(), pp.data_ptr<float>());
44
+ break;
45
+ default:
46
+ assert(false && "Only FP16 and FP32 are currently supported");
47
+ }
48
+ }
49
+
50
+ void mm8_seq(int64_t B, int64_t N, int64_t M,
51
+ torch::Tensor &x, torch::Tensor &w,
52
+ torch::Tensor &mx, torch::Tensor &rx,
53
+ torch::Tensor &my, torch::Tensor &ry,
54
+ torch::Tensor &y) {
55
+ assert(x.stride(1) == 1);
56
+ assert(w.stride(1) == 1);
57
+ assert(mx.stride(0) == 1 && rx.stride(0) == 1);
58
+ assert(my.stride(0) == 1 && ry.stride(0) == 1);
59
+ assert(y.stride(1) == 1);
60
+ const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(w));
61
+ switch (x.scalar_type()) {
62
+ case c10::ScalarType::Half:
63
+ cuda_mm8_seq(
64
+ B, N, M,
65
+ x.data_ptr<fp16>(), x.stride(0),
66
+ w.data_ptr<uint8_t>(), w.stride(0),
67
+ mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
68
+ my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
69
+ y.data_ptr<fp16>(), y.stride(0));
70
+ break;
71
+ case c10::ScalarType::Float:
72
+ cuda_mm8_seq(
73
+ B, N, M,
74
+ x.data_ptr<float>(), x.stride(0),
75
+ w.data_ptr<uint8_t>(), w.stride(0),
76
+ mx.data_ptr<float>(), rx.data_ptr<float>(),
77
+ my.data_ptr<float>(), ry.data_ptr<float>(),
78
+ y.data_ptr<float>(), y.stride(0));
79
+ break;
80
+ default:
81
+ assert(false && "Only FP16 and FP32 are currently supported");
82
+ }
83
+ }
84
+ void mm8_one(int64_t N, int64_t M,
85
+ torch::Tensor &x, torch::Tensor &w,
86
+ torch::Tensor &mx, torch::Tensor &rx,
87
+ torch::Tensor &my, torch::Tensor &ry,
88
+ torch::Tensor &y) {
89
+ assert(x.stride(0) == 1);
90
+ assert(w.stride(1) == 1);
91
+ assert(mx.stride(0) == 1 && rx.stride(0) == 1);
92
+ assert(my.stride(0) == 1 && ry.stride(0) == 1);
93
+ assert(y.stride(0) == 1);
94
+ const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(w));
95
+ switch (x.scalar_type()) {
96
+ case c10::ScalarType::Half:
97
+ cuda_mm8_one(
98
+ N, M,
99
+ x.data_ptr<fp16>(),
100
+ w.data_ptr<uint8_t>(), w.stride(0),
101
+ mx.data_ptr<fp16>(), rx.data_ptr<fp16>(),
102
+ my.data_ptr<fp16>(), ry.data_ptr<fp16>(),
103
+ y.data_ptr<float>());
104
+ break;
105
+ case c10::ScalarType::Float:
106
+ cuda_mm8_one(
107
+ N, M,
108
+ x.data_ptr<float>(),
109
+ w.data_ptr<uint8_t>(), w.stride(0),
110
+ mx.data_ptr<float>(), rx.data_ptr<float>(),
111
+ my.data_ptr<float>(), ry.data_ptr<float>(),
112
+ y.data_ptr<float>());
113
+ break;
114
+ default:
115
+ assert(false && "Only FP16 and FP32 are currently supported");
116
+ }
117
+ }
118
+
119
+ using torch::Tensor;
120
+
121
+ #ifndef DISABLE_CUBLAS_GEMM
122
+ void gemm_fp16_cublas(Tensor a, Tensor b, Tensor c);
123
+ #endif
124
+
125
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
126
+ m.def("wkv_forward", &wkv_forward, "wkv forward");
127
+ m.def("mm8_seq", &mm8_seq, "mm8 seq");
128
+ m.def("mm8_one", &mm8_one, "mm8 one");
129
+ #ifndef DISABLE_CUBLAS_GEMM
130
+ m.def("gemm_fp16_cublas", &gemm_fp16_cublas, "gemv fp16 cublas");
131
+ #endif
132
+ }
133
+
134
+ TORCH_LIBRARY(rwkv, m) {
135
+ m.def("wkv_forward", wkv_forward);
136
+ m.def("mm8_seq", mm8_seq);
137
+ m.def("mm8_one", mm8_one);
138
+ #ifndef DISABLE_CUBLAS_GEMM
139
+ m.def("gemm_fp16_cublas", gemm_fp16_cublas);
140
+ #endif
141
+ }
rwkv_rocm/model.py ADDED
@@ -0,0 +1,1530 @@
1
+ ########################################################################################################
2
+ # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
3
+ ########################################################################################################
4
+
5
+ from typing import Optional
6
+ import types, gc, os, time, re, math
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch.nn import functional as F
10
+ torch.backends.cudnn.benchmark = True
11
+ torch.backends.cudnn.allow_tf32 = True
12
+ torch.backends.cuda.matmul.allow_tf32 = True
13
+ current_path = os.path.dirname(os.path.abspath(__file__))
14
+
15
+ ########################################################################################################
16
+
17
+ if os.environ.get('RWKV_JIT_ON') != '0':
18
+ os.environ["RWKV_JIT_ON"] = '1'
19
+ MyModule = torch.jit.ScriptModule
20
+ MyFunction = torch.jit.script_method
21
+ MyStatic = torch.jit.script
22
+ else:
23
+ MyModule = torch.nn.Module
24
+ def __nop(ob):
25
+ return ob
26
+ MyFunction = __nop
27
+ MyStatic = __nop
28
+
29
+ if os.environ.get('RWKV_CUDA_ON') == '1':
30
+ from torch.utils.cpp_extension import load
31
+ try:
32
+ load(
33
+ name=f"wkv_cuda",
34
+ sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu", f"{current_path}/cuda/gemm_fp16_cublas.cu"],
35
+ verbose=True,
36
+ extra_ldflags=["cublas.lib" if os.name == "nt" else ""],
37
+ extra_cuda_cflags=['-fopenmp', '-ffast-math', '-O3', '--offload-arch=gfx1030','-munsafe-fp-atomics'],
38
+ is_python_module=False)
39
+ DISABLE_CUBLAS_GEMM = False
40
+ except:
41
+ print("Failed to build cuBLAS matmul, falling back to torch.matmul. Small models with fp16 will overflow.")
42
+ load(
43
+ name=f"wkv_cuda",
44
+ sources=[f"{current_path}/cuda/wrapper.cpp", f"{current_path}/cuda/operators.cu"],
45
+ verbose=True,
46
+ extra_cuda_cflags=['-fopenmp', '-ffast-math', '-O3', '--offload-arch=gfx1030','-munsafe-fp-atomics'],
47
+ extra_cflags=["-DDISABLE_CUBLAS_GEMM"],
48
+ is_python_module=False)
49
+ DISABLE_CUBLAS_GEMM = True
50
+
51
+ @MyStatic
52
+ def cuda_wkv(T: int, C: int, w, u, k, v, aa, bb, pp):
53
+ assert 1 * C % min(C, 32) == 0
54
+ assert k.dtype == v.dtype == torch.float16 or k.dtype == v.dtype == torch.float32
55
+ assert w.dtype == u.dtype == aa.dtype == bb.dtype == pp.dtype == torch.float32
56
+ w = w.contiguous()
57
+ u = u.contiguous()
58
+ k = k.contiguous()
59
+ v = v.contiguous()
60
+ y = torch.empty((T, C), device=w.device, memory_format=torch.contiguous_format, dtype=k.dtype)
61
+ torch.ops.rwkv.wkv_forward(1, T, C, w, u, k, v, y, aa, bb, pp)
62
+ return y, aa, bb, pp
63
+ @MyStatic
64
+ def cuda_mm8_seq(B: int, N: int, M: int, x, w, mx, rx, my, ry):
65
+ assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
66
+ assert x.dtype == torch.float32 or x.dtype == torch.float16
67
+ assert w.dtype == torch.uint8
68
+ assert x.shape == (B, N)
69
+ assert w.shape == (N, M)
70
+ assert rx.shape == mx.shape == (M,)
71
+ assert ry.shape == my.shape == (N, 1)
72
+ y = torch.empty((B, M), device=w.device, dtype=x.dtype)
73
+ torch.ops.rwkv.mm8_seq(B, N, M, x, w, mx, rx, my, ry, y)
74
+ return y
75
+ @MyStatic
76
+ def cuda_mm8_one(N: int, M: int, x, w, mx, rx, my, ry):
77
+ assert x.dtype == mx.dtype == rx.dtype == my.dtype == ry.dtype
78
+ assert x.dtype == torch.float32 or x.dtype == torch.float16
79
+ assert w.dtype == torch.uint8
80
+ assert x.shape == (N,)
81
+ assert w.shape == (N, M)
82
+ assert rx.shape == mx.shape == (M,)
83
+ assert ry.shape == my.shape == (N, 1)
84
+ y = torch.zeros((M,), device=w.device, dtype=torch.float32)
85
+ torch.ops.rwkv.mm8_one(N, M, x, w, mx, rx, my, ry, y)
86
+ return y.to(dtype=x.dtype)
87
+ else:
88
+ os.environ["RWKV_CUDA_ON"] = '0'
89
+
90
+
91
+ @MyStatic
92
+ def torch_mm8_seq(x, w, mx, rx, my, ry):
93
+ return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
94
+
95
+ @MyStatic
96
+ def torch_mm8_one(x, w, mx, rx, my, ry):
97
+ return x @ ((w.to(dtype=x.dtype) + 0.5) * ry * rx + my + mx)
98
+
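
Editor's note: torch_mm8_seq/torch_mm8_one above are the dequantizing fallback for int8 weights. Each uint8 entry q is mapped back to roughly its original value as (q + 0.5) * ry * rx + my + mx, the inverse of the min/max normalization performed in the loader further down (which also stores the rx/ry scales pre-divided by 16). A small self-contained round-trip sketch on an arbitrary random matrix (the shapes and the my-then-mx ordering are illustrative):

    import torch

    w  = torch.randn(64, 32)
    my = torch.amin(w, dim=1, keepdim=True); t = w - my           # remove per-row minimum
    mx = torch.amin(t, dim=0);               t = t - mx           # remove per-column minimum
    rx = torch.amax(t, dim=0);               t = t / rx           # per-column range
    ry = torch.amax(t, dim=1, keepdim=True); t = t / ry           # per-row range
    q  = torch.clip(torch.floor(t * 256), 0, 255).to(torch.uint8)
    rx, ry = rx / 16, ry / 16                                     # scales stored pre-divided by 16
    w_hat = (q.float() + 0.5) * ry * rx + my + mx                 # same formula as torch_mm8_*
    print((w - w_hat).abs().max())                                # small reconstruction error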
99
+ if os.environ.get('RWKV_CUDA_ON') == '1':
100
+ @MyStatic
101
+ def mm8_seq(x, w, mx, rx, my, ry):
102
+ if w.device.type == 'cuda' and x.dtype == torch.float16:
103
+ B, N, M = x.shape[0], w.shape[0], w.shape[1]
104
+ return cuda_mm8_seq(B, N, M, x, w, mx, rx, my, ry)
105
+ else:
106
+ return torch_mm8_seq(x, w, mx, rx, my, ry)
107
+ @MyStatic
108
+ def mm8_one(x, w, mx, rx, my, ry):
109
+ if w.device.type == 'cuda':
110
+ N, M = w.shape[0], w.shape[1]
111
+ return cuda_mm8_one(N, M, x, w, mx, rx, my, ry)
112
+ else:
113
+ return torch_mm8_one(x, w, mx, rx, my, ry)
114
+ else:
115
+ @MyStatic
116
+ def mm8_seq(x, w, mx, rx, my, ry):
117
+ return torch_mm8_seq(x, w, mx, rx, my, ry)
118
+ @MyStatic
119
+ def mm8_one(x, w, mx, rx, my, ry):
120
+ return torch_mm8_one(x, w, mx, rx, my, ry)
121
+
122
+ def mm8(x: torch.Tensor, w: torch.Tensor, mx: torch.Tensor, rx: torch.Tensor, my: torch.Tensor, ry: torch.Tensor):
123
+ if len(x.shape) == 1:
124
+ return mm8_one(x, w, mx, rx, my, ry)
125
+ return mm8_seq(x, w, mx, rx, my, ry)
126
+
127
+ def matmul(a, b, mx: Optional[torch.Tensor]=None, rx: Optional[torch.Tensor]=None, my: Optional[torch.Tensor]=None, ry: Optional[torch.Tensor]=None, output_dtype: Optional[torch.dtype]=None) -> torch.Tensor:
128
+ if output_dtype is None:
129
+ output_dtype = a.dtype
130
+ if b.dtype in [torch.float16, torch.bfloat16, torch.float32]:
131
+ assert a.dtype == b.dtype
132
+ return matmul_float(a, b, output_dtype=output_dtype)
133
+ elif b.dtype == torch.uint8:
134
+ assert mx is not None
135
+ assert rx is not None
136
+ assert my is not None
137
+ assert ry is not None
138
+ return mm8(a, b, mx, rx, my, ry).to(output_dtype)
139
+ else:
140
+ raise ValueError("Unsupported dtype")
141
+
142
+
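
Editor's note: the matmul helper above dispatches on the weight dtype: fp16/bf16/fp32 weights go through matmul_float (the custom GEMM when the extension built, plain torch.matmul otherwise), while uint8 weights additionally need the mx/rx/my/ry scale tensors and go through mm8. A minimal sketch of both call patterns on CPU; the shapes and neutral scales are assumptions:

    import torch

    a  = torch.randn(4, 8, dtype=torch.float32)
    bf = torch.randn(8, 16, dtype=torch.float32)
    y_float = matmul(a, bf)                                  # float path -> matmul_float

    bq = torch.randint(0, 256, (8, 16), dtype=torch.uint8)
    mx, rx = torch.zeros(16), torch.ones(16)                 # per-column offset / scale
    my, ry = torch.zeros(8, 1), torch.ones(8, 1)             # per-row offset / scale
    y_int8 = matmul(a, bq, mx=mx, rx=rx, my=my, ry=ry)       # uint8 path -> mm8 -> mm8_seq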
143
+ if os.environ.get('RWKV_CUDA_ON') == '1' and not DISABLE_CUBLAS_GEMM:
144
+ def matmul_float(a, b, output_dtype: Optional[torch.dtype]=None):
145
+ if output_dtype is None:
146
+ output_dtype = a.dtype
147
+ if a.dtype == b.dtype == torch.float16 and a.device.type == 'cuda':
148
+ if len(a.shape) == 1:
149
+ assert len(b.shape) == 2
150
+ c = torch.empty((b.shape[-1],), dtype=output_dtype, device=a.device)
151
+ a = a.unsqueeze(0)
152
+ else:
153
+ assert len(a.shape) == len(b.shape)
154
+ assert len(a.shape) == 2 or len(a.shape) == 3
155
+ # torch.empty((*a.shape[:-1], b.shape[-1])) doesn't work with jit
156
+ if len(a.shape) == 2:
157
+ c = torch.empty((a.shape[0], b.shape[-1]), dtype=output_dtype, device=a.device)
158
+ else:
159
+ c = torch.empty((a.shape[0], a.shape[1], b.shape[-1]), dtype=output_dtype, device=a.device)
160
+ torch.ops.rwkv.gemm_fp16_cublas(a, b, c)
161
+ return c
162
+ else:
163
+ return (a @ b).to(output_dtype)
164
+
165
+ else:
166
+ def matmul_float(a, b, output_dtype: Optional[torch.dtype]=None):
167
+ return (a @ b).to(output_dtype)
168
+
169
+
170
+ if os.environ.get('RWKV_DML_ON') == '1':
171
+ import torch_directml
172
+ print("PyTorch with DirectML Enabled")
173
+
174
+ if os.environ.get('RWKV_V7_ON') == '1':
175
+
176
+ print(f'\n### RWKV-7 "Goose" enabled ###\n')
177
+
178
+ torch.backends.cudnn.benchmark = True
179
+ torch.backends.cudnn.allow_tf32 = True
180
+ torch.backends.cuda.matmul.allow_tf32 = True
181
+ # torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = True
182
+ # torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = True
183
+ torch._C._jit_set_autocast_mode(False)
184
+
185
+ MyModule = torch.jit.ScriptModule
186
+ MyFunction = torch.jit.script_method
187
+ MyStatic = torch.jit.script
188
+ from typing import List
189
+
190
+ DTYPE = None
191
+ DEVICE = None
192
+ HEAD_SIZE = 64
193
+
194
+ if os.environ.get('RWKV_CUDA_ON') == '1':
195
+ from torch.utils.cpp_extension import load
196
+ load(name="wkv7s", sources=[f"{current_path}/cuda/rwkv7_op.cpp", f"{current_path}/cuda/rwkv7.cu"], is_python_module=False,
197
+ verbose=True, extra_cuda_cflags=['-fopenmp', '-ffast-math', '-O3', '--offload-arch=gfx1030','-munsafe-fp-atomics', f"-D_N_={HEAD_SIZE}"])
198
+ class WKV_7(torch.autograd.Function):
199
+ @staticmethod
200
+ def forward(ctx, state, r, w, k, v, a, b):
201
+ with torch.no_grad():
202
+ T, C = r.size()
203
+ H = C // HEAD_SIZE
204
+ N = HEAD_SIZE
205
+ assert HEAD_SIZE == C // H
206
+ assert all(x.dtype == DTYPE for x in [r,w,k,v,a,b])
207
+ assert all(x.is_contiguous() for x in [r,w,k,v,a,b])
208
+ y = torch.empty((T, C), device=DEVICE, dtype=r.dtype, requires_grad=False, memory_format=torch.contiguous_format)
209
+
210
+ if DTYPE == torch.float16:
211
+ torch.ops.wkv7s.forward_fp16(1, T, C, H, state, r, w, k, v, a, b, y)
212
+ elif DTYPE == torch.bfloat16:
213
+ torch.ops.wkv7s.forward_bf16(1, T, C, H, state, r, w, k, v, a, b, y)
214
+ elif DTYPE == torch.float32:
215
+ torch.ops.wkv7s.forward_fp32(1, T, C, H, state, r, w, k, v, a, b, y)
216
+
217
+ return y
218
+ def RWKV7_OP(state, r, w, k, v, a, b):
219
+ return WKV_7.apply(state, r, w, k, v, a, b)
220
+
221
+ ########################################################################################################
222
+
223
+ class RWKV_x070(MyModule):
224
+ def __init__(self, model, strategy):
225
+ global DTYPE, DEVICE
226
+ super().__init__()
227
+ self.eval()
228
+ args = types.SimpleNamespace()
229
+ self.args = args
230
+ args.MODEL_NAME = model
231
+
232
+ print(f'Loading {model} ({strategy})\n')
233
+
234
+ ss = strategy.split(' ')
235
+ DEVICE = ss[0]
236
+ if ss[1] == 'fp16':
237
+ DTYPE = torch.half
238
+ elif ss[1] == 'fp32':
239
+ DTYPE = torch.float32
240
+ elif ss[1] == 'bf16':
241
+ DTYPE = torch.bfloat16
242
+ else:
243
+ assert False, "currently rwkv7 strategy must be: cuda/cpu fp16/fp32/bf16"
244
+
245
+ temp_z = torch.load(args.MODEL_NAME + '.pth', map_location='cpu', mmap=True)
246
+
247
+ self.n_head, self.head_size = temp_z['blocks.0.att.r_k'].shape
248
+ args.head_size = self.head_size
249
+ args.vocab_size, args.n_embd = temp_z['emb.weight'].shape
250
+
251
+ args.n_layer = 0
252
+ keys = list(temp_z.keys())
253
+ self.z = {}
254
+
255
+ for k in keys:
256
+ layer_id = int(k.split('.')[1]) if ('blocks.' in k) else 0
257
+ args.n_layer = max(args.n_layer, layer_id+1)
258
+ tensor = temp_z[k]
259
+ if 'key.weight' in k or 'value.weight' in k or 'receptance.weight' in k or 'output.weight' in k or 'head.weight' in k:
260
+ tensor = tensor.t().contiguous()
261
+ tensor = tensor.squeeze()
262
+ if k.endswith('att.r_k'):
263
+ tensor = tensor.flatten()
264
+ self.z[k] = tensor.to(DEVICE).to(DTYPE)
265
+ del temp_z[k]
266
+ if keys.index(k) % 5 == 0:
267
+ torch.cuda.empty_cache()
268
+
269
+ self.n_embd = args.n_embd
270
+ self.n_layer = args.n_layer
271
+
272
+ self.z['emb.weight'] = F.layer_norm(self.z['emb.weight'], (args.n_embd,), weight=self.z['blocks.0.ln0.weight'], bias=self.z['blocks.0.ln0.bias'])
273
+ self.z['blocks.0.att.v0'] = torch.empty(0, device=DEVICE, dtype=DTYPE) # actually ignored
274
+ self.z['blocks.0.att.v1'] = torch.empty(0, device=DEVICE, dtype=DTYPE) # actually ignored
275
+ self.z['blocks.0.att.v2'] = torch.empty(0, device=DEVICE, dtype=DTYPE) # actually ignored
276
+ torch.cuda.empty_cache()
277
+
278
+ def forward(self, idx, state, full_output=False):
279
+ if state == None:
280
+ state = [None for _ in range(self.args.n_layer * 3)]
281
+ for i in range(self.args.n_layer): # state: 0=att_x_prev 1=att_kv 2=ffn_x_prev
282
+ state[i*3+0] = torch.zeros(self.args.n_embd, dtype=DTYPE, requires_grad=False, device=DEVICE)
283
+ state[i*3+1] = torch.zeros((self.args.n_embd // self.args.head_size, self.args.head_size, self.args.head_size), dtype=torch.float, requires_grad=False, device=DEVICE)
284
+ state[i*3+2] = torch.zeros(self.args.n_embd, dtype=DTYPE, requires_grad=False, device=DEVICE)
285
+
286
+ if type(idx) is list:
287
+ if len(idx) > 1:
288
+ return self.forward_seq(idx, state, full_output)
289
+ else:
290
+ return self.forward_one(idx[0], state)
291
+ else:
292
+ return self.forward_one(idx, state)
293
+
294
+ @MyFunction
295
+ def forward_one(self, idx:int, state:List[torch.Tensor]):
296
+ with torch.no_grad():
297
+ z = self.z
298
+ x = z['emb.weight'][idx]
299
+
300
+ v_first = torch.empty_like(x)
301
+ for i in range(self.n_layer):
302
+ bbb = f'blocks.{i}.'
303
+ att = f'blocks.{i}.att.'
304
+ ffn = f'blocks.{i}.ffn.'
305
+
306
+ xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln1.weight'], bias=z[bbb+'ln1.bias'])
307
+
308
+ xx, state[i*3+0], state[i*3+1], v_first = RWKV_x070_TMix_one(i, self.n_head, self.head_size, xx, state[i*3+0], v_first, state[i*3+1],
309
+ z[att+'x_r'], z[att+'x_w'], z[att+'x_k'], z[att+'x_v'], z[att+'x_a'], z[att+'x_g'],
310
+ z[att+'w0'], z[att+'w1'], z[att+'w2'], z[att+'a0'], z[att+'a1'], z[att+'a2'], z[att+'v0'], z[att+'v1'], z[att+'v2'],
311
+ z[att+'g1'], z[att+'g2'], z[att+'k_k'], z[att+'k_a'], z[att+'r_k'],
312
+ z[att+'receptance.weight'], z[att+'key.weight'], z[att+'value.weight'], z[att+'output.weight'],
313
+ z[att+'ln_x.weight'], z[att+'ln_x.bias'])
314
+ x = x + xx
315
+
316
+ xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln2.weight'], bias=z[bbb+'ln2.bias'])
317
+
318
+ xx, state[i*3+2] = RWKV_x070_CMix_one(xx, state[i*3+2], z[ffn+'x_k'], z[ffn+'key.weight'], z[ffn+'value.weight'])
319
+ x = x + xx
320
+
321
+ # if math.isnan(torch.min(x).item()): print(idx, i)
322
+
323
+ x = F.layer_norm(x, (self.n_embd,), weight=z['ln_out.weight'], bias=z['ln_out.bias'])
324
+ x = x @ z['head.weight']
325
+ return x, state
326
+
327
+ @MyFunction
328
+ def forward_seq(self, idx:List[int], state:List[torch.Tensor], full_output:bool=False):
329
+ with torch.no_grad():
330
+ z = self.z
331
+ x = z['emb.weight'][idx]
332
+
333
+ v_first = torch.empty_like(x)
334
+ for i in range(self.n_layer):
335
+ bbb = f'blocks.{i}.'
336
+ att = f'blocks.{i}.att.'
337
+ ffn = f'blocks.{i}.ffn.'
338
+
339
+ xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln1.weight'], bias=z[bbb+'ln1.bias'])
340
+
341
+ xx, state[i*3+0], state[i*3+1], v_first = RWKV_x070_TMix_seq(i, self.n_head, self.head_size, xx, state[i*3+0], v_first, state[i*3+1],
342
+ z[att+'x_r'], z[att+'x_w'], z[att+'x_k'], z[att+'x_v'], z[att+'x_a'], z[att+'x_g'],
343
+ z[att+'w0'], z[att+'w1'], z[att+'w2'], z[att+'a0'], z[att+'a1'], z[att+'a2'], z[att+'v0'], z[att+'v1'], z[att+'v2'],
344
+ z[att+'g1'], z[att+'g2'], z[att+'k_k'], z[att+'k_a'], z[att+'r_k'],
345
+ z[att+'receptance.weight'], z[att+'key.weight'], z[att+'value.weight'], z[att+'output.weight'],
346
+ z[att+'ln_x.weight'], z[att+'ln_x.bias'])
347
+ x = x + xx
348
+
349
+ xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln2.weight'], bias=z[bbb+'ln2.bias'])
350
+
351
+ xx, state[i*3+2] = RWKV_x070_CMix_seq(xx, state[i*3+2], z[ffn+'x_k'], z[ffn+'key.weight'], z[ffn+'value.weight'])
352
+ x = x + xx
353
+
354
+ if not full_output: x = x[-1,:]
355
+ x = F.layer_norm(x, (self.n_embd,), weight=z['ln_out.weight'], bias=z['ln_out.bias'])
356
+ x = x @ z['head.weight']
357
+ return x, state
358
+
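
Editor's note: for orientation, a hypothetical end-to-end use of the RWKV_x070 class above. The model path and token ids are placeholders; RWKV_V7_ON (and, if desired, RWKV_CUDA_ON) must be set before the import, and the import path assumes this upload is used as the rwkv_rocm package:

    import os
    os.environ['RWKV_V7_ON'] = '1'
    from rwkv_rocm.model import RWKV_x070                     # assumed import path

    model = RWKV_x070('/path/to/rwkv7-goose', 'cuda fp16')    # '.pth' is appended by the loader
    logits, state = model.forward([510, 4705, 275], None)     # prompt tokens, fresh state
    logits, state = model.forward(311, state)                 # then decode one token at a time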
359
+ ########################################################################################################
360
+
361
+ @MyStatic
362
+ def RWKV_x070_TMix_one(layer_id: int, H:int, N:int, x, x_prev, v_first, state, x_r, x_w, x_k, x_v, x_a, x_g, w0, w1, w2, a0, a1, a2, v0, v1, v2, g1, g2, k_k, k_a, r_k, R_, K_, V_, O_, ln_w, ln_b):
363
+ xx = x_prev - x
364
+ xr, xw, xk, xv, xa, xg = x+xx*x_r, x+xx*x_w, x+xx*x_k, x+xx*x_v, x+xx*x_a, x+xx*x_g
365
+
366
+ r = xr @ R_
367
+ w = torch.tanh(xw @ w1) @ w2
368
+ k = xk @ K_
369
+ v = xv @ V_
370
+ a = torch.sigmoid(a0 + (xa @ a1) @ a2)
371
+ g = torch.sigmoid(xg @ g1) @ g2
372
+
373
+ kk = torch.nn.functional.normalize((k * k_k).view(H,N), dim=-1, p=2.0).view(H*N)
374
+ k = k * (1 + (a-1) * k_a)
375
+ if layer_id == 0: v_first = v
376
+ else: v = v + (v_first - v) * torch.sigmoid(v0 + (xv @ v1) @ v2)
377
+ w = torch.exp(-0.606531 * torch.sigmoid((w0 + w).float())) # 0.606531 = exp(-0.5)
378
+
379
+ vk = v.view(H,N,1) @ k.view(H,1,N)
380
+ ab = (-kk).view(H,N,1) @ (kk*a).view(H,1,N)
381
+ state = state * w.view(H,1,N) + state @ ab.float() + vk.float()
382
+ xx = (state.to(dtype=x.dtype) @ r.view(H,N,1))
383
+
384
+ xx = torch.nn.functional.group_norm(xx.view(1,H*N), num_groups=H, weight=ln_w, bias=ln_b, eps = 64e-5).view(H*N)
385
+ xx = xx + ((r * k * r_k).view(H,N).sum(dim=-1, keepdim=True) * v.view(H,N)).view(H*N)
386
+ return (xx * g) @ O_, x, state, v_first
387
+
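
Editor's note: in matrix form, the per-head state update implemented by the three lines above (state * w, state @ ab, vk, then the read-out with r) is

    $$ S_t \;=\; S_{t-1}\,\mathrm{diag}(w_t) \;+\; S_{t-1}\,(-\kappa_t)\,(\kappa_t \odot a_t)^{\top} \;+\; v_t\,k_t^{\top}, \qquad y_t \;=\; S_t\, r_t, $$

where $\kappa_t$ is the normalized key kk, $a_t$ the in-context learning-rate gate, and $S_t \in \mathbb{R}^{N\times N}$ is the per-head state; the group-norm, the r_k bonus term, and the output projection follow.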
388
+ if os.environ.get('RWKV_CUDA_ON') == '1':
389
+ @MyStatic
390
+ def RWKV_x070_TMix_seq(layer_id: int, H:int, N:int, x, x_prev, v_first, state, x_r, x_w, x_k, x_v, x_a, x_g, w0, w1, w2, a0, a1, a2, v0, v1, v2, g1, g2, k_k, k_a, r_k, R_, K_, V_, O_, ln_w, ln_b):
391
+ T = x.shape[0]
392
+ xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
393
+ xr, xw, xk, xv, xa, xg = x+xx*x_r, x+xx*x_w, x+xx*x_k, x+xx*x_v, x+xx*x_a, x+xx*x_g
394
+
395
+ r = xr @ R_
396
+ w = torch.tanh(xw @ w1) @ w2
397
+ k = xk @ K_
398
+ v = xv @ V_
399
+ a = torch.sigmoid(a0 + (xa @ a1) @ a2)
400
+ g = torch.sigmoid(xg @ g1) @ g2
401
+
402
+ kk = torch.nn.functional.normalize((k * k_k).view(T,H,N), dim=-1, p=2.0).view(T,H*N)
403
+ k = k * (1 + (a-1) * k_a)
404
+ if layer_id == 0: v_first = v
405
+ else: v = v + (v_first - v) * torch.sigmoid(v0 + (xv @ v1) @ v2)
406
+
407
+ w = -torch.nn.functional.softplus(-(w0 + w)) - 0.5
408
+ xx = RWKV7_OP(state, r, w, k, v, -kk, kk*a)
409
+
410
+ xx = torch.nn.functional.group_norm(xx.view(T,H*N), num_groups=H, weight=ln_w, bias=ln_b, eps = 64e-5).view(T,H*N)
411
+ xx = xx + ((r * k * r_k).view(T,H,N).sum(dim=-1, keepdim=True) * v.view(T,H,N)).view(T,H*N)
412
+ return (xx * g) @ O_, x[-1,:], state, v_first
413
+ else:
414
+ @MyStatic
415
+ def RWKV_x070_TMix_seq(layer_id: int, H:int, N:int, x, x_prev, v_first, state, x_r, x_w, x_k, x_v, x_a, x_g, w0, w1, w2, a0, a1, a2, v0, v1, v2, g1, g2, k_k, k_a, r_k, R_, K_, V_, O_, ln_w, ln_b):
416
+ T = x.shape[0]
417
+ xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
418
+ xr, xw, xk, xv, xa, xg = x+xx*x_r, x+xx*x_w, x+xx*x_k, x+xx*x_v, x+xx*x_a, x+xx*x_g
419
+
420
+ r = xr @ R_
421
+ w = torch.tanh(xw @ w1) @ w2
422
+ k = xk @ K_
423
+ v = xv @ V_
424
+ a = torch.sigmoid(a0 + (xa @ a1) @ a2)
425
+ g = torch.sigmoid(xg @ g1) @ g2
426
+
427
+ kk = torch.nn.functional.normalize((k * k_k).view(T,H,N), dim=-1, p=2.0).view(T,H*N)
428
+ k = k * (1 + (a-1) * k_a)
429
+ if layer_id == 0: v_first = v
430
+ else: v = v + (v_first - v) * torch.sigmoid(v0 + (xv @ v1) @ v2)
431
+
432
+ w = torch.exp(-0.606531 * torch.sigmoid((w0 + w).float())) # 0.606531 = exp(-0.5)
433
+ for t in range(T):
434
+ r_, w_, k_, v_, kk_, a_ = r[t], w[t], k[t], v[t], kk[t], a[t]
435
+ vk = v_.view(H,N,1) @ k_.view(H,1,N)
436
+ ab = (-kk_).view(H,N,1) @ (kk_*a_).view(H,1,N)
437
+ state = state * w_.view(H,1,N) + state @ ab.float() + vk.float()
438
+ xx[t] = (state.to(dtype=x.dtype) @ r_.view(H,N,1)).view(H*N)
439
+
440
+ xx = torch.nn.functional.group_norm(xx.view(T,H*N), num_groups=H, weight=ln_w, bias=ln_b, eps = 64e-5).view(T,H*N)
441
+ xx = xx + ((r * k * r_k).view(T,H,N).sum(dim=-1, keepdim=True) * v.view(T,H,N)).view(T,H*N)
442
+ return (xx * g) @ O_, x[-1,:], state, v_first
443
+
444
+ ########################################################################################################
445
+
446
+ @MyStatic
447
+ def RWKV_x070_CMix_one(x, x_prev, x_k, K_, V_):
448
+ xx = x_prev - x
449
+ k = x + xx * x_k
450
+ k = torch.relu(k @ K_) ** 2
451
+ return k @ V_, x
452
+
453
+ @MyStatic
454
+ def RWKV_x070_CMix_seq(x, x_prev, x_k, K_, V_):
455
+ xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
456
+ k = x + xx * x_k
457
+ k = torch.relu(k @ K_) ** 2
458
+ return k @ V_, x[-1,:]
459
+
460
+ ########################################################################################################
461
+
462
+ class RWKV(MyModule):
463
+ def __init__(self, model, strategy, verbose = True, convert_and_save_and_exit = None):
464
+ super().__init__()
465
+ if verbose:
466
+ prxxx = lambda *args, **kwargs: print(*args, **kwargs)
467
+ else:
468
+ prxxx = lambda *args, **kwargs: None
469
+
470
+ STRATEGY_REGEX = r"^(?:(?:^|->) *(?:cuda(?::[\d]+)?|cpu|mps|dml) (?:fp(?:16|32)|bf16)(?:i8|i4|i3)?(?: \*[\d]+\+?)? *)+$"
471
+ if not re.match(STRATEGY_REGEX, strategy):
472
+ raise ValueError("Invalid strategy. Please read https://pypi.org/project/rwkv/")
473
+
474
+ strategy = ('->'.join([x.strip() for x in strategy.split('->')])).replace('->', ' -> ')
475
+ self.args = types.SimpleNamespace()
476
+ args = self.args
477
+ args.MODEL_NAME = model
478
+ args.strategy_string = strategy
479
+
480
+ # Rescale for fp16 mode: set x = x/2 every X layer (to avoid fp16 overflow)
481
+ try:
482
+ self.RESCALE_LAYER = int(os.environ["RWKV_RESCALE_LAYER"]) # !!! NOTE: SEEMS YOU SHOULD SET IT TO 999 (disable) FOR RWKV-MUSIC MODELS !!!
483
+ except:
484
+ self.RESCALE_LAYER = 6 if 'fp16' in strategy else 0
485
+ prxxx(f'RWKV_JIT_ON {os.environ["RWKV_JIT_ON"]} RWKV_CUDA_ON {os.environ["RWKV_CUDA_ON"]} RESCALE_LAYER {self.RESCALE_LAYER}\n')
486
+
487
+ args.MODEL_NAME = args.MODEL_NAME.strip()
488
+ if not args.MODEL_NAME.endswith('.pth'):
489
+ args.MODEL_NAME += '.pth'
490
+ prxxx(f'Loading {args.MODEL_NAME} ...')
491
+ with torch.no_grad():
492
+ self.w = torch.load(args.MODEL_NAME, map_location='cpu') # load model to CPU first
493
+ gc.collect()
494
+ w = self.w
495
+
496
+ ALREADY_CONVERTED = False
497
+ if '_strategy' in w:
498
+ ALREADY_CONVERTED = True
499
+ assert convert_and_save_and_exit == None # you should only convert a raw model
500
+ prxxx(f"Converted model: strategy {w['_strategy']}, version {w['_version']}\n")
501
+ assert w['_strategy'] == args.strategy_string # if you are using a new strategy, re-convert the model
502
+ assert float(w['_version']) >= 0.7 # sometimes you should re-convert using latest convert_model.py
503
+ assert w['_rescale_layer'] == self.RESCALE_LAYER # must use same RESCALE_LAYER to avoid mistakes
504
+ del w['_strategy']
505
+ del w['_version']
506
+ del w['_rescale_layer']
507
+
508
+ args.n_embd = w['emb.weight'].shape[1]
509
+ args.n_att = w['blocks.0.att.key.weight'].shape[0] # note: transposed matrix
510
+ args.n_ffn = w['blocks.0.ffn.key.weight'].shape[0] # note: transposed matrix
511
+ args.n_layer = 0
512
+ keys = list(w.keys())
513
+ self.version = 4
514
+ for x in keys:
515
+ layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
516
+ args.n_layer = max(args.n_layer, layer_id+1)
517
+ if 'ln_x' in x:
518
+ self.version = max(5, self.version)
519
+ if 'gate.weight' in x:
520
+ self.version = max(5.1, self.version)
521
+ if int(self.version) == 5 and 'att.time_decay' in x:
522
+ args.n_head = w[x].shape[0]
523
+ if len(w[x].shape) > 1:
524
+ if w[x].shape[1] > 1:
525
+ self.version = max(5.2, self.version)
526
+ if 'time_maa' in x:
527
+ self.version = max(6, self.version)
528
+ if int(self.version) == 6 and 'time_faaaa' in x:
529
+ args.n_head = w[x].shape[0]
530
+ prxxx(f'Model detected: v{self.version:.1f}')
531
+
532
+ ####################### Compute strategy
533
+
534
+ s = [x.strip().split(' ') for x in strategy.split('->')]
535
+ plan = [0] * len(s)
536
+ stream_i = -1
537
+ stream_count = 0
538
+ to_allocate = args.n_layer + 1
539
+ allocated = 0
540
+ free_slots = 0
541
+ for i in range(len(s)):
542
+ si = s[i]
543
+ si1 = si[1]
544
+ if si1.startswith('fp32'): si[1] = [torch.float]
545
+ elif si1.startswith('fp16'): si[1] = [torch.float16]
546
+ elif si1.startswith('bf16'): si[1] = [torch.bfloat16]
547
+ if si1.endswith('i8'): si[1] += [torch.uint8]
548
+ else: si[1] += [si[1][0]]
549
+ if len(si) > 2:
550
+ ss = si[2]
551
+ assert ss.startswith('*')
552
+ if ss.endswith('+'):
553
+ plan[i] = int(ss[1:-1])
554
+ stream_i = i
555
+ else:
556
+ plan[i] = int(ss[1:])
557
+ allocated += plan[i]
558
+ if allocated >= to_allocate:
559
+ plan[i] += to_allocate - allocated
560
+ break
561
+ else:
562
+ free_slots += 1
563
+ if stream_i < 0:
564
+ if free_slots > 0 and to_allocate > allocated:
565
+ for i in range(len(s)):
566
+ if plan[i] == 0:
567
+ plan[i] = (to_allocate - allocated) // free_slots
568
+ allocated += plan[i]
569
+ free_slots -= 1
570
+ if to_allocate > allocated:
571
+ plan[len(s)-1] += to_allocate - allocated
572
+ else:
573
+ if to_allocate > allocated:
574
+ stream_count = to_allocate - allocated
575
+ plan[stream_i] += stream_count
576
+ prxxx(f'Strategy: (total {args.n_layer}+1={args.n_layer+1} layers)')
577
+ for i in range(len(s)):
578
+ ss = s[i]
579
+ if i != stream_i:
580
+ prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]} layers')
581
+ else:
582
+ prxxx(f'* {ss[0]} {str(ss[1]).replace("torch.","")}, store {plan[i]-stream_count} layers, stream {stream_count} layers')
583
+ plan[i] += (0 if i == 0 else plan[i-1])
584
+ self.strategy = [None] * (args.n_layer + 1)
585
+ strategy = self.strategy
586
+ for n in range(args.n_layer + 1):
587
+ for i in range(len(s)):
588
+ if n < plan[i]:
589
+ strategy[n] = types.SimpleNamespace()
590
+ strategy[n].device = s[i][0]
591
+ strategy[n].atype = s[i][1][0]
592
+ strategy[n].wtype = s[i][1][1]
593
+ strategy[n].stream = False
594
+ if strategy[n].device == 'dml':
595
+ strategy[n].device = torch_directml.device()
596
+ if i == stream_i and n >= (plan[i] - stream_count):
597
+ strategy[n].stream = True
598
+ break
599
+ prxxx(f"{n}-{strategy[n].device}-{str(strategy[n].atype).replace('torch.','')}-{str(strategy[n].wtype).replace('torch.','')}{'-stream' if strategy[n].stream else ''}",end=' ')
600
+ prxxx()
601
+
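
Editor's note: a few strategy strings that satisfy STRATEGY_REGEX above, with their intended meaning per the rwkv package documentation (the layer counts are illustrative):

    examples = [
        'cpu fp32',                      # everything on CPU in fp32
        'cuda fp16',                     # everything on one GPU in fp16
        'cuda fp16i8 *20 -> cuda fp16',  # first 20 layers as int8 on GPU, remaining layers fp16 on GPU
        'cuda fp16 *10+',                # keep 10 layers resident on GPU, stream the rest from pinned CPU RAM
    ]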
602
+ ####################### Load weights to self.w
603
+
604
+ if not ALREADY_CONVERTED:
605
+ try: # precompute embedding
606
+ w['emb.weight'] = F.layer_norm(w['emb.weight'], (args.n_embd,), weight=w['blocks.0.ln0.weight'], bias=w['blocks.0.ln0.bias'])
607
+ except:
608
+ w['emb.weight'] = F.layer_norm(w['emb.weight'].float(), (args.n_embd,), weight=w['blocks.0.ln0.weight'].float(), bias=w['blocks.0.ln0.bias'].float())
609
+ del w['blocks.0.ln0.weight']
610
+ del w['blocks.0.ln0.bias']
611
+
612
+ print_need_newline = False
613
+
614
+ REAL_TIME_FIRST = False
615
+ args.time_state = False
616
+ for x in list(w.keys()):
617
+ if '.time_faaaa' in x: REAL_TIME_FIRST = True
618
+ if '.time_state' in x: args.time_state = True
619
+ if REAL_TIME_FIRST:
620
+ w = {k.replace('.time_faaaa','.time_first') if '.time_faaaa' in k else k: v for k, v in w.items()}
621
+ self.w = w
622
+
623
+ keys = list(w.keys())
624
+ for x in keys:
625
+ w[x].requires_grad = False
626
+ layer_id = int(x.split('.')[1]) if ('blocks.' in x) else 0
627
+ if ('ln_out.' in x) or ('head.' in x):
628
+ layer_id = args.n_layer
629
+ dd = strategy[layer_id]
630
+ DEVICE = dd.device
631
+ ATYPE = dd.atype
632
+ WTYPE = dd.wtype
633
+
634
+ if not ALREADY_CONVERTED:
635
+ if self.RESCALE_LAYER > 0:
636
+ if 'att.output.weight' in x:
637
+ w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
638
+ if 'ffn.value.weight' in x:
639
+ w[x] = w[x] / (2 ** int(layer_id // self.RESCALE_LAYER))
640
+
641
+ if '.time_' in x:
642
+ w[x] = w[x].squeeze()
643
+ if 'key.weight' in x or 'value.weight' in x or 'receptance.weight' in x or 'gate.weight' in x or 'output.weight' in x or 'head.weight' in x:
644
+ w[x] = w[x].t()
645
+
646
+ if '.time_decay' in x and '_w' not in x: # need fp32 for this
647
+ if self.version == 4:
648
+ w[x] = -torch.exp(w[x].float())
649
+ elif int(self.version) == 5:
650
+ w[x] = torch.exp(-torch.exp(w[x].float())).reshape(-1,1,1)
651
+ if self.version == 5.2:
652
+ w[x] = w[x].reshape(args.n_head, -1, 1)
653
+ elif self.version == 6.0:
654
+ w[x] = w[x].float().reshape(args.n_head, -1, 1)
655
+ elif '.time_first' in x: # need fp32 for this
656
+ if self.version == 4:
657
+ w[x] = w[x].float()
658
+ elif int(self.version) in [5, 6]:
659
+ if REAL_TIME_FIRST:
660
+ w[x] = w[x].float().reshape(-1,1,1)
661
+ else:
662
+ w[x] = torch.exp(w[x].float()).reshape(-1,1,1)
663
+ if self.version in [5.2, 6.0]:
664
+ w[x] = w[x].reshape(args.n_head, -1, 1)
665
+ elif '.ln_x' in x: # need fp32 for group_norm
666
+ w[x] = w[x].float()
667
+ else:
668
+ if (len(w[x].shape) == 2) and ('emb' not in x) and ('_w1' not in x) and ('_w2' not in x):
669
+ if WTYPE != torch.uint8:
670
+ w[x] = w[x].to(dtype=WTYPE)
671
+ else:
672
+ w[x] = w[x].float()
673
+
674
+ if w[x].shape[0] > w[x].shape[1]:
675
+ w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
676
+ w[x] = w[x] - w[x+'_my']
677
+ w[x+'_mx'] = torch.amin(w[x], dim=0)
678
+ w[x] = w[x] - w[x+'_mx']
679
+ w[x+'_rx'] = torch.amax(w[x], dim=0)
680
+ w[x] = w[x] / w[x+'_rx']
681
+ w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
682
+ w[x] = w[x] / w[x+'_ry']
683
+ else:
684
+ w[x+'_mx'] = torch.amin(w[x], dim=0)
685
+ w[x] = w[x] - w[x+'_mx']
686
+ w[x+'_my'] = torch.amin(w[x], dim=1).unsqueeze(1)
687
+ w[x] = w[x] - w[x+'_my']
688
+ w[x+'_rx'] = torch.amax(w[x], dim=0)
689
+ w[x] = w[x] / w[x+'_rx']
690
+ w[x+'_ry'] = torch.amax(w[x], dim=1).unsqueeze(1)
691
+ w[x] = w[x] / w[x+'_ry']
692
+
693
+ w[x] = torch.clip(torch.floor(w[x] * 256), min=0, max=255).to(dtype=torch.uint8)
694
+ w[x+'_mx'] = w[x+'_mx'].to(dtype=ATYPE).contiguous()
695
+ w[x+'_rx'] = (w[x+'_rx'] / 16).to(dtype=ATYPE).contiguous()
696
+ w[x+'_my'] = w[x+'_my'].to(dtype=ATYPE).contiguous()
697
+ w[x+'_ry'] = (w[x+'_ry'] / 16).to(dtype=ATYPE).contiguous()
698
+ else:
699
+ w[x] = w[x].to(dtype=ATYPE)
700
+
701
+ if convert_and_save_and_exit == None:
702
+ if 'emb.' in x:
703
+ w[x] = w[x].contiguous()
704
+ elif (dd.stream) and (x.endswith('key.weight') or x.endswith('value.weight') or x.endswith('receptance.weight') or x.endswith('output.weight')):
705
+ try:
706
+ w[x] = w[x].contiguous().pin_memory() # if you see "CUDA error: out of memory" here, that's out of CPU RAM, not VRAM. Get more RAM :)
707
+ except:
708
+ print('Note: You are running out of RAM. Get more CPU RAM. Now this will run much slower.')
709
+ elif DEVICE != 'cpu':
710
+ w[x] = w[x].to(device=DEVICE).contiguous()
711
+
712
+ if (dd.stream) or (DEVICE != 'cpu'):
713
+ try:
714
+ w[x+'_mx'] = w[x+'_mx'].to(device=DEVICE).contiguous()
715
+ w[x+'_rx'] = w[x+'_rx'].to(device=DEVICE).contiguous()
716
+ w[x+'_my'] = w[x+'_my'].to(device=DEVICE).contiguous()
717
+ w[x+'_ry'] = w[x+'_ry'].to(device=DEVICE).contiguous()
718
+ except:
719
+ pass
720
+
721
+ if 'ffn.value.weight' in x:
722
+ gc.collect()
723
+ if 'cuda' in args.strategy_string:
724
+ torch.cuda.empty_cache()
725
+
726
+ shape = [i for i in w[x].shape if i != 1]
727
+ if len(shape) > 2:
728
+ shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)} {str(shape[2]).rjust(5)}"
729
+ elif len(shape) > 1:
730
+ shape = f" {str(shape[0]).rjust(5)} {str(shape[1]).rjust(5)} "
731
+ else:
732
+ shape = f" {str(shape[0]).rjust(5)} "
733
+ if layer_id == 0 or layer_id >= args.n_layer-1:
734
+ if print_need_newline:
735
+ prxxx('\n', end = '')
736
+ print_need_newline = False
737
+ dt = str(w[x].dtype).replace('torch.', '')
738
+ dt = dt.replace('float32', 'f32').replace('bfloat16', 'bf16').replace('float16', 'f16').replace('uint8', 'i8')
739
+ prxxx(x.ljust(32), dt.rjust(4), str(w[x].device).rjust(8), shape, ' (pinned)' if w[x].is_pinned() else '')
740
+ else:
741
+ print_need_newline = True
742
+ prxxx('.', end = '', flush = True)
743
+
744
+ if convert_and_save_and_exit:
745
+ w['_strategy'] = args.strategy_string
746
+ w['_rescale_layer'] = self.RESCALE_LAYER
747
+ w['_version'] = '0.7'
748
+ if not convert_and_save_and_exit.endswith('.pth'):
749
+ convert_and_save_and_exit += '.pth'
750
+ prxxx(f'Saving to {convert_and_save_and_exit}...')
751
+ torch.save(w, convert_and_save_and_exit)
752
+ prxxx(f'Converted and saved. Now this will exit.')
753
+ exit(0)
754
+
755
+ if self.version == 5.2 and os.environ["RWKV_CUDA_ON"] == '1':
756
+ HEAD_SIZE = args.n_att // args.n_head
757
+ rwkv5 = load(name="rwkv5", sources=[f"{current_path}/cuda/rwkv5_op.cpp", f"{current_path}/cuda/rwkv5.cu"],
758
+ verbose=True, extra_cuda_cflags=['-fopenmp', '-ffast-math', '-O3', '--offload-arch=gfx1030','-munsafe-fp-atomics' if os.name != "nt" else "", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"])
759
+
760
+ class RWKV_5(torch.autograd.Function):
761
+ @staticmethod
762
+ def forward(ctx, B, T, C, H, state, r, k, v, w, u):
763
+ with torch.no_grad():
764
+ assert HEAD_SIZE == C // H
765
+ ctx.B = B
766
+ ctx.T = T
767
+ ctx.C = C
768
+ ctx.H = H
769
+ assert state.dtype == torch.float32
770
+ assert w.dtype == torch.float32
771
+ assert r.is_contiguous()
772
+ assert k.is_contiguous()
773
+ assert v.is_contiguous()
774
+ assert w.is_contiguous()
775
+ assert u.is_contiguous()
776
+ assert state.is_contiguous()
777
+
778
+ y = torch.empty((B, T, C), device=w.device, dtype=r.dtype, memory_format=torch.contiguous_format)
779
+ if r.dtype == torch.bfloat16:
780
+ rwkv5.forward_bf16(B, T, C, H, state, r, k, v, w, u, y)
781
+ elif r.dtype == torch.float16:
782
+ rwkv5.forward_fp16(B, T, C, H, state, r, k, v, w, u, y)
783
+ elif r.dtype == torch.float32:
784
+ rwkv5.forward_fp32(B, T, C, H, state, r, k, v, w, u, y)
785
+ return y, state
786
+ self.RWKV_5 = RWKV_5
787
+
788
+ if self.version == 6.0 and os.environ["RWKV_CUDA_ON"] == '1':
789
+ HEAD_SIZE = args.n_att // args.n_head
790
+ rwkv6 = load(name="rwkv6", sources=[f"{current_path}/cuda/rwkv6_op.cpp", f"{current_path}/cuda/rwkv6.cu"],
791
+ verbose=True, extra_cuda_cflags=['-fopenmp', '-ffast-math', '-O3', '--offload-arch=gfx1030','-munsafe-fp-atomics' if os.name != "nt" else "", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={4096}"])
792
+
793
+ class RWKV_6(torch.autograd.Function):
794
+ @staticmethod
795
+ def forward(ctx, B, T, C, H, state, r, k, v, w, u):
796
+ with torch.no_grad():
797
+ assert HEAD_SIZE == C // H
798
+ ctx.B = B
799
+ ctx.T = T
800
+ ctx.C = C
801
+ ctx.H = H
802
+ assert state.dtype == torch.float32
803
+ assert w.dtype == torch.float32
804
+ assert r.is_contiguous()
805
+ assert k.is_contiguous()
806
+ assert v.is_contiguous()
807
+ assert w.is_contiguous()
808
+ assert u.is_contiguous()
809
+ eew = torch.exp(-torch.exp(w.float())).contiguous()
810
+
811
+ y = torch.empty((B, T, C), device=w.device, dtype=r.dtype, memory_format=torch.contiguous_format)
812
+ if r.dtype == torch.bfloat16:
813
+ rwkv6.forward_bf16(B, T, C, H, state, r, k, v, eew, u, y)
814
+ elif r.dtype == torch.float16:
815
+ rwkv6.forward_fp16(B, T, C, H, state, r, k, v, eew, u, y)
816
+ elif r.dtype == torch.float32:
817
+ rwkv6.forward_fp32(B, T, C, H, state, r, k, v, eew, u, y)
818
+ return y, state
819
+ self.RWKV_6 = RWKV_6
820
+
821
+ gc.collect()
822
+ if 'cuda' in args.strategy_string:
823
+ torch.cuda.empty_cache()
824
+
825
+ def RUN_RWKV_5(self, B, T, C, H, state, r, k, v, w, u):
826
+ return self.RWKV_5.apply(B, T, C, H, state, r, k, v, w, u)
827
+
828
+ def RUN_RWKV_6(self, B, T, C, H, state, r, k, v, w, u):
829
+ return self.RWKV_6.apply(B, T, C, H, state, r, k, v, w, u)
830
+
831
+ ########################################################################################################
832
+
833
+ @MyFunction
834
+ def ffn_one(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
835
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
836
+ kx = xx * k_mix + sx * (1 - k_mix)
837
+ rx = xx * r_mix + sx * (1 - r_mix)
838
+
839
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
840
+ vx = torch.relu(matmul(kx, kw, kmx, krx, kmy, kry)) ** 2
841
+ out = r * matmul(vx, vw, vmx, vrx, vmy, vry)
842
+ return x + out, xx
843
+
844
+ @MyFunction
845
+ def ffn_seq(self, x, sx, ln_w, ln_b, k_mix, r_mix, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
846
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
847
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
848
+ kx = xx * k_mix + sx * (1 - k_mix)
849
+ rx = xx * r_mix + sx * (1 - r_mix)
850
+
851
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
852
+ vx = torch.relu(matmul(kx, kw, kmx, krx, kmy, kry)) ** 2
853
+ out = r * matmul(vx, vw, vmx, vrx, vmy, vry)
854
+ return x + out, xx[-1,:]
855
+
856
+ @MyFunction
857
+ def ffn_one_v6(self, x, sx, ln_w, ln_b, k_maa, r_maa, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
858
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
859
+ sx = sx - xx
860
+ kx = xx + sx * k_maa
861
+ rx = xx + sx * r_maa
862
+
863
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
864
+ vx = torch.relu(matmul(kx, kw, kmx, krx, kmy, kry)) ** 2
865
+ out = r * matmul(vx, vw, vmx, vrx, vmy, vry)
866
+ return x + out, xx
867
+
868
+ @MyFunction
869
+ def ffn_seq_v6(self, x, sx, ln_w, ln_b, k_maa, r_maa, kw, vw, rw, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry):
870
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
871
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
872
+ sx = sx - xx
873
+ kx = xx + sx * k_maa
874
+ rx = xx + sx * r_maa
875
+
876
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
877
+ vx = torch.relu(matmul(kx, kw, kmx, krx, kmy, kry)) ** 2
878
+ out = r * matmul(vx, vw, vmx, vrx, vmy, vry)
879
+ return x + out, xx[-1,:]
880
+
881
+ ########################################################################################################
882
+
883
+ @MyFunction
884
+ def att_one(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
885
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
886
+ kx = xx * k_mix + sx * (1 - k_mix)
887
+ vx = xx * v_mix + sx * (1 - v_mix)
888
+ rx = xx * r_mix + sx * (1 - r_mix)
889
+
890
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
891
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
892
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
893
+
894
+ ww = t_first + k
895
+ p = torch.maximum(pp, ww)
896
+ e1 = torch.exp(pp - p)
897
+ e2 = torch.exp(ww - p)
898
+ wkv = ((e1 * aa + e2 * v) / (e1 * bb + e2)).to(dtype=x.dtype)
899
+ ww = t_decay + pp
900
+ p = torch.maximum(ww, k)
901
+ e1 = torch.exp(ww - p)
902
+ e2 = torch.exp(k - p)
903
+
904
+ out = matmul(r * wkv, ow, omx, orx, omy, ory)
905
+ return x + out, xx, e1 * aa + e2 * v, e1 * bb + e2, p
906
+
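
Editor's note: the running triple (aa, bb, pp) above holds the RWKV-4 wkv numerator, denominator, and their shared maximum exponent, so the exponentials stay in fp32 range. Written out, the quantity being computed is

    $$ \mathrm{wkv}_t \;=\; \frac{\sum_{i<t} e^{(t-1-i)\,w + k_i}\,v_i \;+\; e^{u + k_t}\,v_t}{\sum_{i<t} e^{(t-1-i)\,w + k_i} \;+\; e^{u + k_t}}, $$

with $w=$ t_decay (negative, $-e^{w_{\text{raw}}}$ after the loader's conversion) and $u=$ t_first; aa and bb accumulate the two sums scaled by $e^{-pp}$.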
907
+ @MyFunction
908
+ def att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
909
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
910
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
911
+ kx = xx * k_mix + sx * (1 - k_mix)
912
+ vx = xx * v_mix + sx * (1 - v_mix)
913
+ rx = xx * r_mix + sx * (1 - r_mix)
914
+
915
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
916
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
917
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
918
+
919
+ T = x.shape[0]
920
+ for t in range(T):
921
+ kk = k[t]
922
+ vv = v[t]
923
+ ww = t_first + kk
924
+ p = torch.maximum(pp, ww)
925
+ e1 = torch.exp(pp - p)
926
+ e2 = torch.exp(ww - p)
927
+ sx[t] = ((e1 * aa + e2 * vv) / (e1 * bb + e2)).to(dtype=x.dtype)
928
+ ww = t_decay + pp
929
+ p = torch.maximum(ww, kk)
930
+ e1 = torch.exp(ww - p)
931
+ e2 = torch.exp(kk - p)
932
+ aa = e1 * aa + e2 * vv
933
+ bb = e1 * bb + e2
934
+ pp = p
935
+ out = matmul(r * sx, ow, omx, orx, omy, ory)
936
+ return x + out, xx[-1,:], aa, bb, pp
937
+
938
+ ########################################################################################################
939
+
940
+ @MyFunction
941
+ def att_one_v5(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
942
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
943
+ kx = xx * k_mix + sx * (1 - k_mix)
944
+ vx = xx * v_mix + sx * (1 - v_mix)
945
+ rx = xx * r_mix + sx * (1 - r_mix)
946
+
947
+ H = t_decay.shape[0]
948
+ N = x.shape[-1] // H
949
+
950
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(H, 1, N)
951
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(H, N, 1)
952
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(H, 1, N)
953
+
954
+ a = matmul(k, v)
955
+ out = r @ (t_first * a + s)
956
+ s = a + t_decay * s
957
+
958
+ out = out.flatten()
959
+ out = F.group_norm(out.unsqueeze(0), num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5).squeeze(0)
960
+ out = out.to(dtype=x.dtype)
961
+ out = matmul(out, ow, omx, orx, omy, ory)
962
+
963
+ return x + out, xx, s
964
+
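
Editor's note: the v5 single-token path above is, per head (with $r_t\in\mathbb{R}^{1\times N}$ a row vector, $k_t\in\mathbb{R}^{N\times 1}$ a column vector, $v_t\in\mathbb{R}^{1\times N}$, $u=$ t_first, $\lambda=$ t_decay in $(0,1)$ after the loader's exp(-exp(.)) conversion, and state $S\in\mathbb{R}^{N\times N}$):

    $$ a_t = k_t\,v_t, \qquad y_t = r_t\,\bigl(u \odot a_t + S_{t-1}\bigr), \qquad S_t = a_t + \lambda\, S_{t-1}, $$

followed by the per-head group-norm and the output projection.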
965
+ @MyFunction
966
+ def att_seq_v5(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
967
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
968
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
969
+ kx = xx * k_mix + sx * (1 - k_mix)
970
+ vx = xx * v_mix + sx * (1 - v_mix)
971
+ rx = xx * r_mix + sx * (1 - r_mix)
972
+
973
+ H = t_decay.shape[0]
974
+ N = x.shape[-1] // H
975
+ T = x.shape[0]
976
+
977
+ w = t_decay.reshape(-1, 1)
978
+ u = t_first.reshape(-1, 1)
979
+ ws = w.pow(T).reshape(H, 1, 1)
980
+ ind = torch.arange(T-1, -1, -1, device=w.device).unsqueeze(0).repeat(H, 1)
981
+ w = w.repeat(1, T).pow(ind)
982
+ wk = w.reshape(H, 1, T)
983
+ wb = wk.transpose(-2, -1).flip(1)
984
+ w = torch.cat([w[:, 1:], u], dim=1)
985
+ w = F.pad(w, (0, T))
986
+ w = torch.tile(w, [T])
987
+ w = w[:, :-T].reshape(-1, T, 2 * T - 1)
988
+ w = w[:, :, T-1:].reshape(H, T, T)
989
+
990
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
991
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(T, H, N).permute(1, 2, 0)
992
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
993
+
994
+ out = ((r @ k) * w) @ v + (r @ s) * wb
995
+ s = ws * s + (k * wk) @ v
996
+
997
+ out = out.transpose(0, 1).contiguous().reshape(T, H*N)
998
+ out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5)
999
+ out = out.to(dtype=x.dtype)
1000
+ out = matmul(out, ow, omx, orx, omy, ory)
1001
+
1002
+ return x + out, xx[-1,:], s
1003
+
1004
+ ########################################################################################################
1005
+
1006
+ @MyFunction
1007
+ def att_one_v5_1(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, g_mix, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
1008
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
1009
+ kx = xx * k_mix + sx * (1 - k_mix)
1010
+ vx = xx * v_mix + sx * (1 - v_mix)
1011
+ rx = xx * r_mix + sx * (1 - r_mix)
1012
+ gx = xx * g_mix + sx * (1 - g_mix)
1013
+
1014
+ H = t_decay.shape[0]
1015
+ N = x.shape[-1] // H
1016
+
1017
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(H, 1, N)
1018
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(H, N, 1)
1019
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(H, 1, N)
1020
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
1021
+
1022
+ a = matmul(k, v)
1023
+ out = r @ (t_first * a + s)
1024
+ s = a + t_decay * s
1025
+
1026
+ out = out.flatten()
1027
+ out = F.group_norm(out.unsqueeze(0), num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5).squeeze(0)
1028
+ out = out.to(dtype=x.dtype) * g
1029
+ out = matmul(out, ow, omx, orx, omy, ory)
1030
+
1031
+ return x + out, xx, s
1032
+
1033
+ @MyFunction
1034
+ def att_seq_v5_1(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, g_mix, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
1035
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
1036
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
1037
+ kx = xx * k_mix + sx * (1 - k_mix)
1038
+ vx = xx * v_mix + sx * (1 - v_mix)
1039
+ rx = xx * r_mix + sx * (1 - r_mix)
1040
+ gx = xx * g_mix + sx * (1 - g_mix)
1041
+
1042
+ H = t_decay.shape[0]
1043
+ N = x.shape[-1] // H
1044
+ T = x.shape[0]
1045
+
1046
+ w = t_decay.reshape(-1, 1)
1047
+ u = t_first.reshape(-1, 1)
1048
+ ws = w.pow(T).reshape(H, 1, 1)
1049
+ ind = torch.arange(T-1, -1, -1, device=w.device).unsqueeze(0).repeat(H, 1)
1050
+ w = w.repeat(1, T).pow(ind)
1051
+ wk = w.reshape(H, 1, T)
1052
+ wb = wk.transpose(-2, -1).flip(1)
1053
+ w = torch.cat([w[:, 1:], u], dim=1)
1054
+ w = F.pad(w, (0, T))
1055
+ w = torch.tile(w, [T])
1056
+ w = w[:, :-T].reshape(-1, T, 2 * T - 1)
1057
+ w = w[:, :, T-1:].reshape(H, T, T)
1058
+
1059
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
1060
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(T, H, N).permute(1, 2, 0)
1061
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
1062
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
1063
+
1064
+ out = ((r @ k) * w) @ v + (r @ s) * wb
1065
+ s = ws * s + (k * wk) @ v
1066
+
1067
+ out = out.transpose(0, 1).contiguous().reshape(T, H*N)
1068
+ out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5)
1069
+ out = out.to(dtype=x.dtype) * g
1070
+ out = matmul(out, ow, omx, orx, omy, ory)
1071
+
1072
+ return x + out, xx[-1,:], s
1073
+
1074
+ ########################################################################################################
1075
+
1076
+ @MyFunction
1077
+ def att_seq_v5_2(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, g_mix, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
1078
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
1079
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
1080
+ kx = xx * k_mix + sx * (1 - k_mix)
1081
+ vx = xx * v_mix + sx * (1 - v_mix)
1082
+ rx = xx * r_mix + sx * (1 - r_mix)
1083
+ gx = xx * g_mix + sx * (1 - g_mix)
1084
+
1085
+ H = t_decay.shape[0]
1086
+ N = x.shape[-1] // H
1087
+ T = x.shape[0]
1088
+
1089
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
1090
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(T, H, N).permute(1, 2, 0)
1091
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
1092
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
1093
+
1094
+ out = torch.empty((T, H, N), dtype=r.dtype, device=r.device)
1095
+ for t in range(T):
1096
+ rt = r[:,t:t+1,:]
1097
+ kt = k[:,:,t:t+1]
1098
+ vt = v[:,t:t+1,:]
1099
+ at = matmul(kt, vt)
1100
+ out[t] = (rt @ (t_first * at + s)).squeeze(1)
1101
+ s = at + t_decay * s
1102
+
1103
+ out = out.reshape(T, H*N)
1104
+ out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5)
1105
+ out = out.to(dtype=x.dtype) * g
1106
+ out = matmul(out, ow, omx, orx, omy, ory)
1107
+
1108
+ return x + out, xx[-1,:], s
1109
+
1110
+ ########################################################################################################
1111
+
1112
+ @MyFunction
1113
+ def att_one_v6_0(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, x_maa, w_maa, k_maa, v_maa, r_maa, g_maa, tm_w1, tm_w2, td_w1, td_w2, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
1114
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
1115
+
1116
+ sx = sx - xx
1117
+ xxx = xx + sx * x_maa
1118
+ xxx = torch.tanh(xxx @ tm_w1).view(5, 1, -1)
1119
+ xxx = torch.bmm(xxx, tm_w2).view(5, -1)
1120
+ mw, mk, mv, mr, mg = xxx.unbind(dim=0)
1121
+
1122
+ wx = xx + sx * (w_maa + mw)
1123
+ kx = xx + sx * (k_maa + mk)
1124
+ vx = xx + sx * (v_maa + mv)
1125
+ rx = xx + sx * (r_maa + mr)
1126
+ gx = xx + sx * (g_maa + mg)
1127
+
1128
+ H = t_decay.shape[0]
1129
+ N = x.shape[-1] // H
1130
+
1131
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(H, 1, N)
1132
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(H, N, 1)
1133
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(H, 1, N)
1134
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
1135
+
1136
+ w = t_decay + (torch.tanh(wx @ td_w1) @ td_w2).float().view(H, N, 1)
1137
+ w = torch.exp(-torch.exp(w.float()))
1138
+
1139
+ a = matmul(k, v)
1140
+ out = r @ (t_first * a + s)
1141
+ s = a + w * s
1142
+
1143
+ out = out.flatten()
1144
+ out = F.group_norm(out.unsqueeze(0), num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5).squeeze(0)
1145
+ out = out.to(dtype=x.dtype) * g
1146
+ out = matmul(out, ow, omx, orx, omy, ory)
1147
+
1148
+ return x + out, xx, s
1149
+
1150
+ @MyFunction
1151
+ def att_seq_v6_0(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, x_maa, w_maa, k_maa, v_maa, r_maa, g_maa, tm_w1, tm_w2, td_w1, td_w2, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
1152
+ H = t_decay.shape[0]
1153
+ N = x.shape[-1] // H
1154
+ T = x.shape[0]
1155
+
1156
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
1157
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:])) - xx
1158
+ xxx = xx + sx * x_maa
1159
+ xxx = torch.tanh(xxx @ tm_w1).view(T, 5, -1).transpose(0, 1)
1160
+ xxx = torch.bmm(xxx, tm_w2).view(5, T, -1)
1161
+ mw, mk, mv, mr, mg = xxx.unbind(dim=0)
1162
+
1163
+ wx = xx + sx * (w_maa + mw)
1164
+ kx = xx + sx * (k_maa + mk)
1165
+ vx = xx + sx * (v_maa + mv)
1166
+ rx = xx + sx * (r_maa + mr)
1167
+ gx = xx + sx * (g_maa + mg)
1168
+
1169
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
1170
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32).view(T, H, N).permute(1, 2, 0)
1171
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32).view(T, H, N).transpose(0, 1)
1172
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
1173
+
1174
+ w = t_decay.view(1, H, N, 1) + (torch.tanh(wx @ td_w1) @ td_w2).float().view(T, H, N, 1)
1175
+ w = torch.exp(-torch.exp(w.float()))
1176
+ out = torch.empty((T, H, N), dtype=r.dtype, device=r.device)
1177
+ for t in range(T):
1178
+ rt = r[:,t:t+1,:]
1179
+ kt = k[:,:,t:t+1]
1180
+ vt = v[:,t:t+1,:]
1181
+ at = matmul(kt, vt)
1182
+ out[t] = (rt @ (t_first * at + s)).squeeze(1)
1183
+ s = at + w[t] * s
1184
+
1185
+ out = out.reshape(T, H*N)
1186
+ out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5)
1187
+ out = out.to(dtype=x.dtype) * g
1188
+ out = matmul(out, ow, omx, orx, omy, ory)
1189
+
1190
+ return x + out, xx[-1,:], s
1191
+
1192
+ ########################################################################################################
1193
+
1194
+ if os.environ["RWKV_CUDA_ON"] == '1':
1195
+ @MyFunction
1196
+ def cuda_att_seq(self, x, sx, aa, bb, pp, ln_w, ln_b, k_mix, v_mix, r_mix, t_decay, t_first, kw, vw, rw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, omx, orx, omy, ory):
1197
+ T, C = x.shape
1198
+ xx = F.layer_norm(x, (C,), weight=ln_w, bias=ln_b)
1199
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
1200
+ kx = xx * k_mix + sx * (1 - k_mix)
1201
+ vx = xx * v_mix + sx * (1 - v_mix)
1202
+ rx = xx * r_mix + sx * (1 - r_mix)
1203
+
1204
+ r = torch.sigmoid(matmul(rx, rw, rmx, rrx, rmy, rry))
1205
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
1206
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
1207
+ y, aa, bb, pp = cuda_wkv(T, C, t_decay, t_first, k, v, aa, bb, pp)
1208
+
1209
+ out = matmul(r * y.to(x.dtype), ow, omx, orx, omy, ory)
1210
+ return x + out, xx[-1,:], aa, bb, pp
1211
+
1212
+ @MyFunction
+ def v5_2_before(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, g_mix, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:]))
+ kx = xx * k_mix + sx * (1 - k_mix)
+ vx = xx * v_mix + sx * (1 - v_mix)
+ rx = xx * r_mix + sx * (1 - r_mix)
+ gx = xx * g_mix + sx * (1 - g_mix)
+
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32)
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
+
+ return r, k, v, g, xx[-1,:], s.transpose(-1,-2).contiguous()
+
+ @MyFunction
+ def v5_2_after(self, t_decay, out, s, x, xxx, g, lx_w, lx_b, ow, omx, orx, omy, ory):
+ H = t_decay.shape[0]
+ N = x.shape[-1] // H
+ T = x.shape[0]
+
+ s = s.transpose(-1,-2)
+ out = out.reshape(T, H*N)
+ out = F.group_norm(out, num_groups=H, weight=lx_w, bias=lx_b, eps = 64e-5)
+ out = out.to(dtype=x.dtype) * g
+ out = matmul(out, ow, omx, orx, omy, ory)
+
+ return x + out, xxx, s
+
+ def cuda_att_seq_v5_2(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, g_mix, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
+ H = t_decay.shape[0]
+ N = x.shape[-1] // H
+ T = x.shape[0]
+
+ r, k, v, g, xxx, ss = self.v5_2_before(x, sx, s, ln_w, ln_b, lx_w, lx_b, k_mix, v_mix, r_mix, g_mix, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory)
+
+ out, s = self.RUN_RWKV_5(1, T, self.args.n_att, H, ss, r, k, v, w=t_decay, u=t_first)
+
+ return self.v5_2_after(t_decay, out, s, x, xxx, g, lx_w, lx_b, ow, omx, orx, omy, ory)
+
+ @MyFunction
+ def v6_0_before(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, x_maa, w_maa, k_maa, v_maa, r_maa, g_maa, tm_w1, tm_w2, td_w1, td_w2, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
+ H = t_decay.shape[0]
+ N = x.shape[-1] // H
+ T = x.shape[0]
+
+ xx = F.layer_norm(x, (x.shape[-1],), weight=ln_w, bias=ln_b)
+ sx = torch.cat((sx.unsqueeze(0), xx[:-1,:])) - xx
+ xxx = xx + sx * x_maa
+ xxx = torch.tanh(xxx @ tm_w1).view(T, 5, -1).transpose(0, 1)
+ xxx = torch.bmm(xxx, tm_w2).view(5, T, -1)
+ mw, mk, mv, mr, mg = xxx.unbind(dim=0)
+
+ wx = xx + sx * (w_maa + mw)
+ kx = xx + sx * (k_maa + mk)
+ vx = xx + sx * (v_maa + mv)
+ rx = xx + sx * (r_maa + mr)
+ gx = xx + sx * (g_maa + mg)
+
+ r = matmul(rx, rw, rmx, rrx, rmy, rry, output_dtype=torch.float32)
+ k = matmul(kx, kw, kmx, krx, kmy, kry, output_dtype=torch.float32)
+ v = matmul(vx, vw, vmx, vrx, vmy, vry, output_dtype=torch.float32)
+ g = F.silu(matmul(gx, gw, gmx, grx, gmy, gry))
+
+ w = t_decay.view(1, H, N, 1) + (torch.tanh(wx @ td_w1) @ td_w2).float().view(T, H, N, 1)
+
+ return r, k, v, g, w, xx[-1,:], s.transpose(-1,-2).contiguous()
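In the v6.0 path above, the five mix coefficients are no longer static: `tm_w1`/`tm_w2` form a small low-rank network that produces per-token offsets `mw, mk, mv, mr, mg`. A toy sketch of just that step (not part of the diff; shapes are assumed from the code above):

```python
import torch

T, C, K = 4, 8, 3                                  # toy sizes; K is the low-rank width
xx, sx = torch.randn(T, C), torch.randn(T, C)
x_maa = torch.rand(C)
tm_w1 = torch.randn(C, 5 * K)                      # projects into 5 small heads
tm_w2 = torch.randn(5, K, C)                       # projects each head back to C channels
xxx = torch.tanh((xx + sx * x_maa) @ tm_w1).view(T, 5, K).transpose(0, 1)   # (5, T, K)
mw, mk, mv, mr, mg = torch.bmm(xxx, tm_w2).view(5, T, C).unbind(0)          # per-token offsets
```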
+
+ def cuda_att_seq_v6_0(self, x, sx, s, ln_w, ln_b, lx_w, lx_b, x_maa, w_maa, k_maa, v_maa, r_maa, g_maa, tm_w1, tm_w2, td_w1, td_w2, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory):
+ H = t_decay.shape[0]
+ N = x.shape[-1] // H
+ T = x.shape[0]
+
+ r, k, v, g, w, xxx, ss = self.v6_0_before(x, sx, s, ln_w, ln_b, lx_w, lx_b, x_maa, w_maa, k_maa, v_maa, r_maa, g_maa, tm_w1, tm_w2, td_w1, td_w2, t_decay, t_first, kw, vw, rw, gw, ow, kmx, krx, kmy, kry, vmx, vrx, vmy, vry, rmx, rrx, rmy, rry, gmx, grx, gmy, gry, omx, orx, omy, ory)
+
+ out, s = self.RUN_RWKV_6(1, T, self.args.n_att, H, ss, r, k, v, w=w, u=t_first)
+ return self.v5_2_after(t_decay, out, s, x, xxx, g, lx_w, lx_b, ow, omx, orx, omy, ory)
+
+ ########################################################################################################
+
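`RUN_RWKV_5` / `RUN_RWKV_6` hand the whole sequence to the bundled HIP/ROCm kernels. For reference, a minimal pure-PyTorch sketch of the per-head recurrence they are expected to compute, assuming the standard RWKV-5 formulation with r, k, v already split into H heads of size N (v6 is the same loop except the decay `w` varies per timestep); this is a sketch, not the kernel itself:

```python
import torch

def wkv5_reference(r, k, v, s, w, u):
    # r, v: (H, T, N); k: (H, N, T); s: (H, N, N) state; w (decay), u (bonus): (H, N, 1)
    H, T, N = r.shape
    out = torch.empty(T, H, N, dtype=r.dtype)
    for t in range(T):
        rt = r[:, t:t+1, :]                        # (H, 1, N)
        kt = k[:, :, t:t+1]                        # (H, N, 1)
        vt = v[:, t:t+1, :]                        # (H, 1, N)
        at = kt @ vt                               # (H, N, N) outer product k_t v_t^T
        out[t] = (rt @ (u * at + s)).squeeze(1)    # the current token gets the u bonus
        s = at + w * s                             # state decays channel-wise by w
    return out.reshape(T, H * N), s                # matches the reshape in v5_2_after
```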
+ def forward(self, tokens, state, full_output=False):
+ with torch.no_grad():
+ w = self.w
+ args = self.args
+
+ if state == None:
+ if self.version == 4:
+ state = [None] * args.n_layer * 5
+ for i in range(args.n_layer): # state: 0=att_xx 1=att_aa 2=att_bb 3=att_pp 4=ffn_xx
+ dd = self.strategy[i]
+ dev = dd.device
+ atype = dd.atype
+ state[i*5+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
+ state[i*5+1] = torch.zeros(args.n_att, dtype=torch.float, requires_grad=False, device=dev).contiguous()
+ state[i*5+2] = torch.zeros(args.n_att, dtype=torch.float, requires_grad=False, device=dev).contiguous()
+ state[i*5+3] = torch.zeros(args.n_att, dtype=torch.float, requires_grad=False, device=dev).contiguous() - 1e30
+ state[i*5+4] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
+ elif int(self.version) in [5,6]:
+ state = [None] * args.n_layer * 3
+ for i in range(args.n_layer): # state: 0=att_xx 1=att_kv 2=ffn_xx
+ dd = self.strategy[i]
+ dev = dd.device
+ atype = dd.atype
+ state[i*3+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
+ if args.time_state:
+ state[i*3+1] = w[f'blocks.{i}.att.time_state'].transpose(1,2).to(dtype=torch.float, device=dev).requires_grad_(False).contiguous()
+ else:
+ state[i*3+1] = torch.zeros((args.n_head, args.n_att//args.n_head, args.n_att//args.n_head), dtype=torch.float, requires_grad=False, device=dev).contiguous()
+ state[i*3+2] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
+
+ seq_mode = len(tokens) > 1
+
+ x = w['emb.weight'][tokens if seq_mode else tokens[0]]
+
+ for i in range(args.n_layer):
+ bbb = f'blocks.{i}.'
+ att = f'blocks.{i}.att.'
+ ffn = f'blocks.{i}.ffn.'
+ dd = self.strategy[i]
+ dev = dd.device
+ atype = dd.atype
+ wtype = dd.wtype
+ if seq_mode:
+ cuda_applicable = os.environ["RWKV_CUDA_ON"] == '1' and 'cuda' in str(dev)
+ if cuda_applicable:
+ ATT = self.cuda_att_seq
+ else:
+ ATT = self.att_seq
+ if self.version == 5:
+ ATT = self.att_seq_v5
+ elif self.version == 5.1:
+ ATT = self.att_seq_v5_1
+ elif self.version == 5.2:
+ ATT = self.att_seq_v5_2
+ if cuda_applicable:
+ ATT = self.cuda_att_seq_v5_2
+ elif self.version == 6.0:
+ ATT = self.att_seq_v6_0
+ if cuda_applicable:
+ ATT = self.cuda_att_seq_v6_0
+ FFN = self.ffn_seq
+ if self.version >= 6.0:
+ FFN = self.ffn_seq_v6
+ else:
+ ATT = self.att_one
+ if self.version == 5:
+ ATT = self.att_one_v5
+ elif self.version == 5.1:
+ ATT = self.att_one_v5_1
+ elif self.version == 5.2:
+ ATT = self.att_one_v5_1 # same as v5.1
+ elif self.version == 6.0:
+ ATT = self.att_one_v6_0
+ FFN = self.ffn_one
+ if self.version >= 6.0:
+ FFN = self.ffn_one_v6
+
+ x = x.to(dtype=atype, device=dev)
+
+ kw = w[f'{att}key.weight']
+ vw = w[f'{att}value.weight']
+ rw = w[f'{att}receptance.weight']
+ ow = w[f'{att}output.weight']
+ if dd.stream:
+ kw = kw.to(device=dev, non_blocking=True)
+ vw = vw.to(device=dev, non_blocking=True)
+ rw = rw.to(device=dev, non_blocking=True)
+ ow = ow.to(device=dev, non_blocking=True)
+ kmx = w[f'{att}key.weight_mx'] if wtype == torch.uint8 else x
+ krx = w[f'{att}key.weight_rx'] if wtype == torch.uint8 else x
+ kmy = w[f'{att}key.weight_my'] if wtype == torch.uint8 else x
+ kry = w[f'{att}key.weight_ry'] if wtype == torch.uint8 else x
+ vmx = w[f'{att}value.weight_mx'] if wtype == torch.uint8 else x
+ vrx = w[f'{att}value.weight_rx'] if wtype == torch.uint8 else x
+ vmy = w[f'{att}value.weight_my'] if wtype == torch.uint8 else x
+ vry = w[f'{att}value.weight_ry'] if wtype == torch.uint8 else x
+ rmx = w[f'{att}receptance.weight_mx'] if wtype == torch.uint8 else x
+ rrx = w[f'{att}receptance.weight_rx'] if wtype == torch.uint8 else x
+ rmy = w[f'{att}receptance.weight_my'] if wtype == torch.uint8 else x
+ rry = w[f'{att}receptance.weight_ry'] if wtype == torch.uint8 else x
+ omx = w[f'{att}output.weight_mx'] if wtype == torch.uint8 else x
+ orx = w[f'{att}output.weight_rx'] if wtype == torch.uint8 else x
+ omy = w[f'{att}output.weight_my'] if wtype == torch.uint8 else x
+ ory = w[f'{att}output.weight_ry'] if wtype == torch.uint8 else x
+ if self.version in [5.1, 5.2, 6.0]:
+ gw = w[f'{att}gate.weight']
+ if dd.stream:
+ gw = gw.to(device=dev, non_blocking=True)
+ gmx = w[f'{att}gate.weight_mx'] if wtype == torch.uint8 else x
+ grx = w[f'{att}gate.weight_rx'] if wtype == torch.uint8 else x
+ gmy = w[f'{att}gate.weight_my'] if wtype == torch.uint8 else x
+ gry = w[f'{att}gate.weight_ry'] if wtype == torch.uint8 else x
+ if self.version == 4:
+ x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3] = ATT(
+ x, state[i*5+0], state[i*5+1], state[i*5+2], state[i*5+3],
+ w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
+ w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
+ w[f'{att}time_decay'], w[f'{att}time_first'],
+ kw, vw, rw, ow,
+ kmx, krx, kmy, kry,
+ vmx, vrx, vmy, vry,
+ rmx, rrx, rmy, rry,
+ omx, orx, omy, ory,
+ )
+ elif self.version == 5:
+ x, state[i*3+0], state[i*3+1] = ATT(
+ x, state[i*3+0], state[i*3+1],
+ w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
+ w[f'{att}ln_x.weight'], w[f'{att}ln_x.bias'],
+ w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'],
+ w[f'{att}time_decay'], w[f'{att}time_first'],
+ kw, vw, rw, ow,
+ kmx, krx, kmy, kry,
+ vmx, vrx, vmy, vry,
+ rmx, rrx, rmy, rry,
+ omx, orx, omy, ory,
+ )
+ elif self.version in [5.1, 5.2]:
+ x, state[i*3+0], state[i*3+1] = ATT(
+ x, state[i*3+0], state[i*3+1],
+ w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
+ w[f'{att}ln_x.weight'], w[f'{att}ln_x.bias'],
+ w[f'{att}time_mix_k'], w[f'{att}time_mix_v'], w[f'{att}time_mix_r'], w[f'{att}time_mix_g'],
+ w[f'{att}time_decay'], w[f'{att}time_first'],
+ kw, vw, rw, gw, ow,
+ kmx, krx, kmy, kry,
+ vmx, vrx, vmy, vry,
+ rmx, rrx, rmy, rry,
+ gmx, grx, gmy, gry,
+ omx, orx, omy, ory,
+ )
+ elif self.version == 6.0:
+ x, state[i*3+0], state[i*3+1] = ATT(
+ x, state[i*3+0], state[i*3+1],
+ w[f'{bbb}ln1.weight'], w[f'{bbb}ln1.bias'],
+ w[f'{att}ln_x.weight'], w[f'{att}ln_x.bias'],
+ w[f'{att}time_maa_x'], w[f'{att}time_maa_w'], w[f'{att}time_maa_k'], w[f'{att}time_maa_v'], w[f'{att}time_maa_r'], w[f'{att}time_maa_g'],
+ w[f'{att}time_maa_w1'], w[f'{att}time_maa_w2'], w[f'{att}time_decay_w1'], w[f'{att}time_decay_w2'],
+ w[f'{att}time_decay'], w[f'{att}time_first'],
+ kw, vw, rw, gw, ow,
+ kmx, krx, kmy, kry,
+ vmx, vrx, vmy, vry,
+ rmx, rrx, rmy, rry,
+ gmx, grx, gmy, gry,
+ omx, orx, omy, ory,
+ )
+ if dd.stream:
+ del kw, vw, rw, ow
+ if self.version in [5.1, 5.2, 6.0]:
+ del gw
+
+ kw = w[f'{ffn}key.weight']
+ vw = w[f'{ffn}value.weight']
+ rw = w[f'{ffn}receptance.weight']
+ if dd.stream:
+ kw = kw.to(device=dev, non_blocking=True)
+ vw = vw.to(device=dev, non_blocking=True)
+ rw = rw.to(device=dev, non_blocking=True)
+ kmx = w[f'{ffn}key.weight_mx'] if wtype == torch.uint8 else x
+ krx = w[f'{ffn}key.weight_rx'] if wtype == torch.uint8 else x
+ kmy = w[f'{ffn}key.weight_my'] if wtype == torch.uint8 else x
+ kry = w[f'{ffn}key.weight_ry'] if wtype == torch.uint8 else x
+ vmx = w[f'{ffn}value.weight_mx'] if wtype == torch.uint8 else x
+ vrx = w[f'{ffn}value.weight_rx'] if wtype == torch.uint8 else x
+ vmy = w[f'{ffn}value.weight_my'] if wtype == torch.uint8 else x
+ vry = w[f'{ffn}value.weight_ry'] if wtype == torch.uint8 else x
+ rmx = w[f'{ffn}receptance.weight_mx'] if wtype == torch.uint8 else x
+ rrx = w[f'{ffn}receptance.weight_rx'] if wtype == torch.uint8 else x
+ rmy = w[f'{ffn}receptance.weight_my'] if wtype == torch.uint8 else x
+ rry = w[f'{ffn}receptance.weight_ry'] if wtype == torch.uint8 else x
+ if self.version == 4:
+ offset = i*5+4
+ elif int(self.version) in [5,6]:
+ offset = i*3+2
+ if self.version < 6.0:
+ x, state[offset] = FFN(
+ x, state[offset],
+ w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
+ w[f'{ffn}time_mix_k'], w[f'{ffn}time_mix_r'],
+ kw, vw, rw,
+ kmx, krx, kmy, kry,
+ vmx, vrx, vmy, vry,
+ rmx, rrx, rmy, rry,
+ )
+ else:
+ x, state[offset] = FFN(
+ x, state[offset],
+ w[f'{bbb}ln2.weight'], w[f'{bbb}ln2.bias'],
+ w[f'{ffn}time_maa_k'], w[f'{ffn}time_maa_r'],
+ kw, vw, rw,
+ kmx, krx, kmy, kry,
+ vmx, vrx, vmy, vry,
+ rmx, rrx, rmy, rry,
+ )
+ if dd.stream:
+ del kw, vw, rw
+
+ if self.RESCALE_LAYER > 0:
+ if (i+1) % self.RESCALE_LAYER == 0:
+ x = x / 2
+
+ dd = self.strategy[args.n_layer]
+ x = x[-1,:] if (seq_mode and (not full_output)) else x
+ x = x.to(dtype=dd.atype, device=dd.device)
+
+ x = F.layer_norm(x, (args.n_embd,), weight=w['ln_out.weight'], bias=w['ln_out.bias'])
+ if w['head.weight'].dtype != torch.uint8:
+ x = x @ w['head.weight']
+ else:
+ if seq_mode and full_output:
+ x = mm8_seq(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
+ else:
+ x = mm8_one(x, w['head.weight'], w['head.weight_mx'], w['head.weight_rx'], w['head.weight_my'], w['head.weight_ry'])
+
+ return x.float(), state
+
+ if os.environ.get('RWKV_V7_ON') == '1':
+ RWKV = RWKV_x070
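A hypothetical way to drive this `forward()` from Python; the module path, checkpoint path and strategy string are illustrative, and the constructor is assumed to keep the signature of the upstream `rwkv` pip package:

```python
import os
os.environ["RWKV_CUDA_ON"] = '1'   # '1': use the bundled HIP/ROCm kernels; '0': pure PyTorch fallback
os.environ["RWKV_V7_ON"] = '0'     # '1' would alias RWKV to the RWKV_x070 class, as above

from rwkv_rocm.model import RWKV   # assuming this file lands as rwkv_rocm/model.py

model = RWKV(model='/path/to/rwkv-model.pth', strategy='cuda fp16')
logits, state = model.forward([187, 510, 1563], None)    # prime with some token ids
logits, state = model.forward([310], state)               # continue, reusing the recurrent state
```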
rwkv_rocm/rwkv_tokenizer.py ADDED
@@ -0,0 +1,103 @@
+ ########################################################################################################
+ # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
+ ########################################################################################################
+
+ class TRIE:
+ __slots__ = tuple("ch,to,values,front".split(","))
+ to:list
+ values:set
+ def __init__(self, front=None, ch=None):
+ self.ch = ch
+ self.to = [None for ch in range(256)]
+ self.values = set()
+ self.front = front
+
+ def __repr__(self):
+ fr = self
+ ret = []
+ while(fr!=None):
+ if(fr.ch!=None):
+ ret.append(fr.ch)
+ fr = fr.front
+ return "<TRIE %s %s>"%(ret[::-1], self.values)
+
+ def add(self, key:bytes, idx:int=0, val=None):
+ if(idx == len(key)):
+ if(val is None):
+ val = key
+ self.values.add(val)
+ return self
+ ch = key[idx]
+ if(self.to[ch] is None):
+ self.to[ch] = TRIE(front=self, ch=ch)
+ return self.to[ch].add(key, idx=idx+1, val=val)
+
+ def find_longest(self, key:bytes, idx:int=0):
+ u:TRIE = self
+ ch:int = key[idx]
+
+ while(u.to[ch] is not None):
+ u = u.to[ch]
+ idx += 1
+ if(u.values):
+ ret = idx, u, u.values
+ if(idx==len(key)):
+ break
+ ch = key[idx]
+ return ret
+
+ class TRIE_TOKENIZER():
+ def __init__(self, file_name):
+ self.idx2token = {}
+ sorted = [] # must be already sorted
+ with open(file_name, "r", encoding="utf-8") as f:
+ lines = f.readlines()
+ for l in lines:
+ idx = int(l[:l.index(' ')])
+ x = eval(l[l.index(' '):l.rindex(' ')])
+ x = x.encode("utf-8") if isinstance(x, str) else x
+ assert isinstance(x, bytes)
+ assert len(x) == int(l[l.rindex(' '):])
+ sorted += [x]
+ self.idx2token[idx] = x
+
+ self.token2idx = {}
+ for k,v in self.idx2token.items():
+ self.token2idx[v] = int(k)
+
+ self.root = TRIE()
+ for t, i in self.token2idx.items():
+ _ = self.root.add(t, val=(t, i))
+
+ def encodeBytes(self, src:bytes):
+ idx:int = 0
+ tokens = []
+ while (idx < len(src)):
+ _idx:int = idx
+ idx, _, values = self.root.find_longest(src, idx)
+ assert(idx != _idx)
+ _, token = next(iter(values))
+ tokens.append(token)
+ return tokens
+
+ def decodeBytes(self, tokens):
+ return b''.join(map(lambda i: self.idx2token[i], tokens))
+
+ def encode(self, src):
+ return self.encodeBytes(src.encode("utf-8"))
+
+ def decode(self, tokens):
+ try:
+ return self.decodeBytes(tokens).decode('utf-8')
+ except:
+ return '\ufffd' # bad utf-8
+
+ def printTokens(self, tokens):
+ for i in tokens:
+ s = self.idx2token[i]
+ try:
+ s = s.decode('utf-8')
+ except:
+ pass
+ print(f'{repr(s)}{i}', end=' ')
+ print()
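A hypothetical round trip with the trie tokenizer (vocab path illustrative); `encode()` greedily takes the longest matching vocabulary entry at each byte position via `TRIE.find_longest()`:

```python
tok = TRIE_TOKENIZER('rwkv_rocm/rwkv_vocab_v20230424.txt')
ids = tok.encode("Hello, 世界")
assert tok.decode(ids) == "Hello, 世界"
tok.printTokens(ids)   # prints each token's bytes next to its id
```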
rwkv_rocm/rwkv_vocab_v20230424.txt ADDED
The diff for this file is too large to render. See raw diff
 
rwkv_rocm/utils.py ADDED
@@ -0,0 +1,135 @@
+ ########################################################################################################
+ # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
+ ########################################################################################################
+
+ import os, sys
+ import numpy as np
+ import torch
+ from torch.nn import functional as F
+
+ class PIPELINE_ARGS():
+ def __init__(self, temperature=1.0, top_p=0.85, top_k=0, alpha_frequency=0.2, alpha_presence=0.2, alpha_decay=0.996, token_ban=[], token_stop=[], chunk_len=256):
+ self.temperature = temperature
+ self.top_p = top_p
+ self.top_k = top_k
+ self.alpha_frequency = alpha_frequency # Frequency Penalty (as in GPT-3)
+ self.alpha_presence = alpha_presence # Presence Penalty (as in GPT-3)
+ self.alpha_decay = alpha_decay # gradually decay the penalty
+ self.token_ban = token_ban # ban the generation of some tokens
+ self.token_stop = token_stop # stop generation whenever you see any token here
+ self.chunk_len = chunk_len # split input into chunks to save VRAM (shorter -> slower)
+
+ class PIPELINE():
+ def __init__(self, model, WORD_NAME):
+ self.model = model
+ if WORD_NAME == 'cl100k_base':
+ import tiktoken
+ self.tokenizer = tiktoken.get_encoding(WORD_NAME)
+ elif WORD_NAME == 'rwkv_vocab_v20230424':
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+ from rwkv_tokenizer import TRIE_TOKENIZER
+ self.tokenizer = TRIE_TOKENIZER(os.path.dirname(os.path.abspath(__file__)) + '/rwkv_vocab_v20230424.txt')
+ else:
+ from tokenizers import Tokenizer
+ self.tokenizer = Tokenizer.from_file(WORD_NAME)
+
+ def refine_context(self, context):
+ context = context.strip().split('\n')
+ for c in range(len(context)):
+ context[c] = context[c].strip().strip('\u3000').strip('\r')
+ context = list(filter(lambda c: c != '', context))
+ context = '\n' + ('\n'.join(context)).strip()
+ if context == '':
+ context = '\n'
+ return context
+
+ def encode(self, x):
+ if 'Tokenizer' in str(type(self.tokenizer)):
+ return self.tokenizer.encode(x).ids
+ else:
+ return self.tokenizer.encode(x)
+
+ def decode(self, x):
+ return self.tokenizer.decode(x)
+
+ def sample_logits(self, logits, temperature=1.0, top_p=0.85, top_k=0):
+ if temperature == 0:
+ temperature = 1.0
+ top_p = 0
+ probs = F.softmax(logits.float(), dim=-1)
+ top_k = int(top_k)
+ # 'privateuseone' is the type of custom devices like `torch_directml.device()`
+ if probs.device.type in ['cpu', 'privateuseone']:
+ probs = probs.cpu().numpy()
+ sorted_ids = np.argsort(probs)
+ sorted_probs = probs[sorted_ids][::-1]
+ cumulative_probs = np.cumsum(sorted_probs)
+ cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
+ probs[probs < cutoff] = 0
+ if top_k < len(probs) and top_k > 0:
+ probs[sorted_ids[:-top_k]] = 0
+ if temperature != 1.0:
+ probs = probs ** (1.0 / temperature)
+ probs = probs / np.sum(probs)
+ out = np.random.choice(a=len(probs), p=probs)
+ return int(out)
+ else:
+ sorted_ids = torch.argsort(probs)
+ sorted_probs = probs[sorted_ids]
+ sorted_probs = torch.flip(sorted_probs, dims=(0,))
+ cumulative_probs = torch.cumsum(sorted_probs, dim=-1).cpu().numpy()
+ cutoff = float(sorted_probs[np.argmax(cumulative_probs >= top_p)])
+ probs[probs < cutoff] = 0
+ if top_k < len(probs) and top_k > 0:
+ probs[sorted_ids[:-top_k]] = 0
+ if temperature != 1.0:
+ probs = probs ** (1.0 / temperature)
+ out = torch.multinomial(probs, num_samples=1)[0]
+ return int(out)
+
+ def generate(self, ctx, token_count=100, args=PIPELINE_ARGS(), callback=None, state=None):
+ all_tokens = []
+ out_last = 0
+ out_str = ''
+ occurrence = {}
+ for i in range(token_count):
+
+ # forward & adjust prob.
+ tokens = self.encode(ctx) if i == 0 else [token]
+ while len(tokens) > 0:
+ out, state = self.model.forward(tokens[:args.chunk_len], state)
+ tokens = tokens[args.chunk_len:]
+
+ for n in args.token_ban:
+ out[n] = -float('inf')
+ for n in occurrence:
+ out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+ # sampler
+ token = self.sample_logits(out, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k)
+ if token in args.token_stop:
+ break
+ all_tokens += [token]
+ for xxx in occurrence:
+ occurrence[xxx] *= args.alpha_decay
+
+ ttt = self.decode([token])
+ www = 1
+ if ttt in ' \t0123456789':
+ www = 0
+ # elif ttt in '\r\n,.;?!"\':+-*/=#@$%^&_`~|<>\\()[]{},。;“”:?!()【】':
+ # www = 0.5
+ if token not in occurrence:
+ occurrence[token] = www
+ else:
+ occurrence[token] += www
+ # print(occurrence) # debug
+
+ # output
+ tmp = self.decode(all_tokens[out_last:])
+ if '\ufffd' not in tmp: # is valid utf-8 string?
+ if callback:
+ callback(tmp)
+ out_str += tmp
+ out_last = i + 1
+ return out_str
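A hypothetical end-to-end sketch tying the pieces together (`model` built as in the earlier sketch; all paths and settings are illustrative):

```python
from rwkv_rocm.utils import PIPELINE, PIPELINE_ARGS

pipeline = PIPELINE(model, "rwkv_vocab_v20230424")
args = PIPELINE_ARGS(temperature=1.0, top_p=0.7, top_k=100,
                     alpha_frequency=0.25, alpha_presence=0.25,
                     token_ban=[0], token_stop=[], chunk_len=256)
pipeline.generate("English: Hello, how are you?\nChinese:",
                  token_count=200, args=args,
                  callback=lambda s: print(s, end='', flush=True))
```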