Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- parrot/lib/libsqlite3.so.0.8.6 +3 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh +132 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp +342 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp +48 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp +639 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp +43 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp +16 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp +268 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp +22 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh +132 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu +126 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu +186 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp +216 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu +128 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu +194 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp +127 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp +112 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h +37 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu +873 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp +1055 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu +335 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu +416 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu +199 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu +153 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu +710 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu +503 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu +74 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp +2020 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu +71 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu +263 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu +562 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu +727 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h +292 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h +435 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h +248 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu +2134 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu +701 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp +29 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam.cpp +16 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam_impl.cpp +247 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/fused_adam_frontend.cpp +25 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/multi_tensor_adam.dp.cpp +159 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/common/custom_cuda_kernel.dp.cpp +92 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/cpu_adagrad.h +120 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/type_shim.h +155 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py +9 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py +26 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py +131 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py +196 -0
.gitattributes
CHANGED
|
@@ -179,3 +179,4 @@ parrot/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.
|
|
| 179 |
parrot/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 180 |
parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
|
| 181 |
parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 179 |
parrot/lib/python3.10/site-packages/mpmath/__pycache__/function_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 180 |
parrot/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
|
| 181 |
parrot/lib/python3.10/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
|
| 182 |
+
parrot/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/libsqlite3.so.0.8.6
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
|
| 3 |
+
size 1543808
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Copyright NVIDIA/apex
|
| 8 |
+
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#include <ATen/ATen.h>
|
| 12 |
+
#include <ATen/AccumulateType.h>
|
| 13 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 14 |
+
#include <ATen/cuda/Exceptions.h>
|
| 15 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 16 |
+
#include "compat.h"
|
| 17 |
+
|
| 18 |
+
#include <assert.h>
|
| 19 |
+
|
| 20 |
+
// #include <iostream>
|
| 21 |
+
|
| 22 |
+
// This header is the one-stop shop for all your multi-tensor apply needs.
|
| 23 |
+
|
| 24 |
+
// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson)
|
| 25 |
+
constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
|
| 26 |
+
constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};
|
| 27 |
+
|
| 28 |
+
template <int n>
|
| 29 |
+
struct TensorListMetadata {
|
| 30 |
+
void* addresses[n][depth_to_max_tensors[n - 1]];
|
| 31 |
+
int sizes[depth_to_max_tensors[n - 1]];
|
| 32 |
+
unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
|
| 33 |
+
int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int.
|
| 34 |
+
int start_tensor_this_launch;
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
template <typename T, typename U, typename... ArgTypes>
|
| 38 |
+
__global__ void multi_tensor_apply_kernel(int chunk_size,
|
| 39 |
+
volatile int* noop_flag,
|
| 40 |
+
T tl,
|
| 41 |
+
U callable,
|
| 42 |
+
ArgTypes... args)
|
| 43 |
+
{
|
| 44 |
+
// Hand the chunk information to the user-supplied functor to process however it likes.
|
| 45 |
+
callable(chunk_size, noop_flag, tl, args...);
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
template <int depth, typename T, typename... ArgTypes>
|
| 49 |
+
void multi_tensor_apply(int block_size,
|
| 50 |
+
int chunk_size,
|
| 51 |
+
const at::Tensor& noop_flag,
|
| 52 |
+
const std::vector<std::vector<at::Tensor>>& tensor_lists,
|
| 53 |
+
T callable,
|
| 54 |
+
ArgTypes... args)
|
| 55 |
+
{
|
| 56 |
+
TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
|
| 57 |
+
int len0 = tensor_lists[0].size();
|
| 58 |
+
TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
|
| 59 |
+
auto ref_device = tensor_lists[0][0].device();
|
| 60 |
+
TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda");
|
| 61 |
+
for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices
|
| 62 |
+
{
|
| 63 |
+
TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
|
| 64 |
+
for (int t = 0; t < tensor_lists[l].size(); t++) {
|
| 65 |
+
// TODO: Print which tensor fails.
|
| 66 |
+
bool contiguous_memory = tensor_lists[l][t].is_contiguous();
|
| 67 |
+
#ifdef VERSION_GE_1_5
|
| 68 |
+
contiguous_memory = (contiguous_memory ||
|
| 69 |
+
tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast));
|
| 70 |
+
#endif
|
| 71 |
+
TORCH_CHECK(contiguous_memory, "A tensor was not contiguous.");
|
| 72 |
+
TORCH_CHECK(tensor_lists[l][t].device() == ref_device,
|
| 73 |
+
"A tensor was not on the same device as the first tensor");
|
| 74 |
+
TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
int ntensors = tensor_lists[0].size();
|
| 79 |
+
|
| 80 |
+
TensorListMetadata<depth> tl;
|
| 81 |
+
|
| 82 |
+
const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0]));
|
| 83 |
+
auto stream = at::cuda::getCurrentCUDAStream();
|
| 84 |
+
|
| 85 |
+
tl.start_tensor_this_launch = 0;
|
| 86 |
+
int loc_block_info = 0;
|
| 87 |
+
int loc_tensor_info = 0;
|
| 88 |
+
for (int t = 0; t < ntensors; t++) {
|
| 89 |
+
tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel();
|
| 90 |
+
for (int d = 0; d < depth; d++)
|
| 91 |
+
tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr();
|
| 92 |
+
loc_tensor_info++;
|
| 93 |
+
|
| 94 |
+
int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size;
|
| 95 |
+
|
| 96 |
+
for (int chunk = 0; chunk < chunks_this_tensor; chunk++) {
|
| 97 |
+
// std::cout << chunks_this_tensor << std::endl;
|
| 98 |
+
tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
|
| 99 |
+
tl.block_to_chunk[loc_block_info] = chunk;
|
| 100 |
+
loc_block_info++;
|
| 101 |
+
|
| 102 |
+
bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
|
| 103 |
+
chunk == chunks_this_tensor - 1);
|
| 104 |
+
bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]);
|
| 105 |
+
bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1);
|
| 106 |
+
if (tensors_full || blocks_full || last_chunk) {
|
| 107 |
+
// using accscalar_t = acc_type<scalar_t, true>;
|
| 108 |
+
multi_tensor_apply_kernel<<<loc_block_info, block_size, 0, stream>>>(
|
| 109 |
+
chunk_size, noop_flag.DATA_PTR<int>(), tl, callable, args...);
|
| 110 |
+
|
| 111 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 112 |
+
|
| 113 |
+
// Reset. The control flow possibilities here make my brain hurt.
|
| 114 |
+
loc_block_info = 0;
|
| 115 |
+
if (chunk == chunks_this_tensor - 1) {
|
| 116 |
+
// std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 <<
|
| 117 |
+
// std::endl;
|
| 118 |
+
loc_tensor_info = 0;
|
| 119 |
+
tl.start_tensor_this_launch = t + 1;
|
| 120 |
+
} else {
|
| 121 |
+
// std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 <<
|
| 122 |
+
// std::endl;
|
| 123 |
+
tl.sizes[0] = tl.sizes[loc_tensor_info - 1];
|
| 124 |
+
for (int d = 0; d < depth; d++)
|
| 125 |
+
tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1];
|
| 126 |
+
loc_tensor_info = 1;
|
| 127 |
+
tl.start_tensor_this_launch = t;
|
| 128 |
+
}
|
| 129 |
+
}
|
| 130 |
+
}
|
| 131 |
+
}
|
| 132 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
|
| 8 |
+
*/
|
| 9 |
+
|
| 10 |
+
#include <assert.h>
|
| 11 |
+
#include <stdlib.h>
|
| 12 |
+
#include <string.h>
|
| 13 |
+
|
| 14 |
+
#include <fcntl.h>
|
| 15 |
+
#include <libaio.h>
|
| 16 |
+
#include <sys/mman.h>
|
| 17 |
+
#include <sys/stat.h>
|
| 18 |
+
#include <sys/types.h>
|
| 19 |
+
#include <time.h>
|
| 20 |
+
#include <unistd.h>
|
| 21 |
+
|
| 22 |
+
#include <algorithm>
|
| 23 |
+
#include <chrono>
|
| 24 |
+
#include <cmath>
|
| 25 |
+
#include <cstring>
|
| 26 |
+
#include <fstream>
|
| 27 |
+
#include <iostream>
|
| 28 |
+
#include <limits>
|
| 29 |
+
#include <numeric>
|
| 30 |
+
#include <string>
|
| 31 |
+
#include <vector>
|
| 32 |
+
|
| 33 |
+
#include "deepspeed_aio_common.h"
|
| 34 |
+
|
| 35 |
+
using namespace std;
|
| 36 |
+
using namespace std::chrono;
|
| 37 |
+
|
| 38 |
+
#define DEBUG_DS_AIO_PERF 0
|
| 39 |
+
#define DEBUG_DS_AIO_SUBMIT_PERF 0
|
| 40 |
+
|
| 41 |
+
static const std::string c_library_name = "deepspeed_aio";
|
| 42 |
+
|
| 43 |
+
static void _report_aio_statistics(const char* tag,
|
| 44 |
+
const std::vector<std::chrono::duration<double>>& latencies)
|
| 45 |
+
__attribute__((unused));
|
| 46 |
+
|
| 47 |
+
static void _report_aio_statistics(const char* tag,
|
| 48 |
+
const std::vector<std::chrono::duration<double>>& latencies)
|
| 49 |
+
{
|
| 50 |
+
std::vector<double> lat_usec;
|
| 51 |
+
for (auto& lat : latencies) { lat_usec.push_back(lat.count() * 1e6); }
|
| 52 |
+
const auto min_lat = *(std::min_element(lat_usec.begin(), lat_usec.end()));
|
| 53 |
+
const auto max_lat = *(std::max_element(lat_usec.begin(), lat_usec.end()));
|
| 54 |
+
const auto avg_lat = std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size();
|
| 55 |
+
|
| 56 |
+
std::cout << c_library_name << ": latency statistics(usec) " << tag
|
| 57 |
+
<< " min/max/avg = " << min_lat << " " << max_lat << " " << avg_lat << std::endl;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
static void _get_aio_latencies(std::vector<std::chrono::duration<double>>& raw_latencies,
|
| 61 |
+
struct deepspeed_aio_latency_t& summary_latencies)
|
| 62 |
+
{
|
| 63 |
+
std::vector<double> lat_usec;
|
| 64 |
+
for (auto& lat : raw_latencies) { lat_usec.push_back(lat.count() * 1e6); }
|
| 65 |
+
summary_latencies._min_usec = *(std::min_element(lat_usec.begin(), lat_usec.end()));
|
| 66 |
+
summary_latencies._max_usec = *(std::max_element(lat_usec.begin(), lat_usec.end()));
|
| 67 |
+
summary_latencies._avg_usec =
|
| 68 |
+
std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size();
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
static void _do_io_submit_singles(const long long int n_iocbs,
|
| 72 |
+
const long long int iocb_index,
|
| 73 |
+
std::unique_ptr<aio_context>& aio_ctxt,
|
| 74 |
+
std::vector<std::chrono::duration<double>>& submit_times)
|
| 75 |
+
{
|
| 76 |
+
for (auto i = 0; i < n_iocbs; ++i) {
|
| 77 |
+
const auto st = std::chrono::high_resolution_clock::now();
|
| 78 |
+
const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, 1, aio_ctxt->_iocbs.data() + i);
|
| 79 |
+
submit_times.push_back(std::chrono::high_resolution_clock::now() - st);
|
| 80 |
+
#if DEBUG_DS_AIO_SUBMIT_PERF
|
| 81 |
+
printf("submit(usec) %f io_index=%lld buf=%p len=%lu off=%llu \n",
|
| 82 |
+
submit_times.back().count() * 1e6,
|
| 83 |
+
iocb_index,
|
| 84 |
+
aio_ctxt->_iocbs[i]->u.c.buf,
|
| 85 |
+
aio_ctxt->_iocbs[i]->u.c.nbytes,
|
| 86 |
+
aio_ctxt->_iocbs[i]->u.c.offset);
|
| 87 |
+
#endif
|
| 88 |
+
assert(submit_ret > 0);
|
| 89 |
+
}
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
static void _do_io_submit_block(const long long int n_iocbs,
|
| 93 |
+
const long long int iocb_index,
|
| 94 |
+
std::unique_ptr<aio_context>& aio_ctxt,
|
| 95 |
+
std::vector<std::chrono::duration<double>>& submit_times)
|
| 96 |
+
{
|
| 97 |
+
const auto st = std::chrono::high_resolution_clock::now();
|
| 98 |
+
const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, n_iocbs, aio_ctxt->_iocbs.data());
|
| 99 |
+
submit_times.push_back(std::chrono::high_resolution_clock::now() - st);
|
| 100 |
+
#if DEBUG_DS_AIO_SUBMIT_PERF
|
| 101 |
+
printf("submit(usec) %f io_index=%lld nr=%lld buf=%p len=%lu off=%llu \n",
|
| 102 |
+
submit_times.back().count() * 1e6,
|
| 103 |
+
iocb_index,
|
| 104 |
+
n_iocbs,
|
| 105 |
+
aio_ctxt->_iocbs[0]->u.c.buf,
|
| 106 |
+
aio_ctxt->_iocbs[0]->u.c.nbytes,
|
| 107 |
+
aio_ctxt->_iocbs[0]->u.c.offset);
|
| 108 |
+
#endif
|
| 109 |
+
assert(submit_ret > 0);
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
static int _do_io_complete(const long long int min_completes,
|
| 113 |
+
const long long int max_completes,
|
| 114 |
+
std::unique_ptr<aio_context>& aio_ctxt,
|
| 115 |
+
std::vector<std::chrono::duration<double>>& reap_times)
|
| 116 |
+
{
|
| 117 |
+
const auto start_time = std::chrono::high_resolution_clock::now();
|
| 118 |
+
long long int n_completes = io_pgetevents(aio_ctxt->_io_ctxt,
|
| 119 |
+
min_completes,
|
| 120 |
+
max_completes,
|
| 121 |
+
aio_ctxt->_io_events.data(),
|
| 122 |
+
nullptr,
|
| 123 |
+
nullptr);
|
| 124 |
+
reap_times.push_back(std::chrono::high_resolution_clock::now() - start_time);
|
| 125 |
+
assert(n_completes >= min_completes);
|
| 126 |
+
return n_completes;
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
void do_aio_operation_sequential(const bool read_op,
|
| 130 |
+
std::unique_ptr<aio_context>& aio_ctxt,
|
| 131 |
+
std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
|
| 132 |
+
deepspeed_aio_config_t* config,
|
| 133 |
+
deepspeed_aio_perf_t* perf)
|
| 134 |
+
{
|
| 135 |
+
struct io_prep_context prep_ctxt(read_op, xfer_ctxt, aio_ctxt->_block_size, &aio_ctxt->_iocbs);
|
| 136 |
+
|
| 137 |
+
const auto num_io_blocks = static_cast<long long int>(
|
| 138 |
+
ceil(static_cast<double>(xfer_ctxt->_num_bytes) / aio_ctxt->_block_size));
|
| 139 |
+
#if DEBUG_DS_AIO_PERF
|
| 140 |
+
const auto io_op_name = std::string(read_op ? "read" : "write");
|
| 141 |
+
std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes
|
| 142 |
+
<< " bytes with " << num_io_blocks << " io blocks" << std::endl;
|
| 143 |
+
#endif
|
| 144 |
+
|
| 145 |
+
std::vector<std::chrono::duration<double>> submit_times;
|
| 146 |
+
std::vector<std::chrono::duration<double>> reap_times;
|
| 147 |
+
const auto max_queue_bytes =
|
| 148 |
+
static_cast<long long int>(aio_ctxt->_queue_depth * aio_ctxt->_block_size);
|
| 149 |
+
|
| 150 |
+
auto start = std::chrono::high_resolution_clock::now();
|
| 151 |
+
for (long long iocb_index = 0; iocb_index < num_io_blocks;
|
| 152 |
+
iocb_index += aio_ctxt->_queue_depth) {
|
| 153 |
+
const auto start_offset = iocb_index * aio_ctxt->_block_size;
|
| 154 |
+
const auto start_buffer = (char*)xfer_ctxt->_mem_buffer + start_offset;
|
| 155 |
+
const auto n_iocbs =
|
| 156 |
+
min(static_cast<long long>(aio_ctxt->_queue_depth), (num_io_blocks - iocb_index));
|
| 157 |
+
const auto num_bytes = min(max_queue_bytes, (xfer_ctxt->_num_bytes - start_offset));
|
| 158 |
+
prep_ctxt.prep_iocbs(n_iocbs, num_bytes, start_buffer, start_offset);
|
| 159 |
+
|
| 160 |
+
if (config->_single_submit) {
|
| 161 |
+
_do_io_submit_singles(n_iocbs, iocb_index, aio_ctxt, submit_times);
|
| 162 |
+
} else {
|
| 163 |
+
_do_io_submit_block(n_iocbs, iocb_index, aio_ctxt, submit_times);
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
_do_io_complete(n_iocbs, n_iocbs, aio_ctxt, reap_times);
|
| 167 |
+
}
|
| 168 |
+
const std::chrono::duration<double> elapsed = std::chrono::high_resolution_clock::now() - start;
|
| 169 |
+
|
| 170 |
+
if (perf) {
|
| 171 |
+
_get_aio_latencies(submit_times, perf->_submit);
|
| 172 |
+
_get_aio_latencies(reap_times, perf->_complete);
|
| 173 |
+
perf->_e2e_usec = elapsed.count() * 1e6;
|
| 174 |
+
perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
#if DEBUG_DS_AIO_PERF
|
| 178 |
+
_report_aio_statistics("submit", submit_times);
|
| 179 |
+
_report_aio_statistics("complete", reap_times);
|
| 180 |
+
#endif
|
| 181 |
+
|
| 182 |
+
#if DEBUG_DS_AIO_PERF
|
| 183 |
+
std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6
|
| 184 |
+
<< " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl;
|
| 185 |
+
#endif
|
| 186 |
+
|
| 187 |
+
#if DEBUG_DS_AIO_PERF
|
| 188 |
+
std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes
|
| 189 |
+
<< " bytes " << std::endl;
|
| 190 |
+
#endif
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
void do_aio_operation_overlap(const bool read_op,
|
| 194 |
+
std::unique_ptr<aio_context>& aio_ctxt,
|
| 195 |
+
std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
|
| 196 |
+
deepspeed_aio_config_t* config,
|
| 197 |
+
deepspeed_aio_perf_t* perf)
|
| 198 |
+
{
|
| 199 |
+
struct io_prep_generator io_gen(read_op, xfer_ctxt, aio_ctxt->_block_size);
|
| 200 |
+
|
| 201 |
+
#if DEBUG_DS_AIO_PERF
|
| 202 |
+
const auto io_op_name = std::string(read_op ? "read" : "write");
|
| 203 |
+
std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes
|
| 204 |
+
<< " bytes with " << io_gen._num_io_blocks << " io blocks" << std::endl;
|
| 205 |
+
#endif
|
| 206 |
+
|
| 207 |
+
std::vector<std::chrono::duration<double>> submit_times;
|
| 208 |
+
std::vector<std::chrono::duration<double>> reap_times;
|
| 209 |
+
|
| 210 |
+
auto request_iocbs = aio_ctxt->_queue_depth;
|
| 211 |
+
auto n_pending_iocbs = 0;
|
| 212 |
+
const auto min_completes = 1;
|
| 213 |
+
auto start = std::chrono::high_resolution_clock::now();
|
| 214 |
+
while (true) {
|
| 215 |
+
const auto n_iocbs = io_gen.prep_iocbs(request_iocbs - n_pending_iocbs, &aio_ctxt->_iocbs);
|
| 216 |
+
if (n_iocbs > 0) {
|
| 217 |
+
if (config->_single_submit) {
|
| 218 |
+
_do_io_submit_singles(
|
| 219 |
+
n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times);
|
| 220 |
+
} else {
|
| 221 |
+
_do_io_submit_block(
|
| 222 |
+
n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times);
|
| 223 |
+
}
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
n_pending_iocbs += n_iocbs;
|
| 227 |
+
assert(n_pending_iocbs <= aio_ctxt->_queue_depth);
|
| 228 |
+
|
| 229 |
+
if (n_pending_iocbs == 0) { break; }
|
| 230 |
+
|
| 231 |
+
const auto n_complete =
|
| 232 |
+
_do_io_complete(min_completes, n_pending_iocbs, aio_ctxt, reap_times);
|
| 233 |
+
n_pending_iocbs -= n_complete;
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
const std::chrono::duration<double> elapsed = std::chrono::high_resolution_clock::now() - start;
|
| 237 |
+
|
| 238 |
+
if (perf) {
|
| 239 |
+
_get_aio_latencies(submit_times, perf->_submit);
|
| 240 |
+
_get_aio_latencies(reap_times, perf->_complete);
|
| 241 |
+
perf->_e2e_usec = elapsed.count() * 1e6;
|
| 242 |
+
perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9);
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
#if DEBUG_DS_AIO_PERF
|
| 246 |
+
_report_aio_statistics("submit", submit_times);
|
| 247 |
+
_report_aio_statistics("complete", reap_times);
|
| 248 |
+
#endif
|
| 249 |
+
|
| 250 |
+
#if DEBUG_DS_AIO_PERF
|
| 251 |
+
std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6
|
| 252 |
+
<< " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl;
|
| 253 |
+
#endif
|
| 254 |
+
|
| 255 |
+
#if DEBUG_DS_AIO_PERF
|
| 256 |
+
std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes
|
| 257 |
+
<< " bytes " << std::endl;
|
| 258 |
+
#endif
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
void report_file_error(const char* filename, const std::string file_op, const int error_code)
|
| 262 |
+
{
|
| 263 |
+
std::string err_msg = file_op + std::string(" failed on ") + std::string(filename) +
|
| 264 |
+
" error = " + std::to_string(error_code);
|
| 265 |
+
std::cerr << c_library_name << ": " << err_msg << std::endl;
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
int open_file(const char* filename, const bool read_op)
|
| 269 |
+
{
|
| 270 |
+
const int flags = read_op ? (O_RDONLY | O_DIRECT) : (O_WRONLY | O_CREAT | O_DIRECT);
|
| 271 |
+
#if defined(__ENABLE_CANN__)
|
| 272 |
+
int* flags_ptr = (int*)&flags;
|
| 273 |
+
*flags_ptr = read_op ? (O_RDONLY) : (O_WRONLY | O_CREAT);
|
| 274 |
+
#endif
|
| 275 |
+
const int mode = 0600;
|
| 276 |
+
const auto fd = open(filename, flags, mode);
|
| 277 |
+
if (fd == -1) {
|
| 278 |
+
const auto error_code = errno;
|
| 279 |
+
const auto error_msg = read_op ? " open for read " : " open for write ";
|
| 280 |
+
report_file_error(filename, error_msg, error_code);
|
| 281 |
+
return -1;
|
| 282 |
+
}
|
| 283 |
+
return fd;
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
int regular_read(const char* filename, std::vector<char>& buffer)
|
| 287 |
+
{
|
| 288 |
+
long long int num_bytes;
|
| 289 |
+
const auto f_size = get_file_size(filename, num_bytes);
|
| 290 |
+
assert(f_size != -1);
|
| 291 |
+
buffer.resize(num_bytes);
|
| 292 |
+
const auto fd = open(filename, O_RDONLY, 0600);
|
| 293 |
+
assert(fd != -1);
|
| 294 |
+
long long int read_bytes = 0;
|
| 295 |
+
auto r = 0;
|
| 296 |
+
do {
|
| 297 |
+
const auto buffer_ptr = buffer.data() + read_bytes;
|
| 298 |
+
const auto bytes_to_read = num_bytes - read_bytes;
|
| 299 |
+
r = read(fd, buffer_ptr, bytes_to_read);
|
| 300 |
+
read_bytes += r;
|
| 301 |
+
} while (r > 0);
|
| 302 |
+
|
| 303 |
+
if (read_bytes != num_bytes) {
|
| 304 |
+
std::cerr << "read error "
|
| 305 |
+
<< " read_bytes (read) = " << read_bytes << " num_bytes (fstat) = " << num_bytes
|
| 306 |
+
<< std::endl;
|
| 307 |
+
}
|
| 308 |
+
assert(read_bytes == num_bytes);
|
| 309 |
+
close(fd);
|
| 310 |
+
return 0;
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
static bool _validate_buffer(const char* filename, void* aio_buffer, const long long int num_bytes)
|
| 314 |
+
{
|
| 315 |
+
std::vector<char> regular_buffer;
|
| 316 |
+
const auto reg_ret = regular_read(filename, regular_buffer);
|
| 317 |
+
assert(0 == reg_ret);
|
| 318 |
+
std::cout << "regular read of " << filename << " returned " << regular_buffer.size() << " bytes"
|
| 319 |
+
<< std::endl;
|
| 320 |
+
|
| 321 |
+
if (static_cast<long long int>(regular_buffer.size()) != num_bytes) { return false; }
|
| 322 |
+
|
| 323 |
+
return (0 == memcmp(aio_buffer, regular_buffer.data(), regular_buffer.size()));
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
bool validate_aio_operation(const bool read_op,
|
| 327 |
+
const char* filename,
|
| 328 |
+
void* aio_buffer,
|
| 329 |
+
const long long int num_bytes)
|
| 330 |
+
{
|
| 331 |
+
const auto msg_suffix = std::string("deepspeed_aio_") +
|
| 332 |
+
std::string(read_op ? "read()" : "write()") +
|
| 333 |
+
std::string("using read()");
|
| 334 |
+
|
| 335 |
+
if (false == _validate_buffer(filename, aio_buffer, num_bytes)) {
|
| 336 |
+
std::cout << "Fail: correctness of " << msg_suffix << std::endl;
|
| 337 |
+
return false;
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
std::cout << "Pass: correctness of " << msg_suffix << std::endl;
|
| 341 |
+
return true;
|
| 342 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "cpu_adam.h"
|
| 7 |
+
|
| 8 |
+
// C++ interface
|
| 9 |
+
|
| 10 |
+
void multi_tensor_adam(int chunk_size,
|
| 11 |
+
at::Tensor noop_flag,
|
| 12 |
+
std::vector<std::vector<at::Tensor>> tensor_lists, /*gpmv*/
|
| 13 |
+
const float lr,
|
| 14 |
+
const float beta1,
|
| 15 |
+
const float beta2,
|
| 16 |
+
const float epsilon,
|
| 17 |
+
const int step,
|
| 18 |
+
const int mode,
|
| 19 |
+
const int bias_correction,
|
| 20 |
+
const float weight_decay)
|
| 21 |
+
{
|
| 22 |
+
static bool initialized = false;
|
| 23 |
+
if (!initialized) {
|
| 24 |
+
create_adam_optimizer(0);
|
| 25 |
+
initialized = true;
|
| 26 |
+
}
|
| 27 |
+
for (int i = 0; i < tensor_lists[0].size(); i++) {
|
| 28 |
+
ds_adam_step(0,
|
| 29 |
+
step,
|
| 30 |
+
lr,
|
| 31 |
+
beta1,
|
| 32 |
+
beta2,
|
| 33 |
+
epsilon,
|
| 34 |
+
weight_decay,
|
| 35 |
+
bias_correction,
|
| 36 |
+
tensor_lists[1][i],
|
| 37 |
+
tensor_lists[0][i],
|
| 38 |
+
tensor_lists[2][i],
|
| 39 |
+
tensor_lists[3][i]);
|
| 40 |
+
}
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
|
| 44 |
+
{
|
| 45 |
+
m.def("multi_tensor_adam",
|
| 46 |
+
&multi_tensor_adam,
|
| 47 |
+
"Compute and apply gradient update to parameters for Adam optimizer");
|
| 48 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp
ADDED
|
@@ -0,0 +1,639 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
|
| 8 |
+
#include <fcntl.h>
|
| 9 |
+
#include <immintrin.h>
|
| 10 |
+
#include <math.h>
|
| 11 |
+
#include <omp.h>
|
| 12 |
+
#include <sys/mman.h>
|
| 13 |
+
#include <sys/stat.h>
|
| 14 |
+
#include <unistd.h>
|
| 15 |
+
#include <atomic>
|
| 16 |
+
#include <cstdlib>
|
| 17 |
+
#include <iostream>
|
| 18 |
+
#include <oneapi/ccl.hpp>
|
| 19 |
+
|
| 20 |
+
// states for collectives
|
| 21 |
+
enum coll_state {
|
| 22 |
+
coll_begin = 0,
|
| 23 |
+
// coll states for naive allreduce
|
| 24 |
+
coll_allreduce_naive__copy_in_done, // this state is for rank != 0
|
| 25 |
+
coll_allreduce_naive__reduce_done, // this state is for rank == 0
|
| 26 |
+
coll_allreduce_naive__copy_out_done, // this state is for rank != 0
|
| 27 |
+
};
|
| 28 |
+
|
| 29 |
+
// SHM building blocks
|
| 30 |
+
struct SharedData {
|
| 31 |
+
const char* name;
|
| 32 |
+
int descriptor;
|
| 33 |
+
void* bytes;
|
| 34 |
+
size_t nbytes;
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
// Map an existing POSIX shared-memory object `name` of `nbytes` bytes into this
// process and fill *data with the mapping. On any failure data->descriptor is
// set to -1 and a message is printed (callers do not currently check it).
void shared_open(SharedData* data, const char* name, size_t nbytes)
{
    int d = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR);
    if (d != -1) {
        void* bytes = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_SHARED, d, 0);
        if (bytes == MAP_FAILED) {
            // Fix: the original ignored mmap failure and stored MAP_FAILED as a
            // usable pointer, which would crash on first dereference.
            printf("shared_open %s mmap failed\n", name);
            close(d);
            data->descriptor = -1;
            return;
        }
        data->name = name;
        data->descriptor = d;
        data->bytes = bytes;
        data->nbytes = nbytes;
    } else {
        printf("shared_open %s failed\n", name);
        data->descriptor = -1;
    }
}
|
| 51 |
+
|
| 52 |
+
// Create a POSIX shared-memory object `name`, seed it with `nbytes` bytes from
// `bytes`, then map it into this process via shared_open(). On failure a
// message is printed and data->descriptor is set to -1.
void shared_create(SharedData* data, const char* name, void* bytes, size_t nbytes)
{
    int d = shm_open(name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
    if (d != -1) {
        // Fix: the original used `if (nbytes = write(...))` — an assignment whose
        // result is also truthy on error (write returns -1), so a failed or short
        // write still proceeded to map the object.
        if (write(d, bytes, nbytes) == (ssize_t)nbytes) {
            shared_open(data, name, nbytes);
        } else {
            printf("shared_create %s write failed\n", name);
            data->descriptor = -1;
        }
    } else {
        printf("shared_create %s failed\n", name);
        data->descriptor = -1;
    }
}
|
| 61 |
+
|
| 62 |
+
// Unmap and unlink the shared-memory object held in *data.
// A data->descriptor of -1 marks a SharedData that was never opened; such
// entries are ignored.
void shared_close(SharedData* data)
{
    if (data->descriptor == -1) { return; }
    munmap(data->bytes, data->nbytes);
    shm_unlink(data->name);
}
|
| 69 |
+
|
| 70 |
+
// SHM based allreduce helper functions
|
| 71 |
+
// buffer that holds shm name
|
| 72 |
+
#define NAME_BUF_SIZE 1000
|
| 73 |
+
#define MAX_BUF_SIZE 1048576
|
| 74 |
+
#define SHM_BUFFER_NAME "deepspeed_allreduce_buffer"
|
| 75 |
+
SharedData allreduce_buffer;
|
| 76 |
+
struct allreduce_workspace {
|
| 77 |
+
enum coll_state state;
|
| 78 |
+
char buffer[MAX_BUF_SIZE];
|
| 79 |
+
};
|
| 80 |
+
struct allreduce_workspace* workspace;
|
| 81 |
+
|
| 82 |
+
// Busy-wait until workspace[index].state becomes exactly `state`.
// The volatile-qualified pointer forces a fresh load each iteration; ordering
// with the buffer payload relies on the atomic_thread_fence calls made by the
// writers in inference_all_reduce().
void wait_buffer_state_until(int index, enum coll_state state)
{
    volatile enum coll_state* state_ptr = &(workspace[index].state);

    while (*state_ptr != state)
        ;
}
|
| 89 |
+
|
| 90 |
+
// Busy-wait until workspace[index].state is anything other than `state`.
// Used at the end of inference_all_reduce() to avoid a deadlock when rank 0
// races ahead into the next collective (see comment at the call site).
void wait_buffer_state_until_not(int index, enum coll_state state)
{
    volatile enum coll_state* state_ptr = &(workspace[index].state);

    while (*state_ptr == state)
        ;
}
|
| 97 |
+
|
| 98 |
+
// Widen 16 bf16 values (packed in a 256-bit register) to 16 fp32 values.
// The forward declaration carries the avx512bw target attribute for the
// definition that follows.
__m512 cvt_bf16_to_fp32(const __m256i src) __attribute__((target("avx512bw")));
inline __m512 cvt_bf16_to_fp32(const __m256i src)
{
    // Zero-extend each 16-bit lane to 32 bits, then byte-shift left by 2 so the
    // bf16 payload lands in the top half of each 32-bit lane (bf16 is the high
    // 16 bits of an fp32). The shift operates per 128-bit lane, but the bytes
    // that cross 32-bit element boundaries are the zeros produced by the
    // zero-extension, so each element correctly becomes 0xXXXX0000.
    auto y = _mm512_cvtepu16_epi32(src);
    return _mm512_castsi512_ps(_mm512_bslli_epi128(y, 2));
}
|
| 104 |
+
|
| 105 |
+
// Narrow 16 fp32 values to 16 bf16 values using round-to-nearest-even on the
// truncated 16 bits, with NaN inputs forced to an all-ones bf16 payload.
inline __m256i cvt_fp32_to_bf16(const __m512 src) __attribute__((target("avx512bw")));
inline __m256i cvt_fp32_to_bf16(const __m512 src)
{
    __m512i value = _mm512_castps_si512(src);
    __m512i nan = _mm512_set1_epi32(0xffff);
    // mask_value is set for lanes that are ordered (i.e. not NaN).
    auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q);
    __m512i ones = _mm512_set1_epi32(0x1);
    __m512i vec_bias = _mm512_set1_epi32(0x7fff);
    // uint32_t lsb = (input >> 16) & 1;
    auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones);
    // uint32_t rounding_bias = 0x7fff + lsb;  (ties round to even)
    t_value = _mm512_add_epi32(t_value, vec_bias);
    // input += rounding_bias;
    t_value = _mm512_add_epi32(t_value, value);
    // input = input >> 16;
    t_value = _mm512_srli_epi32(t_value, 16);
    // Check NaN before converting back to bf16
    t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value);
    return _mm512_cvtusepi32_epi16(t_value);
}
|
| 125 |
+
|
| 126 |
+
void reduce_2_bf16_buffers(int num_elements, void* in_out, void* in)
|
| 127 |
+
__attribute__((target("avx512bw")));
|
| 128 |
+
|
| 129 |
+
void reduce_bf16_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace)
|
| 130 |
+
__attribute__((target("avx512bw")));
|
| 131 |
+
|
| 132 |
+
void reduce_2_fp32_buffers(int num_elements, void* in_out, void* in)
|
| 133 |
+
__attribute__((target("avx512bw")));
|
| 134 |
+
|
| 135 |
+
void reduce_fp32_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace)
|
| 136 |
+
__attribute__((target("avx512bw")));
|
| 137 |
+
|
| 138 |
+
// N_REDUCE_LIMIT is the number of buffers that can be reduced together in one shot.
|
| 139 |
+
// Compared with do N-1 2-reduces which needs 2*(N-1) read and N-1 write,
|
| 140 |
+
// N-reduce only needs N read and 1 write, this saves 2/3 memory bandwidth.
|
| 141 |
+
// When increase N_REDUCE_LIMIT to a bigger number, do the following steps
|
| 142 |
+
// 1. Extend REPEAT_<X> macros list down below
|
| 143 |
+
// 2. Extend switch cases which call "REPEAT(X, ...)" down below
|
| 144 |
+
#define N_REDUCE_LIMIT 8
|
| 145 |
+
|
| 146 |
+
void reduce_all_buffers(struct allreduce_workspace* workspace,
|
| 147 |
+
int num_elements,
|
| 148 |
+
c10::ScalarType scalar_type,
|
| 149 |
+
int num_buffers)
|
| 150 |
+
{
|
| 151 |
+
switch (scalar_type) {
|
| 152 |
+
case c10::ScalarType::BFloat16:
|
| 153 |
+
if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) {
|
| 154 |
+
reduce_bf16_buffers(num_elements, num_buffers, workspace);
|
| 155 |
+
} else {
|
| 156 |
+
for (int i = 1; i < num_buffers; i++) {
|
| 157 |
+
reduce_2_bf16_buffers(num_elements, workspace[0].buffer, workspace[i].buffer);
|
| 158 |
+
}
|
| 159 |
+
}
|
| 160 |
+
break;
|
| 161 |
+
case c10::ScalarType::Float:
|
| 162 |
+
if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) {
|
| 163 |
+
reduce_fp32_buffers(num_elements, num_buffers, workspace);
|
| 164 |
+
} else {
|
| 165 |
+
for (int i = 1; i < num_buffers; i++) {
|
| 166 |
+
reduce_2_fp32_buffers(num_elements, workspace[0].buffer, workspace[i].buffer);
|
| 167 |
+
}
|
| 168 |
+
}
|
| 169 |
+
break;
|
| 170 |
+
default: assert(!"Should not get here");
|
| 171 |
+
}
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
// REPEAT(N, x) expands to x(1); x(2); ...; x(N). It is used below to unroll
// the N-way reduce over a compile-time buffer index without a runtime loop.
#define REPEAT(N, x) REPEAT_##N(x)
#define REPEAT_1(x) x(1)
#define REPEAT_2(x) \
    REPEAT_1(x);    \
    x(2)
#define REPEAT_3(x) \
    REPEAT_2(x);    \
    x(3)
#define REPEAT_4(x) \
    REPEAT_3(x);    \
    x(4)
#define REPEAT_5(x) \
    REPEAT_4(x);    \
    x(5)
#define REPEAT_6(x) \
    REPEAT_5(x);    \
    x(6)
#define REPEAT_7(x) \
    REPEAT_6(x);    \
    x(7)

// Load 16 bf16 values from workspace[x].buffer at byte offset `i`, widen them
// to fp32 and accumulate into `inout_val`. Both `i` and `inout_val` must be in
// scope at the expansion site (see reduce_bf16_buffers).
#define CVT_ADD_BF16(x)                                                                 \
    do {                                                                                \
        auto in##x##_val =                                                              \
            cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[x].buffer + i))); \
        inout_val = _mm512_add_ps(inout_val, in##x##_val);                              \
    } while (0)
|
| 201 |
+
|
| 202 |
+
// Reduce functions down below use vectorized algorithm, the number of bytes processed each
|
| 203 |
+
// iteration depends on vector length. 256bit vector ==> 32 bytes, 512bit vector ==> 64 bytes
|
| 204 |
+
// If you change implementation of reduce_2_bf16_buffers or reduce_2_fp32_buffers, check
|
| 205 |
+
// whether this number needs to be changed
|
| 206 |
+
#define VECTOR_LENGTH_IN_BYTES 32
|
| 207 |
+
|
| 208 |
+
// num_elements must be divisible by 16 (caller check)
|
| 209 |
+
// Sum `num_buffers` (3..8) bf16 buffers element-wise into workspace[0].buffer.
// Each iteration consumes 32 bytes = 16 bf16 elements; accumulation happens in
// fp32 to limit rounding error, then narrows back to bf16 on store.
// num_elements must be divisible by 16 (caller check)
void reduce_bf16_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace)
{
#pragma omp parallel for
    for (int i = 0; i < num_elements * 2; i += VECTOR_LENGTH_IN_BYTES) {
        auto inout_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[0].buffer + i)));
        // REPEAT(N-1, ...) adds buffers 1..N-1 on top of buffer 0.
        switch (num_buffers) {
            case 8: REPEAT(7, CVT_ADD_BF16); break;
            case 7: REPEAT(6, CVT_ADD_BF16); break;
            case 6: REPEAT(5, CVT_ADD_BF16); break;
            case 5: REPEAT(4, CVT_ADD_BF16); break;
            case 4: REPEAT(3, CVT_ADD_BF16); break;
            case 3: REPEAT(2, CVT_ADD_BF16); break;
            default: assert(!"Should not get here.");
        }
        _mm256_storeu_si256((__m256i*)(workspace[0].buffer + i), cvt_fp32_to_bf16(inout_val));
    }
}
|
| 226 |
+
|
| 227 |
+
// Element-wise in_out += in1 for two bf16 buffers of num_elements values.
// 32 bytes (16 bf16 elements) per iteration; arithmetic is done in fp32.
void reduce_2_bf16_buffers(int num_elements, void* in_out, void* in1)
{
#pragma omp parallel for
    for (int i = 0; i < num_elements * 2; i += VECTOR_LENGTH_IN_BYTES) {
        auto inout_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in_out + i)));
        auto in1_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in1 + i)));
        inout_val = _mm512_add_ps(inout_val, in1_val);
        _mm256_storeu_si256((__m256i*)((char*)in_out + i), cvt_fp32_to_bf16(inout_val));
    }
}
|
| 237 |
+
|
| 238 |
+
// Load 8 fp32 values from workspace[x].buffer at byte offset `i` and
// accumulate into `inout_val` (both must be in scope at the expansion site;
// see reduce_fp32_buffers).
#define CVT_ADD_F32(x)                                                       \
    do {                                                                     \
        auto in##x##_val = _mm256_loadu_ps((float*)(workspace[x].buffer + i)); \
        inout_val = _mm256_add_ps(inout_val, in##x##_val);                   \
    } while (0)
|
| 243 |
+
|
| 244 |
+
// num_elements must be divisible by 16 (caller check)
|
| 245 |
+
// Sum `num_buffers` (3..8) fp32 buffers element-wise into workspace[0].buffer.
// Each iteration consumes 32 bytes = 8 fp32 elements.
// num_elements must be divisible by 16 (caller check)
void reduce_fp32_buffers(int num_elements, int num_buffers, struct allreduce_workspace* workspace)
{
#pragma omp parallel for
    for (int i = 0; i < num_elements * 4; i += VECTOR_LENGTH_IN_BYTES) {
        auto inout_val = _mm256_loadu_ps((float*)(workspace[0].buffer + i));
        // REPEAT(N-1, ...) adds buffers 1..N-1 on top of buffer 0.
        switch (num_buffers) {
            case 8: REPEAT(7, CVT_ADD_F32); break;
            case 7: REPEAT(6, CVT_ADD_F32); break;
            case 6: REPEAT(5, CVT_ADD_F32); break;
            case 5: REPEAT(4, CVT_ADD_F32); break;
            case 4: REPEAT(3, CVT_ADD_F32); break;
            case 3: REPEAT(2, CVT_ADD_F32); break;
            default: assert(!"Should not get here.");
        }
        _mm256_storeu_ps((float*)(workspace[0].buffer + i), inout_val);
    }
}
|
| 262 |
+
|
| 263 |
+
// Element-wise in_out += in1 for two fp32 buffers of num_elements values.
// 32 bytes (8 fp32 elements) per iteration.
void reduce_2_fp32_buffers(int num_elements, void* in_out, void* in1)
{
#pragma omp parallel for
    for (int i = 0; i < num_elements * 4; i += VECTOR_LENGTH_IN_BYTES) {
        auto inout_val = _mm256_loadu_ps((float*)((char*)in_out + i));
        auto in1_val = _mm256_loadu_ps((float*)((char*)in1 + i));
        inout_val = _mm256_add_ps(inout_val, in1_val);
        _mm256_storeu_ps((float*)((char*)in_out + i), inout_val);
    }
}
|
| 273 |
+
|
| 274 |
+
// Communication settings
|
| 275 |
+
int world_rank = -1;
|
| 276 |
+
int world_size = -1;
|
| 277 |
+
|
| 278 |
+
std::set<int> _comm_ids;
|
| 279 |
+
std::set<int> _colors;
|
| 280 |
+
std::vector<ccl::communicator> _ccl_comms;
|
| 281 |
+
ccl::shared_ptr_class<ccl::kvs> sub_kvs;
|
| 282 |
+
std::map<std::vector<int>, int> group_to_comm_id;
|
| 283 |
+
|
| 284 |
+
// Communicator lookup helpers. Index 0 of _ccl_comms is the world communicator
// created in initialize(); sub-communicators are registered by
// initialize_sub_comm() under their rank list.
ccl::communicator& _get_comm_from_group() { return _ccl_comms[0]; }

// The python group object is currently ignored; the world communicator is used.
ccl::communicator& _get_comm_from_group(py::object group) { return _ccl_comms[0]; }

// Look up the sub-communicator registered for `ranks`; unknown rank lists fall
// back to the world communicator.
ccl::communicator& _get_comm_from_group(std::vector<int> ranks)
{
    auto entry = group_to_comm_id.find(ranks);
    return entry != group_to_comm_id.end() ? _ccl_comms[entry->second] : _ccl_comms[0];
}
|
| 294 |
+
|
| 295 |
+
#define CCLCHECK(cmd) \
|
| 296 |
+
do { \
|
| 297 |
+
cmd; \
|
| 298 |
+
} while (0)
|
| 299 |
+
|
| 300 |
+
#define KVS_CREATE_SUCCESS 0
|
| 301 |
+
#define KVS_CREATE_FAILURE -1
|
| 302 |
+
|
| 303 |
+
bool is_initialized = 0;
|
| 304 |
+
|
| 305 |
+
ccl::shared_ptr_class<ccl::kvs> kvs;
|
| 306 |
+
|
| 307 |
+
bool all_ranks_local_p = false;
|
| 308 |
+
|
| 309 |
+
void initialize(int size, int rank, torch::Tensor& kvs_data)
|
| 310 |
+
{
|
| 311 |
+
if (is_initialized) return;
|
| 312 |
+
|
| 313 |
+
// Check whether all ranks is on the same physical machine.
|
| 314 |
+
// If true, we will use an SHM based low latency allreduce
|
| 315 |
+
|
| 316 |
+
auto ls_string = std::getenv("LOCAL_SIZE");
|
| 317 |
+
int ls = 0;
|
| 318 |
+
if (ls_string != NULL) { ls = std::stoi(std::getenv("LOCAL_SIZE")); }
|
| 319 |
+
|
| 320 |
+
if (size >= 1 && size == ls) { all_ranks_local_p = true; }
|
| 321 |
+
|
| 322 |
+
world_size = size;
|
| 323 |
+
world_rank = rank;
|
| 324 |
+
is_initialized = 1;
|
| 325 |
+
|
| 326 |
+
ccl::kvs::address_type main_addr;
|
| 327 |
+
|
| 328 |
+
if (rank != 0) {
|
| 329 |
+
memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size());
|
| 330 |
+
kvs = ccl::create_kvs(main_addr);
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
_ccl_comms.emplace_back(ccl::create_communicator(size, rank, kvs));
|
| 334 |
+
|
| 335 |
+
auto addr_string = std::getenv("MASTER_ADDR");
|
| 336 |
+
if (addr_string == NULL) { addr_string = ""; }
|
| 337 |
+
auto port_string = std::getenv("MASTER_PORT");
|
| 338 |
+
if (port_string == NULL) { port_string = ""; }
|
| 339 |
+
char shm_name[NAME_BUF_SIZE];
|
| 340 |
+
snprintf(shm_name,
|
| 341 |
+
NAME_BUF_SIZE,
|
| 342 |
+
"%s_%d_%s_%s",
|
| 343 |
+
SHM_BUFFER_NAME,
|
| 344 |
+
getuid(),
|
| 345 |
+
addr_string,
|
| 346 |
+
port_string);
|
| 347 |
+
// create shared workspace for SHM based allreduce
|
| 348 |
+
if (all_ranks_local_p) {
|
| 349 |
+
if (rank == 0) {
|
| 350 |
+
workspace =
|
| 351 |
+
(struct allreduce_workspace*)malloc(size * sizeof(struct allreduce_workspace));
|
| 352 |
+
shared_create(
|
| 353 |
+
&allreduce_buffer, shm_name, workspace, size * sizeof(struct allreduce_workspace));
|
| 354 |
+
workspace = (struct allreduce_workspace*)allreduce_buffer.bytes;
|
| 355 |
+
for (int i = 0; i < size; i++) { workspace[i].state = coll_begin; }
|
| 356 |
+
}
|
| 357 |
+
CCLCHECK(ccl::barrier(_get_comm_from_group()).wait());
|
| 358 |
+
if (rank != 0) {
|
| 359 |
+
shared_open(&allreduce_buffer, shm_name, size * sizeof(struct allreduce_workspace));
|
| 360 |
+
}
|
| 361 |
+
workspace = (struct allreduce_workspace*)allreduce_buffer.bytes;
|
| 362 |
+
}
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
/*
|
| 366 |
+
rank == 0: create main kvs and return its address
|
| 367 |
+
rank == else: return an empty address
|
| 368 |
+
*/
|
| 369 |
+
std::vector<uint8_t> get_kvs_addr(int rank)
|
| 370 |
+
{
|
| 371 |
+
if (rank == 0) {
|
| 372 |
+
kvs = ccl::create_main_kvs();
|
| 373 |
+
ccl::kvs::address_type main_addr = kvs->get_address();
|
| 374 |
+
auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
|
| 375 |
+
return ccl_kvs_addr;
|
| 376 |
+
} else {
|
| 377 |
+
ccl::kvs::address_type main_addr;
|
| 378 |
+
auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
|
| 379 |
+
return ccl_kvs_addr;
|
| 380 |
+
}
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
// Rank of this process in the world communicator (set by initialize()); the
// `group` argument is currently ignored.
int get_rank(int group = 0) { return world_rank; }
|
| 384 |
+
|
| 385 |
+
// Total number of ranks in the world communicator (set by initialize()); the
// `group` argument is currently ignored.
int get_world_size(int group = 0) { return world_size; }
|
| 386 |
+
|
| 387 |
+
// Find the next ordered, unique value to a set. E.g. <0,1,2,7> --> 3
|
| 388 |
+
// Find the smallest non-negative integer not in `s`, assuming s contains only
// non-negative values. E.g. {} -> 0, {0} -> 1, {0,1,2,7} -> 3, {0,1,2} -> 3.
int next_unique_val(std::set<int> s)
{
    // Base case: 0 is free when the set is empty or does not start at 0.
    if (s.empty() || *s.begin() != 0) {
        return 0;
        // second base case where s = {0} (the case of s = {n != 0} is caught above)
    } else if (s.size() == 1) {
        return 1;
    } else {
        // Scan for the first gap between consecutive elements.
        int prev_val = *s.begin();
        for (auto itr = std::next(s.begin()); itr != s.end(); itr++) {
            if (*itr != prev_val + 1) { return prev_val + 1; }
            prev_val = *itr;
        }
        // Fix: the original returned *(s.end()) + 1, dereferencing the
        // past-the-end iterator (undefined behavior). The value one past the
        // largest element is *s.rbegin() + 1.
        return *s.rbegin() + 1;
    }
}
|
| 406 |
+
|
| 407 |
+
std::vector<uint8_t> get_sub_kvs_addr(bool first)
|
| 408 |
+
{
|
| 409 |
+
if (first) {
|
| 410 |
+
sub_kvs = ccl::create_main_kvs();
|
| 411 |
+
ccl::kvs::address_type main_addr = sub_kvs->get_address();
|
| 412 |
+
auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
|
| 413 |
+
return ccl_kvs_addr;
|
| 414 |
+
} else {
|
| 415 |
+
ccl::kvs::address_type main_addr;
|
| 416 |
+
auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
|
| 417 |
+
return ccl_kvs_addr;
|
| 418 |
+
}
|
| 419 |
+
}
|
| 420 |
+
|
| 421 |
+
// Create a sub-communicator of `size` ranks (this process being `rank` within
// it) and register it under `ranks` for _get_comm_from_group() lookup.
// Non-root ranks reconstruct the sub-group kvs from the address in kvs_data;
// the root is assumed to have created sub_kvs via get_sub_kvs_addr(true).
void initialize_sub_comm(int size, int rank, torch::Tensor& kvs_data, std::vector<int> ranks)
{
    ccl::kvs::address_type main_addr;
    if (rank != 0) {
        memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size());
        sub_kvs = ccl::create_kvs(main_addr);
    }
    _ccl_comms.push_back(ccl::create_communicator(size, rank, sub_kvs));
    // Newly appended communicator lives at the back of _ccl_comms.
    group_to_comm_id[ranks] = _ccl_comms.size() - 1;
}
|
| 431 |
+
|
| 432 |
+
// Map a torch scalar type onto the corresponding oneCCL datatype.
// Unrecognized types fall back to int8 (byte-wise treatment).
ccl::datatype get_ccl_datatype(c10::ScalarType type)
{
    switch (type) {
        case c10::ScalarType::Int: return ccl::datatype::int32;
        case c10::ScalarType::Long: return ccl::datatype::int64;
        case c10::ScalarType::Float: return ccl::datatype::float32;
        case c10::ScalarType::Double: return ccl::datatype::float64;
        case c10::ScalarType::BFloat16: return ccl::datatype::bfloat16;
        case c10::ScalarType::Half: return ccl::datatype::float16;
        default: return ccl::datatype::int8;
    }
}
|
| 446 |
+
|
| 447 |
+
// Translate a deepspeed.comm.ReduceOp python object into a ccl::reduction,
// with special handling for bool tensors. Throws std::runtime_error for
// non-ReduceOp arguments, AVG on bool tensors, and unrecognized ops.
ccl::reduction get_ccl_reduce_op(py::object op, at::Tensor& input)
{
    py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp");
    if (!py::isinstance(op, ReduceOp)) {
        throw std::runtime_error("Error: Op must be of type ReduceOp");
    }

    int op_val = py::int_(op.attr("value"));

    if (input.scalar_type() == at::kBool) {
        if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) {
            // For bool tensors, map sum to max, which both represent a bitwise or.
            // This is to prevent overflow issues with sum, since we use uint8 to
            // represent a bool (see cclDataType mapping).
            // Fix: return here. The original assigned max and then fell through
            // to the generic SUM branch below, which overwrote it with sum.
            return ccl::reduction::max;
        } else if (op_val == (int)py::int_(ReduceOp.attr("AVG").attr("value"))) {
            throw std::runtime_error("Error: For bool tensors, op must be of type ReduceOp");
        }
    }

    if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) {
        return ccl::reduction::sum;
    } else if (op_val == (int)py::int_(ReduceOp.attr("MIN").attr("value"))) {
        return ccl::reduction::min;
    } else if (op_val == (int)py::int_(ReduceOp.attr("MAX").attr("value"))) {
        return ccl::reduction::max;
    } else if (op_val == (int)py::int_(ReduceOp.attr("PRODUCT").attr("value"))) {
        return ccl::reduction::prod;
    } else {
        throw std::runtime_error("Error: Unrecognized ReduceOp type");
    }
}
|
| 481 |
+
|
| 482 |
+
// Broadcast `data` in place from rank `src` over the communicator matching
// `group`. `async_op` is accepted for API parity but the call always waits
// (see the async TODO on all_reduce below).
void broadcast(torch::Tensor& data, int src, std::vector<int> group, bool async_op)
{
    CCLCHECK(ccl::broadcast(data.data_ptr(),
                            data.numel(),
                            get_ccl_datatype(data.scalar_type()),
                            src,
                            _get_comm_from_group(group))
                 .wait());
}
|
| 491 |
+
|
| 492 |
+
// TODO: implement torch's async_op behavior, document it.
|
| 493 |
+
// In-place allreduce of `data` (send and recv buffers are the same pointer)
// over the communicator matching `group`.
// TODO: implement torch's async_op behavior, document it.
void all_reduce(torch::Tensor& data, py::object op, std::vector<int> group, bool async_op)
{
    CCLCHECK(ccl::allreduce(data.data_ptr(),
                            data.data_ptr(),
                            data.numel(),
                            get_ccl_datatype(data.scalar_type()),
                            get_ccl_reduce_op(op, data),
                            _get_comm_from_group(group))
                 .wait());
}
|
| 503 |
+
|
| 504 |
+
// In-place allreduce with oneCCL operation caching enabled: the schedule built
// for a given match_id is reused on subsequent calls, amortizing setup cost.
// `async_op` is accepted for API parity but the call always waits.
void all_reduce_caching(torch::Tensor& data,
                        py::object op,
                        std::string match_id,
                        std::vector<int> group,
                        bool async_op)
{
    ccl::allreduce_attr attr = ccl::default_allreduce_attr;
    auto match_str = ccl::v1::string(match_id);
    attr.template set<ccl::operation_attr_id::to_cache>(true);
    attr.template set<ccl::operation_attr_id::match_id>(match_str);
    // To control this, use operation attribute and set true value for to_cache field and unique
    // string (for example, tensor name) for match_id field. Note that:
    // match_id should be the same for a specific communication operation across all ranks.
    // If the same tensor is a part of different communication operations, match_id should have
    // different values for each of these operations.
    CCLCHECK(ccl::allreduce(data.data_ptr(),
                            data.data_ptr(),
                            data.numel(),
                            get_ccl_datatype(data.scalar_type()),
                            get_ccl_reduce_op(op, data),
                            _get_comm_from_group(group),
                            attr)
                 .wait());
}
|
| 528 |
+
|
| 529 |
+
static void parallel_memcpy(void* to, void* from, size_t n_bytes)
|
| 530 |
+
__attribute__((target("avx512bw")));
|
| 531 |
+
static void parallel_memcpy(void* to, void* from, size_t n_bytes)
|
| 532 |
+
{
|
| 533 |
+
#pragma omp parallel for
|
| 534 |
+
for (int i = 0; i < n_bytes; i += VECTOR_LENGTH_IN_BYTES) {
|
| 535 |
+
auto val = _mm256_loadu_si256((__m256i*)((char*)from + i));
|
| 536 |
+
_mm256_storeu_si256((__m256i*)((char*)to + i), val);
|
| 537 |
+
}
|
| 538 |
+
}
|
| 539 |
+
|
| 540 |
+
// Low-latency SUM allreduce for inference. When every rank shares the machine
// (all_ranks_local_p) and the payload is bf16/fp32 with a 32-byte-aligned byte
// size, a shared-memory protocol is used: each rank copies its chunk into its
// workspace slot, rank 0 reduces all slots into slot 0, and everyone copies
// the result back. Otherwise the call falls back to a plain oneCCL allreduce.
// Only ReduceOp.SUM is supported (asserted). `async_op` is ignored.
void inference_all_reduce(torch::Tensor& data, py::object op, bool async_op)
{
    static py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp");
    static auto ReduceOpSum = (int)py::int_(ReduceOp.attr("SUM").attr("value"));

    assert(py::int_(op.attr("value")) == ReduceOpSum);

    auto numel = data.numel();

    // data_size is the payload size in bytes; only bf16 (2 B) and fp32 (4 B)
    // take the SHM fast path.
    int data_size = 0;
    bool data_type_fallback = false;

    switch (data.scalar_type()) {
        case c10::ScalarType::BFloat16: data_size = numel * 2; break;
        case c10::ScalarType::Float: data_size = numel * 4; break;
        default: data_type_fallback = true;
    }

    if (data_type_fallback || (data_size % VECTOR_LENGTH_IN_BYTES) != 0 || !all_ranks_local_p) {
        // fallback to oneccl allreduce
        CCLCHECK(ccl::allreduce(data.data_ptr(),
                                data.data_ptr(),
                                data.numel(),
                                get_ccl_datatype(data.scalar_type()),
                                get_ccl_reduce_op(op, data),
                                _get_comm_from_group())
                     .wait());
        return;
    }

    // Process the tensor in MAX_BUF_SIZE chunks (the size of each rank's
    // workspace slot). The release fences pair with the volatile spin-waits in
    // wait_buffer_state_until*() to publish buffer contents before the state flip.
    for (int offset = 0; offset < data_size; offset += MAX_BUF_SIZE) {
        auto data_ptr = ((char*)(data.data_ptr()) + offset);
        size_t chunk_size = data_size - offset > MAX_BUF_SIZE ? MAX_BUF_SIZE : data_size - offset;
        size_t chunk_el = chunk_size / (data_size / numel);

        // Stage 1: every rank publishes its chunk into its own slot.
        parallel_memcpy(workspace[world_rank].buffer, data_ptr, chunk_size);
        std::atomic_thread_fence(std::memory_order_release);
        workspace[world_rank].state = coll_allreduce_naive__copy_in_done;

        if (world_rank == 0) {
            // compute allreduce result on rank 0
            for (int i = 1; i < world_size; i++) {
                // wait until the other rank copy the buffer
                wait_buffer_state_until(i, coll_allreduce_naive__copy_in_done);
            }
            reduce_all_buffers(workspace, chunk_el, data.scalar_type(), world_size);
            std::atomic_thread_fence(std::memory_order_release);
            workspace[world_rank].state = coll_allreduce_naive__reduce_done;
            parallel_memcpy(data_ptr, workspace[0].buffer, chunk_size);
        }
        if (world_rank != 0) {
            // Stage 2: other ranks wait for the reduction and copy the result out.
            wait_buffer_state_until(0, coll_allreduce_naive__reduce_done);
            parallel_memcpy(data_ptr, workspace[0].buffer, chunk_size);
            std::atomic_thread_fence(std::memory_order_release);
            workspace[world_rank].state = coll_allreduce_naive__copy_out_done;
        }
        if (world_rank == 0) {
            // Stage 3: rank 0 waits for all copy-outs before resetting its state.
            for (int i = 1; i < world_size; i++) {
                wait_buffer_state_until(i, coll_allreduce_naive__copy_out_done);
            }
            std::atomic_thread_fence(std::memory_order_release);
            workspace[world_rank].state = coll_begin;
        }
        if (world_rank != 0) {
            // if rank 0 spin too fast it could be in state 1 of next allreduce
            // in this case wait_buffer_state_until(0, 0) may cause deadlock
            // what we are certain is when rank 0 finishes the state won't be 2
            wait_buffer_state_until_not(0, coll_allreduce_naive__reduce_done);
            workspace[world_rank].state = coll_begin;
        }
    }
}
|
| 612 |
+
|
| 613 |
+
// Block until every rank in `group` reaches this point. `async_op` is accepted
// for API parity but the barrier always waits.
void barrier(std::vector<int> group, bool async_op)
{
    CCLCHECK(ccl::barrier(_get_comm_from_group(group)).wait());
}
|
| 617 |
+
|
| 618 |
+
// Names of the collective operations this backend implements, in the order the
// python side expects them.
std::vector<std::string> get_available_coll()
{
    return {"broadcast", "all_reduce", "inference_all_reduce", "all_reduce_caching", "barrier"};
}
|
| 624 |
+
|
| 625 |
+
// Python bindings: expose the oneCCL/SHM communication backend to deepspeed.comm.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    // kvs bootstrap and communicator setup
    m.def("get_kvs_addr", &get_kvs_addr, "create and get main kvs addr");
    m.def("initialize", &initialize, "ccl initialize");
    m.def("get_rank", &get_rank, "get rank");
    m.def("get_world_size", &get_world_size, "get world size");
    // collectives
    m.def("broadcast", &broadcast, "ccl broadcast");
    m.def("all_reduce", &all_reduce, "ccl all_reduce")
;
    m.def("inference_all_reduce", &inference_all_reduce, "low latency all_reduce implementation");
    m.def("all_reduce_caching", &all_reduce_caching, "ccl all_reduce with caching");
    m.def("barrier", &barrier, "barrier");
    // sub-communicator management and capability query
    m.def("initialize_sub_comm", &initialize_sub_comm, "initialize_sub_comm");
    m.def("get_sub_kvs_addr", &get_sub_kvs_addr, "get_sub_kvs_addr");
    m.def("get_available_coll", &get_available_coll, "get_available_coll");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "cpu_lion.h"
|
| 7 |
+
|
| 8 |
+
// C++ interface
|
| 9 |
+
|
| 10 |
+
// CPU implementation of the multi-tensor Lion step, mirroring the CUDA
// fused kernel's signature so callers can swap implementations.
// `tensor_lists` follows the [grads, params, exp_avgs] layout (gpmv);
// `chunk_size`, `noop_flag` and `mode` are accepted only for interface
// compatibility and are unused on CPU.
void multi_tensor_lion(int chunk_size,
                       at::Tensor noop_flag,
                       std::vector<std::vector<at::Tensor>> tensor_lists, /*gpmv*/
                       const float lr,
                       const float beta1,
                       const float beta2,
                       const int step,
                       const int mode,
                       const float weight_decay)
{
    // Lazily create one shared CPU optimizer instance (id 0) on first call.
    // NOTE(review): the flag check/set is not synchronized — confirm callers
    // never invoke this concurrently from multiple threads.
    static bool initialized = false;
    if (!initialized) {
        create_lion_optimizer(0);
        initialized = true;
    }
    // Apply one Lion step per parameter tensor.  ds_lion_step expects
    // (params, grads, exp_avg), so lists [1] and [0] are swapped here.
    for (int i = 0; i < tensor_lists[0].size(); i++) {
        ds_lion_step(0,
                     step,
                     lr,
                     beta1,
                     beta2,
                     weight_decay,
                     tensor_lists[1][i],
                     tensor_lists[0][i],
                     tensor_lists[2][i]);
    }
}
|
| 37 |
+
|
| 38 |
+
// Expose the CPU multi-tensor Lion step under the same Python name as the
// CUDA extension so the two builds are interchangeable from Python.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("multi_tensor_lion",
          &multi_tensor_lion,
          "Compute and apply gradient update to parameters for Lion optimizer");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "cpu_lion.h"
|
| 7 |
+
|
| 8 |
+
// Python bindings for the CPU Lion optimizer: per-tensor update entry
// points plus create/destroy of optimizer instances keyed by integer id.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("lion_update", &ds_lion_step, "DeepSpeed CPU Lion update (C++)");
    m.def("lion_update_copy",
          &ds_lion_step_plus_copy,
          "DeepSpeed CPU Lion update and param copy (C++)");
    m.def("create_lion", &create_lion_optimizer, "DeepSpeed CPU Lion (C++)");
    m.def("destroy_lion", &destroy_lion_optimizer, "DeepSpeed CPU Lion destroy (C++)");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
#include <cassert>
|
| 8 |
+
#include <cmath>
|
| 9 |
+
#include <iostream>
|
| 10 |
+
#include <memory>
|
| 11 |
+
#include <type_traits>
|
| 12 |
+
#include <unordered_map>
|
| 13 |
+
#include "cpu_lion.h"
|
| 14 |
+
|
| 15 |
+
#if defined(__ENABLE_CUDA__)
|
| 16 |
+
#include <cuda_runtime_api.h>
|
| 17 |
+
#include "cublas_v2.h"
|
| 18 |
+
#include "cuda.h"
|
| 19 |
+
#include "curand.h"
|
| 20 |
+
#include "custom_cuda_layers.h"
|
| 21 |
+
#endif
|
| 22 |
+
|
| 23 |
+
static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;
|
| 24 |
+
|
| 25 |
+
// C++ interface
|
| 26 |
+
|
| 27 |
+
// Scalar (1-wide) Lion update over [_params, _params + _param_size).
// When AVX is available, Step_AVX<1> first handles a vectorizable prefix and
// reports how much it covered via `rounded_size`; this routine finishes the
// remainder in TILE-sized pieces so the optional device copy (`dev_params`)
// can be double-buffered.  If `half_precision` is set, `_params` and `grads`
// actually hold ds_half_precision_t values viewed through float pointers.
void Lion_Optimizer::Step_1(float* _params,
                            float* grads,
                            float* _exp_avg,
                            size_t _param_size,
                            ds_half_precision_t* dev_params,
                            bool half_precision)
{
    size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
    Step_AVX<1>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision);
#endif
    if (_param_size > rounded_size) {
        float betta1_minus1 = 1 - _betta1;
        float betta2_minus1 = 1 - _betta2;

        float alpha = _alpha;
        // Decoupled weight decay folded into one multiplier on the param.
        float after_decay = 1 - alpha * _weight_decay;
        // Only initialized (and only read) when half_precision is true.
        ds_half_precision_t* grads_cast_h;
        ds_half_precision_t* params_cast_h;
        if (half_precision) {
            grads_cast_h = reinterpret_cast<ds_half_precision_t*>(grads);
            params_cast_h = reinterpret_cast<ds_half_precision_t*>(_params);
        }

        for (size_t t = rounded_size; t < _param_size; t += TILE) {
            size_t copy_size = TILE;
            if ((t + TILE) > _param_size) copy_size = _param_size - t;
            size_t offset = copy_size + t;
            // Before reusing a double buffer, wait for the async copy that was
            // issued two tiles ago on the same buffer index to finish.
#if defined(__ENABLE_CUDA__)
            if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
#elif defined(__ENABLE_CANN__)
            if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
#endif
#pragma omp parallel for
            for (size_t k = t; k < offset; k++) {
                float grad = half_precision ? (float)grads_cast_h[k] : grads[k];
                float param = half_precision ? (float)params_cast_h[k] : _params[k];
                float momentum = _exp_avg[k];
                // Lion: update direction is -sign(beta1-interpolated momentum),
                // scaled by the learning rate.
                float tmp = momentum * _betta1;
                tmp = grad * betta1_minus1 + tmp;
                // Rely on portable C++ methods to manipulate the sign bit of a floating-point
                // number.
                tmp = -std::copysignf(alpha, tmp);
                if (_weight_decay > 0) {
                    param = param * after_decay + tmp;
                } else {
                    param = param + tmp;
                }
                // Momentum EMA uses beta2 (distinct from the beta1 direction mix).
                momentum = momentum * _betta2;
                momentum = grad * betta2_minus1 + momentum;
#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
                // Stage updated params in the host-side double buffer for the
                // asynchronous copy to the accelerator below.
                if (dev_params) _doubled_buffer[_buf_index][k - t] = param;
#endif
                if (half_precision)
                    params_cast_h[k] = (ds_half_precision_t)param;
                else
                    _params[k] = param;
                _exp_avg[k] = momentum;
            }
#if defined(__ENABLE_CUDA__)
            if (dev_params) {
                launch_param_update(
                    _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]);

                _buf_index = !_buf_index;
            }
#elif defined(__ENABLE_CANN__)
            if (dev_params) {
                size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
                aclrtMemcpy(dev_params + t,
                            memcpy_size,
                            _doubled_buffer[_buf_index],
                            memcpy_size,
                            aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);

                _buf_index = !_buf_index;
            }
#endif
        }
    }
}
|
| 108 |
+
|
| 109 |
+
void Lion_Optimizer::Step_4(float* _params,
|
| 110 |
+
float* grads,
|
| 111 |
+
float* _exp_avg,
|
| 112 |
+
size_t _param_size,
|
| 113 |
+
ds_half_precision_t* dev_params,
|
| 114 |
+
bool half_precision)
|
| 115 |
+
{
|
| 116 |
+
size_t rounded_size = 0;
|
| 117 |
+
#if defined(__AVX512__) or defined(__AVX256__)
|
| 118 |
+
Step_AVX<4>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision);
|
| 119 |
+
#endif
|
| 120 |
+
if (_param_size > rounded_size)
|
| 121 |
+
Step_1((_params + rounded_size),
|
| 122 |
+
(grads + rounded_size),
|
| 123 |
+
(_exp_avg + rounded_size),
|
| 124 |
+
(_param_size - rounded_size),
|
| 125 |
+
(dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
|
| 126 |
+
half_precision);
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
int create_lion_optimizer(int optimizer_id,
|
| 130 |
+
float alpha,
|
| 131 |
+
float betta1,
|
| 132 |
+
float betta2,
|
| 133 |
+
float weight_decay,
|
| 134 |
+
bool should_log)
|
| 135 |
+
{
|
| 136 |
+
auto opt = std::make_shared<Lion_Optimizer>(alpha, betta1, betta2, weight_decay);
|
| 137 |
+
|
| 138 |
+
s_optimizers[optimizer_id] = opt;
|
| 139 |
+
|
| 140 |
+
if (should_log) {
|
| 141 |
+
std::string avx_type = "";
|
| 142 |
+
#if defined(__AVX512__)
|
| 143 |
+
avx_type = "AVX512";
|
| 144 |
+
#else
|
| 145 |
+
#if defined(__AVX256__)
|
| 146 |
+
avx_type = "AVX2";
|
| 147 |
+
#else
|
| 148 |
+
avx_type = "scalar";
|
| 149 |
+
#endif
|
| 150 |
+
#endif
|
| 151 |
+
|
| 152 |
+
printf("Lion Optimizer #%d is created with %s arithmetic capability.\n",
|
| 153 |
+
optimizer_id,
|
| 154 |
+
avx_type.c_str());
|
| 155 |
+
printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f\n",
|
| 156 |
+
alpha,
|
| 157 |
+
betta1,
|
| 158 |
+
betta2,
|
| 159 |
+
weight_decay);
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
return 0;
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
void Lion_Optimizer::Step_8(float* _params,
|
| 166 |
+
float* grads,
|
| 167 |
+
float* _exp_avg,
|
| 168 |
+
size_t _param_size,
|
| 169 |
+
ds_half_precision_t* dev_params,
|
| 170 |
+
bool half_precision)
|
| 171 |
+
{
|
| 172 |
+
size_t rounded_size = 0;
|
| 173 |
+
#if defined(__AVX512__) or defined(__AVX256__)
|
| 174 |
+
Step_AVX<8>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision);
|
| 175 |
+
#endif
|
| 176 |
+
if (_param_size > rounded_size)
|
| 177 |
+
Step_4((_params + rounded_size),
|
| 178 |
+
(grads + rounded_size),
|
| 179 |
+
(_exp_avg + rounded_size),
|
| 180 |
+
(_param_size - rounded_size),
|
| 181 |
+
(dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
|
| 182 |
+
half_precision);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
// Run one Lion step of optimizer `optimizer_id` on a single
// (params, grads, exp_avg) tensor triple.  `step`/`beta1`/`beta2` and
// `lr`/`weight_decay` refresh the optimizer's cached state before the
// update.  Always returns 0.
// NOTE(review): tensors are made contiguous first; if a caller passes a
// non-contiguous `params` or `exp_avg`, the update lands in the temporary
// copy rather than the original storage — confirm callers pass contiguous
// tensors.
int ds_lion_step(int optimizer_id,
                 size_t step,
                 float lr,
                 float beta1,
                 float beta2,
                 float weight_decay,
                 torch::Tensor& params,
                 torch::Tensor& grads,
                 torch::Tensor& exp_avg)
{
    auto params_c = params.contiguous();
    auto grads_c = grads.contiguous();
    auto exp_avg_c = exp_avg.contiguous();

    // assert(params.options().dtype() == grads.options().dtype());

    // When dtype is half, these float* actually alias half-precision storage;
    // Step_8 re-interprets them based on the half_precision flag below.
    float* params_ptr = (float*)params_c.data_ptr();
    float* grads_ptr = (float*)grads_c.data_ptr();
    float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();

    std::shared_ptr<Lion_Optimizer> opt =
        std::static_pointer_cast<Lion_Optimizer>(s_optimizers[optimizer_id]);
    opt->IncrementStep(step, beta1, beta2);
    opt->update_state(lr, weight_decay);

    // No device copy requested on this path (dev_params == nullptr).
    opt->Step_8(params_ptr,
                grads_ptr,
                exp_avg_ptr,
                params_c.numel(),
                nullptr,
                (params.options().dtype() == at::kHalf));

#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
    opt->SynchronizeStreams();
#endif
    return 0;
}
|
| 222 |
+
|
| 223 |
+
// Same as ds_lion_step, but additionally streams the updated parameters into
// the accelerator-resident `gpu_params` tensor (as ds_half_precision_t) via
// the optimizer's double-buffered copy path.  Only available in CUDA/CANN
// builds; other builds trap with assert(false).  Always returns 0.
int ds_lion_step_plus_copy(int optimizer_id,
                           size_t step,
                           float lr,
                           float beta1,
                           float beta2,
                           float weight_decay,
                           torch::Tensor& params,
                           torch::Tensor& grads,
                           torch::Tensor& exp_avg,
                           torch::Tensor& gpu_params)
{
#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
    auto params_c = params.contiguous();
    auto gpu_params_c = gpu_params.contiguous();
    auto exp_avg_c = exp_avg.contiguous();
    auto grads_c = grads.contiguous();

    float* params_ptr = (float*)params_c.data_ptr();
    float* grads_ptr = (float*)grads_c.data_ptr();
    ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr();
    float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();

    std::shared_ptr<Lion_Optimizer> opt =
        std::static_pointer_cast<Lion_Optimizer>(s_optimizers[optimizer_id]);
    opt->IncrementStep(step, beta1, beta2);
    opt->update_state(lr, weight_decay);
    // Non-null dev_params triggers the staged host->device copy in Step_1.
    opt->Step_8(params_ptr,
                grads_ptr,
                exp_avg_ptr,
                params_c.numel(),
                gpu_params_ptr,
                (params.options().dtype() == at::kHalf));

    opt->SynchronizeStreams();
#else
    // This entry point is meaningless without an accelerator build.
    assert(false);
#endif
    return 0;
}
|
| 262 |
+
|
| 263 |
+
int destroy_lion_optimizer(int optimizer_id)
|
| 264 |
+
{
|
| 265 |
+
s_optimizers.erase(optimizer_id);
|
| 266 |
+
|
| 267 |
+
return 0;
|
| 268 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
|
| 8 |
+
// Forward declaration of the fused CUDA Lion driver (defined in
// multi_tensor_lion.cu).  tensor_lists layout: [grads, params, exp_avgs].
void multi_tensor_lion_cuda(int chunk_size,
                            at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr,
                            const float beta1,
                            const float beta2,
                            const int step,
                            const float weight_decay);

// Expose the CUDA fused Lion step under the same Python name as the CPU
// extension so the two builds are interchangeable from Python.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("multi_tensor_lion",
          &multi_tensor_lion_cuda,
          "Compute and apply gradient update to parameters for Lion optimizer");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Copyright NVIDIA/apex
|
| 8 |
+
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#include <ATen/ATen.h>
|
| 12 |
+
#include <ATen/AccumulateType.h>
|
| 13 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 14 |
+
#include <ATen/cuda/Exceptions.h>
|
| 15 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 16 |
+
#include "compat.h"
|
| 17 |
+
|
| 18 |
+
#include <assert.h>
|
| 19 |
+
|
| 20 |
+
// #include <iostream>
|
| 21 |
+
|
| 22 |
+
// This header is the one-stop shop for all your multi-tensor apply needs.
|
| 23 |
+
|
| 24 |
+
// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson)
// Capacity tables indexed by (depth - 1): how many tensors / chunk-blocks fit
// in one by-value kernel-argument payload for a given list depth.
constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};

// Chunk-dispatch metadata passed by value as a kernel argument (hence the
// fixed-size arrays sized by the tables above).  `n` is the number of
// parallel tensor lists.
template <int n>
struct TensorListMetadata {
    void* addresses[n][depth_to_max_tensors[n - 1]];  // per-(list, tensor) data pointer
    int sizes[depth_to_max_tensors[n - 1]];           // numel of each tensor
    unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];  // CUDA block -> tensor slot
    int block_to_chunk[depth_to_max_blocks[n - 1]];  // I fear this needs to be a full int.
    int start_tensor_this_launch;
};
|
| 36 |
+
|
| 37 |
+
// Trampoline kernel: each CUDA block forwards its chunk description (carried
// in the by-value metadata `tl`) to the user-supplied functor `callable`.
template <typename T, typename U, typename... ArgTypes>
__global__ void multi_tensor_apply_kernel(int chunk_size,
                                          volatile int* noop_flag,
                                          T tl,
                                          U callable,
                                          ArgTypes... args)
{
    // Hand the chunk information to the user-supplied functor to process however it likes.
    callable(chunk_size, noop_flag, tl, args...);
}
|
| 47 |
+
|
| 48 |
+
// Split every tensor in `tensor_lists` into `chunk_size`-element chunks,
// pack (pointers, sizes, block->tensor/chunk maps) into a TensorListMetadata
// payload, and launch multi_tensor_apply_kernel each time the payload's
// tensor or block capacity fills up (or the input is exhausted).
// `depth` is the number of parallel lists (e.g. 3 for grad/param/momentum);
// all lists must be the same length, on the same CUDA device, contiguous,
// and element-count-matched position by position.
template <int depth, typename T, typename... ArgTypes>
void multi_tensor_apply(int block_size,
                        int chunk_size,
                        const at::Tensor& noop_flag,
                        const std::vector<std::vector<at::Tensor>>& tensor_lists,
                        T callable,
                        ArgTypes... args)
{
    TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
    int len0 = tensor_lists[0].size();
    TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
    auto ref_device = tensor_lists[0][0].device();
    TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda");
    for (int l = 0; l < tensor_lists.size(); l++)  // No range-based for because I need indices
    {
        TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
        for (int t = 0; t < tensor_lists[l].size(); t++) {
            // TODO: Print which tensor fails.
            bool contiguous_memory = tensor_lists[l][t].is_contiguous();
#ifdef VERSION_GE_1_5
            // Channels-last layouts also count as contiguous on newer PyTorch.
            contiguous_memory = (contiguous_memory ||
                                 tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast));
#endif
            TORCH_CHECK(contiguous_memory, "A tensor was not contiguous.");
            TORCH_CHECK(tensor_lists[l][t].device() == ref_device,
                        "A tensor was not on the same device as the first tensor");
            TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
        }
    }

    int ntensors = tensor_lists[0].size();

    TensorListMetadata<depth> tl;

    const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0]));
    auto stream = at::cuda::getCurrentCUDAStream();

    tl.start_tensor_this_launch = 0;
    int loc_block_info = 0;   // next free slot in block_to_tensor/block_to_chunk
    int loc_tensor_info = 0;  // next free slot in addresses/sizes
    for (int t = 0; t < ntensors; t++) {
        tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel();
        for (int d = 0; d < depth; d++)
            tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr();
        loc_tensor_info++;

        int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size;

        for (int chunk = 0; chunk < chunks_this_tensor; chunk++) {
            tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
            tl.block_to_chunk[loc_block_info] = chunk;
            loc_block_info++;

            // Flush (launch) when either capacity table fills or input ends.
            bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
                                 chunk == chunks_this_tensor - 1);
            bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]);
            bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1);
            if (tensors_full || blocks_full || last_chunk) {
                // using accscalar_t = acc_type<scalar_t, true>;
                multi_tensor_apply_kernel<<<loc_block_info, block_size, 0, stream>>>(
                    chunk_size, noop_flag.DATA_PTR<int>(), tl, callable, args...);

                AT_CUDA_CHECK(cudaGetLastError());

                // Reset. The control flow possibilities here make my brain hurt.
                loc_block_info = 0;
                if (chunk == chunks_this_tensor - 1) {
                    // Current tensor fully emitted; next launch starts fresh.
                    loc_tensor_info = 0;
                    tl.start_tensor_this_launch = t + 1;
                } else {
                    // Current tensor still has chunks left: carry its entry into
                    // slot 0 so the next launch can keep chunking it.
                    tl.sizes[0] = tl.sizes[loc_tensor_info - 1];
                    for (int d = 0; d < depth; d++)
                        tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1];
                    loc_tensor_info = 1;
                    tl.start_tensor_this_launch = t;
                }
            }
        }
    }
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Copyright NVIDIA/apex
|
| 8 |
+
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#include <ATen/ATen.h>
|
| 12 |
+
#include <ATen/AccumulateType.h>
|
| 13 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 14 |
+
#include <ATen/cuda/Exceptions.h>
|
| 15 |
+
// Another possibility:
|
| 16 |
+
// #include <torch/all.h>
|
| 17 |
+
|
| 18 |
+
#include <assert.h>
|
| 19 |
+
|
| 20 |
+
#include "multi_tensor_apply.cuh"
|
| 21 |
+
#include "type_shim.h"
|
| 22 |
+
|
| 23 |
+
#define BLOCK_SIZE 512
|
| 24 |
+
#define ILP 4
|
| 25 |
+
|
| 26 |
+
using MATH_T = float;
|
| 27 |
+
|
| 28 |
+
// Per-chunk Lion update, invoked through multi_tensor_apply_kernel.
// tl.addresses layout: [0]=grads, [1]=params, [2]=momentum (exp_avg).
// Each CUDA block processes one chunk with ILP-wide register tiling;
// math is done in MATH_T (float) regardless of the storage type T.
template <typename T>
struct LionFunctor {
    __device__ __forceinline__ void operator()(int chunk_size,
                                               volatile int* noop_gmem,
                                               TensorListMetadata<3>& tl,
                                               const float beta1,
                                               const float beta2,
                                               const float lr,
                                               const float decay)
    {
        // I'd like this kernel to propagate infs/nans.
        // if(*noop_gmem == 1)
        //   return;

        int tensor_loc = tl.block_to_tensor[blockIdx.x];

        // potentially use to pass in list of scalar
        // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

        int chunk_idx = tl.block_to_chunk[blockIdx.x];
        int n = tl.sizes[tensor_loc];

        T* g = (T*)tl.addresses[0][tensor_loc];
        g += chunk_idx * chunk_size;

        T* p = (T*)tl.addresses[1][tensor_loc];
        p += chunk_idx * chunk_size;

        T* m = (T*)tl.addresses[2][tensor_loc];
        m += chunk_idx * chunk_size;

        // Remaining elements from this chunk's start to the tensor's end.
        n -= chunk_idx * chunk_size;

        // Decoupled weight decay folded into one multiplier on the param.
        MATH_T after_decay = 1.0f - lr * decay;

        // see note in multi_tensor_scale_kernel.cu
        for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) {
            MATH_T r_g[ILP];
            MATH_T r_p[ILP];
            MATH_T r_m[ILP];
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                int i = i_start + threadIdx.x + ii * blockDim.x;
                if (i < n && i < chunk_size) {
                    r_g[ii] = g[i];
                    r_p[ii] = p[i];
                    r_m[ii] = m[i];
                } else {
                    // Out-of-range lanes compute on zeros and are never stored.
                    r_g[ii] = MATH_T(0);
                    r_p[ii] = MATH_T(0);
                    r_m[ii] = MATH_T(0);
                }
            }
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                // Lion: step by -lr * sign(beta1-interpolated momentum).
                // NOTE(review): c == 0 takes the +lr branch rather than 0 —
                // confirm this matches the intended sign() convention.
                MATH_T c = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
                MATH_T update = c > 0 ? (-lr) : lr;
                r_p[ii] = r_p[ii] * after_decay + update;
                r_m[ii] = beta2 * r_m[ii] + (1 - beta2) * r_g[ii];
            }
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                int i = i_start + threadIdx.x + ii * blockDim.x;
                if (i < n && i < chunk_size) {
                    p[i] = r_p[ii];
                    m[i] = r_m[ii];
                }
            }
        }
    }
};
|
| 99 |
+
|
| 100 |
+
// Host-side driver: dispatch on the tensors' scalar type and run LionFunctor
// over every chunk of every (grad, param, momentum) triple via
// multi_tensor_apply.  `step` is accepted for interface parity but is not
// passed to the functor — the update here has no step-dependent term.
void multi_tensor_lion_cuda(int chunk_size,
                            at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr,
                            const float beta1,
                            const float beta2,
                            const int step,
                            const float weight_decay)
{
    using namespace at;

    // Assume single type across p,g,m1,m2 now
    DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(),
                                   0,
                                   "lion",
                                   multi_tensor_apply<3>(BLOCK_SIZE,
                                                         chunk_size,
                                                         noop_flag,
                                                         tensor_lists,
                                                         LionFunctor<scalar_t_0>(),
                                                         beta1,
                                                         beta2,
                                                         lr,
                                                         weight_decay);)

    AT_CUDA_CHECK(cudaGetLastError());
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "custom_cuda_layers.h"
|
| 7 |
+
#include "memory_access_utils.h"
|
| 8 |
+
|
| 9 |
+
namespace cg = cooperative_groups;
|
| 10 |
+
|
| 11 |
+
namespace td_data {
|
| 12 |
+
constexpr int granularity = 16;
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
// Gather `sampled_tokens` selected rows per batch out of `activations` into
// the dense `retained_tokens` buffer.  Launched with grid (batch, output
// token index) — see launch_gather_tokens; each block copies one token's
// `channels` values using 16-byte vectorized loads/stores.
template <typename T>
__global__ void gather_tokens_impl(T* retained_tokens,
                                   const T* activations,
                                   int32_t* gather_indices,
                                   int32_t sampled_tokens,
                                   int32_t channels,
                                   int32_t read_batch_stride,
                                   int32_t read_seq_stride,
                                   int32_t write_batch_stride,
                                   int32_t write_seq_stride)
{
    // Elements moved per 16-byte memory transaction for this dtype.
    constexpr int mem_vals_t = td_data::granularity / sizeof(T);

    cg::thread_block tb = cg::this_thread_block();

    // Source sequence position for this (batch, output-slot) pair.
    const int gather_idx = gather_indices[tb.group_index().x * sampled_tokens + tb.group_index().y];

    const int read_offset = read_batch_stride * tb.group_index().x + read_seq_stride * gather_idx;
    const int write_offset =
        write_batch_stride * tb.group_index().x + write_seq_stride * tb.group_index().y;

    // NOTE(review): appears to assume `channels` is a multiple of mem_vals_t;
    // a ragged tail would access past the row — confirm callers guarantee it.
    for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += blockDim.x * mem_vals_t) {
        T local_data[mem_vals_t];
        mem_access::load_global<td_data::granularity>(local_data, activations + read_offset + i);
        mem_access::store_global<td_data::granularity>(retained_tokens + write_offset + i,
                                                       local_data);
    }
}
|
| 43 |
+
|
| 44 |
+
// Host launcher for gather_tokens_impl.  One block per (batch, sampled
// token); thread count is sized to the number of 16-byte vector loads per
// row, capped at 1024.
template <typename T>
void launch_gather_tokens(T* retained_tokens,
                          T* activations,
                          int32_t* gather_indices,
                          int32_t batch_size,
                          int32_t sampled_tokens,
                          int32_t channels,
                          int32_t read_batch_stride,
                          int32_t read_seq_stride,
                          int32_t write_batch_stride,
                          int32_t write_seq_stride,
                          cudaStream_t stream)
{
    constexpr int mem_vals_t = td_data::granularity / sizeof(T);

    // Vectorized loads needed to cover one row of `channels` elements.
    const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t;
    const int threads = (load_steps >= 1024) ? 1024 : load_steps;

    dim3 block(threads);
    dim3 grid(batch_size, sampled_tokens);

    gather_tokens_impl<T><<<grid, block, 0, stream>>>(retained_tokens,
                                                      activations,
                                                      gather_indices,
                                                      sampled_tokens,
                                                      channels,
                                                      read_batch_stride,
                                                      read_seq_stride,
                                                      write_batch_stride,
                                                      write_seq_stride);
}
|
| 75 |
+
|
| 76 |
+
template void launch_gather_tokens<float>(float*,
|
| 77 |
+
float*,
|
| 78 |
+
int32_t*,
|
| 79 |
+
int32_t,
|
| 80 |
+
int32_t,
|
| 81 |
+
int32_t,
|
| 82 |
+
int32_t,
|
| 83 |
+
int32_t,
|
| 84 |
+
int32_t,
|
| 85 |
+
int32_t,
|
| 86 |
+
cudaStream_t);
|
| 87 |
+
|
| 88 |
+
template void launch_gather_tokens<__half>(__half*,
|
| 89 |
+
__half*,
|
| 90 |
+
int32_t*,
|
| 91 |
+
int32_t,
|
| 92 |
+
int32_t,
|
| 93 |
+
int32_t,
|
| 94 |
+
int32_t,
|
| 95 |
+
int32_t,
|
| 96 |
+
int32_t,
|
| 97 |
+
int32_t,
|
| 98 |
+
cudaStream_t);
|
| 99 |
+
|
| 100 |
+
// Inverse of gather_tokens_impl: copy each dense row of `layer_activations`
// back to its original sequence position (given by `gather_indices`) inside
// `all_activations`.  Grid: (batch, retained token index); each block moves
// one token's `channels` values in 16-byte vectorized loads/stores.
template <typename T>
__global__ void scatter_tokens_impl(T* all_activations,
                                    const T* layer_activations,
                                    int32_t* gather_indices,
                                    int32_t retained_tokens,
                                    int32_t channels,
                                    int32_t read_batch_stride,
                                    int32_t read_seq_stride,
                                    int32_t write_batch_stride,
                                    int32_t write_seq_stride)
{
    // Elements moved per 16-byte memory transaction for this dtype.
    constexpr int mem_vals_t = td_data::granularity / sizeof(T);

    cg::thread_block tb = cg::this_thread_block();

    // Destination sequence position for this (batch, dense-slot) pair.
    const int gather_idx =
        gather_indices[tb.group_index().x * retained_tokens + tb.group_index().y];

    const int read_offset =
        read_batch_stride * tb.group_index().x + read_seq_stride * tb.group_index().y;
    const int write_offset =
        write_batch_stride * tb.group_index().x + write_seq_stride * gather_idx;

    // NOTE(review): same divisibility assumption on `channels` as the gather
    // kernel — confirm callers guarantee it.
    for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += mem_vals_t * blockDim.x) {
        T local_data[mem_vals_t];
        mem_access::load_global<td_data::granularity>(local_data,
                                                      layer_activations + read_offset + i);
        mem_access::store_global<td_data::granularity>(all_activations + write_offset + i,
                                                       local_data);
    }
}
|
| 131 |
+
|
| 132 |
+
/*
Host-side launcher for scatter_tokens_impl.

One block per (batch, retained token) pair. The block size equals the
number of vectorized steps needed to cover the channel dimension, capped
at 1024 threads; any remainder is absorbed by the channel loop inside the
kernel, which strides by the block size.
*/
template <typename T>
void launch_scatter_tokens(T* all_activations,
                           T* layer_activations,
                           int32_t* gather_indices,
                           int32_t batch_size,
                           int32_t sampled_tokens,
                           int32_t channels,
                           int32_t read_batch_stride,
                           int32_t read_seq_stride,
                           int32_t write_batch_stride,
                           int32_t write_seq_stride,
                           cudaStream_t stream)
{
    constexpr int vals_per_access = td_data::granularity / sizeof(T);

    const int vector_steps = (channels + vals_per_access - 1) / vals_per_access;
    const int block_threads = (vector_steps >= 1024) ? 1024 : vector_steps;

    const dim3 block(block_threads);
    const dim3 grid(batch_size, sampled_tokens);

    scatter_tokens_impl<T><<<grid, block, 0, stream>>>(all_activations,
                                                       layer_activations,
                                                       gather_indices,
                                                       sampled_tokens,
                                                       channels,
                                                       read_batch_stride,
                                                       read_seq_stride,
                                                       write_batch_stride,
                                                       write_seq_stride);
}

// Explicit instantiations for the dtypes used by the binding layer.
template void launch_scatter_tokens<float>(float*, float*, int32_t*, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, cudaStream_t);

template void launch_scatter_tokens<__half>(__half*, __half*, int32_t*, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, cudaStream_t);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
#include <vector>
|
| 8 |
+
#include "custom_cuda_layers.h"
|
| 9 |
+
|
| 10 |
+
// In-place, comparison-free sort of retained-token indices.
//
// unsorted_token_ids: [layers, batch, reserved_tokens] int32 tensor; each
// (layer, batch) row is sorted independently on the current CUDA stream.
// original_tokens bounds the value range of the ids.
// Returns the same tensor (mutated in place) to allow chaining.
torch::Tensor token_sort_(torch::Tensor& unsorted_token_ids, int64_t original_tokens)
{
    const int num_layers = unsorted_token_ids.size(0);
    const int num_batches = unsorted_token_ids.size(1);
    const int tokens_per_row = unsorted_token_ids.size(2);

    launch_token_sort(unsorted_token_ids.data_ptr<int32_t>(),
                      num_layers,
                      num_batches,
                      tokens_per_row,
                      original_tokens,
                      c10::cuda::getCurrentCUDAStream());

    return unsorted_token_ids;
}
|
| 25 |
+
|
| 26 |
+
/*
Gather the retained tokens out of a full activation tensor.

activations:    [N, S, C] when batch_first, otherwise [S, N, C].
sorted_indices: always [N, retained].
Returns a fresh tensor in the same layout with the sequence dimension
shrunk to `retained`. Non-fp32 inputs are treated as fp16.
NOTE(review): the original shape/alignment TORCH_CHECKs were commented out
upstream; callers are trusted to pass channels divisible by the kernel's
vector width — confirm before re-enabling.
*/
torch::Tensor token_gather(torch::Tensor& activations,
                           torch::Tensor& sorted_indices,
                           bool batch_first)
{
    const int64_t out_dim0 = batch_first ? sorted_indices.size(0) : sorted_indices.size(1);
    const int64_t out_dim1 = batch_first ? sorted_indices.size(1) : sorted_indices.size(0);
    const int64_t n_channels = activations.size(2);

    auto output = torch::empty({out_dim0, out_dim1, n_channels}, activations.options());

    const int batch_size = sorted_indices.size(0);
    const int channels = n_channels;
    const int retained_tokens = sorted_indices.size(1);
    const int src_batch_stride = batch_first ? activations.stride(0) : activations.stride(1);
    const int src_seq_stride = batch_first ? activations.stride(1) : activations.stride(0);
    const int dst_batch_stride = batch_first ? output.stride(0) : output.stride(1);
    const int dst_seq_stride = batch_first ? output.stride(1) : output.stride(0);

    if (activations.options().dtype() == torch::kFloat) {
        launch_gather_tokens((float*)output.data_ptr(),
                             (float*)activations.data_ptr(),
                             (int32_t*)sorted_indices.data_ptr(),
                             batch_size,
                             retained_tokens,
                             channels,
                             src_batch_stride,
                             src_seq_stride,
                             dst_batch_stride,
                             dst_seq_stride,
                             c10::cuda::getCurrentCUDAStream());
    } else {
        launch_gather_tokens((__half*)output.data_ptr(),
                             (__half*)activations.data_ptr(),
                             (int32_t*)sorted_indices.data_ptr(),
                             batch_size,
                             retained_tokens,
                             channels,
                             src_batch_stride,
                             src_seq_stride,
                             dst_batch_stride,
                             dst_seq_stride,
                             c10::cuda::getCurrentCUDAStream());
    }

    return output;
}
|
| 83 |
+
|
| 84 |
+
/*
Scatter a layer's retained-token activations back into the full activation
tensor (in place) and return it.

all_activations:   [N, S, C] when batch_first, otherwise [S, N, C].
layer_activations: activations of the retained tokens only.
sorted_indices:    always [N, retained]; destination sequence positions.
Non-fp32 inputs are treated as fp16.
NOTE(review): the original shape/alignment TORCH_CHECKs were commented out
upstream (and the divisibility condition there was inverted); channels must
be divisible by the kernel's vector width — confirm before re-enabling.
*/
torch::Tensor token_scatter_(torch::Tensor& all_activations,
                             torch::Tensor& layer_activations,
                             torch::Tensor& sorted_indices,
                             bool batch_first)
{
    const int batch_size = sorted_indices.size(0);
    const int channels = all_activations.size(2);
    const int retained_tokens = sorted_indices.size(1);
    const int src_batch_stride =
        batch_first ? layer_activations.stride(0) : layer_activations.stride(1);
    const int src_seq_stride =
        batch_first ? layer_activations.stride(1) : layer_activations.stride(0);
    const int dst_batch_stride =
        batch_first ? all_activations.stride(0) : all_activations.stride(1);
    const int dst_seq_stride =
        batch_first ? all_activations.stride(1) : all_activations.stride(0);

    if (all_activations.options().dtype() == torch::kFloat) {
        launch_scatter_tokens((float*)all_activations.data_ptr(),
                              (float*)layer_activations.data_ptr(),
                              (int32_t*)sorted_indices.data_ptr(),
                              batch_size,
                              retained_tokens,
                              channels,
                              src_batch_stride,
                              src_seq_stride,
                              dst_batch_stride,
                              dst_seq_stride,
                              c10::cuda::getCurrentCUDAStream());
    } else {
        launch_scatter_tokens((__half*)all_activations.data_ptr(),
                              (__half*)layer_activations.data_ptr(),
                              (int32_t*)sorted_indices.data_ptr(),
                              batch_size,
                              retained_tokens,
                              channels,
                              src_batch_stride,
                              src_seq_stride,
                              dst_batch_stride,
                              dst_seq_stride,
                              c10::cuda::getCurrentCUDAStream());
    }

    return all_activations;
}
|
| 140 |
+
|
| 141 |
+
/*
Slice a dense BERT attention mask down to the retained tokens.

dense_mask:     dense mask whose last dim (size(3)) is the full sequence
                length (the original dim==4 TORCH_CHECK is commented out
                upstream — assumed [N, 1, S, S]; confirm with callers).
sorted_indices: [layers, N, truncated] retained positions per layer/batch.
Returns a fresh [layers, N, 1, truncated, truncated] mask.
*/
torch::Tensor mask_gather_bert(torch::Tensor& dense_mask, torch::Tensor& sorted_indices)
{
    const int batch_size = dense_mask.size(0);
    const int num_layers = sorted_indices.size(0);
    const int full_seq = dense_mask.size(3);
    const int short_seq = sorted_indices.size(2);

    auto output = torch::empty({num_layers, batch_size, 1, short_seq, short_seq},
                               dense_mask.options());

    if (dense_mask.options().dtype() == torch::kFloat) {
        launch_slice_bert_mask((float*)output.data_ptr(),
                               (const float*)dense_mask.data_ptr(),
                               (const int32_t*)sorted_indices.data_ptr(),
                               num_layers,
                               batch_size,
                               short_seq,
                               full_seq,
                               c10::cuda::getCurrentCUDAStream());
    } else {
        // Non-fp32 masks are treated as fp16.
        launch_slice_bert_mask((__half*)output.data_ptr(),
                               (const __half*)dense_mask.data_ptr(),
                               (const int32_t*)sorted_indices.data_ptr(),
                               num_layers,
                               batch_size,
                               short_seq,
                               full_seq,
                               c10::cuda::getCurrentCUDAStream());
    }

    return output;
}
|
| 179 |
+
|
| 180 |
+
/*
Slice a dense causal (GPT) attention mask down to its leading
truncated_seq_len x truncated_seq_len corner. Per the kernel-side comment,
the causal structure lets one slice serve every layer (unlike BERT).
Returns a fresh [N, 1, truncated, truncated] mask.
*/
torch::Tensor mask_gather_gpt(torch::Tensor dense_mask, int truncated_seq_len)
{
    const int batch_size = dense_mask.size(0);
    const int full_seq = dense_mask.size(3);

    auto sliced = torch::empty({batch_size, 1, truncated_seq_len, truncated_seq_len},
                               dense_mask.options());

    if (dense_mask.options().dtype() == torch::kFloat) {
        launch_slice_gpt_mask((float*)sliced.data_ptr(),
                              (const float*)dense_mask.data_ptr(),
                              batch_size,
                              truncated_seq_len,
                              full_seq,
                              c10::cuda::getCurrentCUDAStream());
    } else {
        // Non-fp32 masks are treated as fp16.
        launch_slice_gpt_mask((__half*)sliced.data_ptr(),
                              (const __half*)dense_mask.data_ptr(),
                              batch_size,
                              truncated_seq_len,
                              full_seq,
                              c10::cuda::getCurrentCUDAStream());
    }

    return sliced;
}
|
| 208 |
+
|
| 209 |
+
// Python bindings for the random-LTD token sort/gather/scatter and mask
// slicing kernels. The exported names and docstrings are part of the
// module's public API and must not change.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("token_sort_", &token_sort_, "Comparison free sorting algorithm (CUDA)");
    m.def("token_gather", &token_gather, "Parallel gather of tokens (CUDA)");
    m.def("token_scatter_", &token_scatter_, "Parallel scatter of tokens (CUDA)");
    m.def("mask_gather_bert", &mask_gather_bert, "Token-based mask gather for BERT masking (CUDA)");
    m.def("mask_gather_gpt", &mask_gather_gpt, "Token-based mask gather for GPT masking (CUDA)");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "custom_cuda_layers.h"
|
| 7 |
+
#include "memory_access_utils.h"
|
| 8 |
+
|
| 9 |
+
namespace cg = cooperative_groups;
|
| 10 |
+
|
| 11 |
+
/*
Copy one row of the causal mask's truncated top-left corner.
Grid mapping: x = batch, y = output row; threads of the block stride across
the first truncated_seq_len columns of that row.
*/
template <typename T>
__global__ void slice_gpt_mask_impl(T* output_mask,
                                    const T* input_mask,
                                    int truncated_seq_len,
                                    int orig_seq_len)
{
    const int src_batch_stride = orig_seq_len * orig_seq_len;
    const int dst_batch_stride = truncated_seq_len * truncated_seq_len;

    cg::thread_block tb = cg::this_thread_block();

    const T* src_row = input_mask + blockIdx.x * src_batch_stride + blockIdx.y * orig_seq_len;
    T* dst_row = output_mask + blockIdx.x * dst_batch_stride + blockIdx.y * truncated_seq_len;

    for (int col = tb.thread_index().x; col < truncated_seq_len; col += blockDim.x) {
        dst_row[col] = src_row[col];
    }
}
|
| 31 |
+
|
| 32 |
+
// Launcher for slice_gpt_mask_impl: one block per (batch, output row);
// the block covers the row up to the 1024-thread limit, with the kernel's
// column loop handling any remainder.
template <typename T>
void launch_slice_gpt_mask(T* output_mask,
                           const T* input_mask,
                           int batch_size,
                           int truncated_seq_len,
                           int orig_seq_len,
                           cudaStream_t stream)
{
    const int block_threads = (truncated_seq_len >= 1024) ? 1024 : truncated_seq_len;

    const dim3 block(block_threads);
    const dim3 grid(batch_size, truncated_seq_len);

    slice_gpt_mask_impl<T>
        <<<grid, block, 0, stream>>>(output_mask, input_mask, truncated_seq_len, orig_seq_len);
}

template void launch_slice_gpt_mask<float>(float*, const float*, int, int, int, cudaStream_t);
template void launch_slice_gpt_mask<__half>(__half*, const __half*, int, int, int, cudaStream_t);
|
| 52 |
+
|
| 53 |
+
// Gather one [truncated x truncated] mask row for a (layer, batch, row)
// triple: output row z is source row retained_indices[batch][z], and its
// columns are gathered through the same per-batch index list.
// Grid mapping: x = layer, y = batch, z = output row.
// NOTE(review): the index lookup uses only (batch, row) — it does not
// include the layer index (tb.group_index().x) even though the output is
// laid out per layer; confirm whether retained_indices is intended to be
// shared across layers here.
template <typename T>
__global__ void slice_bert_mask_impl(T* output_mask,
                                     const T* input_mask,
                                     const int32_t* retained_indices,
                                     int32_t truncated_seq_len,
                                     int32_t orig_seq_len)
{
    const int in_batch_stride = orig_seq_len * orig_seq_len;
    const int out_batch_stride = truncated_seq_len * truncated_seq_len;
    const int out_layer_stride = out_batch_stride * gridDim.y;

    cg::thread_block tb = cg::this_thread_block();

    const int out_layer_offset = tb.group_index().x * out_layer_stride;

    const int in_batch_offset = tb.group_index().y * in_batch_stride;
    const int out_batch_offset = tb.group_index().y * out_batch_stride;

    // Source row of the dense mask feeding this output row.
    const int32_t gather_row =
        retained_indices[tb.group_index().y * truncated_seq_len + tb.group_index().z];
    const int in_seq_offset = gather_row * orig_seq_len;
    const int out_seq_offset = tb.group_index().z * truncated_seq_len;

    const T* in_sequence = input_mask + in_batch_offset + in_seq_offset;
    T* out_sequence = output_mask + out_layer_offset + out_batch_offset + out_seq_offset;
    const int32_t* gather_data = retained_indices + tb.group_index().y * truncated_seq_len;

    // Gather the retained columns out of the source row.
    for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) {
        out_sequence[i] = in_sequence[gather_data[i]];
    }
}
|
| 84 |
+
|
| 85 |
+
/*
Since the Bert mask is not causal like GPT, we can't just generate a set of
masks for the entire model based off a single layer sample.

We map the kernel as follows:
    x-dimension: layer
    y-dimension: batch
    z-dimension: sequence_offset
*/
template <typename T>
void launch_slice_bert_mask(T* output_mask,
                            const T* input_mask,
                            const int32_t* retained_indices,
                            int32_t layers,
                            int32_t batch_size,
                            int32_t truncated_seq_len,
                            int32_t orig_seq_len,
                            cudaStream_t stream)
{
    // One block per (layer, batch, output row); threads cover the row up to
    // the 1024-thread limit, the kernel loop absorbing any remainder.
    const int block_threads = (truncated_seq_len >= 1024) ? 1024 : truncated_seq_len;
    const dim3 block(block_threads);
    const dim3 grid(layers, batch_size, truncated_seq_len);

    slice_bert_mask_impl<T><<<grid, block, 0, stream>>>(
        output_mask, input_mask, retained_indices, truncated_seq_len, orig_seq_len);
}

template void launch_slice_bert_mask<float>(float*, const float*, const int32_t*, int32_t, int32_t, int32_t, int32_t, cudaStream_t);

template void launch_slice_bert_mask<__half>(__half*, const __half*, const int32_t*, int32_t, int32_t, int32_t, int32_t, cudaStream_t);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <cassert>
|
| 7 |
+
#include "custom_cuda_layers.h"
|
| 8 |
+
#include "memory_access_utils.h"
|
| 9 |
+
|
| 10 |
+
namespace cg = cooperative_groups;
|
| 11 |
+
|
| 12 |
+
// Compile-time configuration for the token-sort kernel.
namespace td_sort {
constexpr int threads = 512;     // threads per block
constexpr int granularity = 16;  // bytes per vectorized shared-memory access
// int32 values moved per vectorized access.
constexpr int mem_vals = granularity / sizeof(int32_t);
// Shared-memory buffer capacity: (threads + 1) vector slots.
constexpr int max_buffer_size = (threads + 1) * mem_vals;

#ifdef __HIP_PLATFORM_AMD__
constexpr int warp_size = 64;
#else
constexpr int warp_size = 32;
#endif

constexpr int max_warps = threads / warp_size;
}  // namespace td_sort
|
| 26 |
+
|
| 27 |
+
/*
Comparison-free, in-place sort of one row of retained token ids.

Marks each id in a 0/1 presence buffer over [0, original_tokens] in shared
memory, then runs a three-level inclusive scan over that buffer
(per-thread serial, intra-warp shuffle, cross-warp via intermediate_buffer)
and uses the scanned counts as destination ranks to emit the ids in order.
One block handles one (layer, batch) row; VALS_PER_THREAD * td_sort::threads
must cover reserved_tokens.
*/
template <int VALS_PER_THREAD>
__global__ void scan_sort(int32_t* data, int reserved_tokens, int original_tokens)
{
    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<td_sort::warp_size> warp = cg::tiled_partition<td_sort::warp_size>(tb);

    __shared__ int32_t indices_buffer[td_sort::max_buffer_size];
    __shared__ int32_t intermediate_buffer[td_sort::max_warps];
    __shared__ int32_t sorted_indices_buffer[td_sort::max_buffer_size];

    // Zero the presence buffer (original_tokens + 1 slots) with vectorized
    // stores.
    for (int i = tb.thread_index().x * td_sort::mem_vals; i < original_tokens + 1;
         i += tb.group_dim().x * td_sort::mem_vals) {
        uint32_t zeros[td_sort::mem_vals] = {0, 0, 0, 0};
        mem_access::store_shared<td_sort::granularity>(indices_buffer + i, zeros);
    }

    int32_t local_vals[VALS_PER_THREAD];

    // We flatten layers/batch into a single indexing dimension
    int32_t* data_block = data + tb.group_index().x * reserved_tokens;

    // The next two loops really could be fused for a more logical code layout, but don't want to
    // move the barrier forward
#pragma unroll
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            mem_access::load_global<sizeof(int32_t)>(local_vals + i, data_block + iter_idx);
        } else {
            // Out-of-range lanes carry a 0 sentinel.
            local_vals[i] = 0;
        }
    }

    tb.sync();

    // Mark each loaded id as present.
#pragma unroll
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            const int32_t one = 1;
            mem_access::store_shared<sizeof(int32_t)>(indices_buffer + local_vals[i], &one);
        }
    }

    tb.sync();

    // Level 1: per-thread serial inclusive scan over its mem_vals slots.
    int32_t local_input[td_sort::mem_vals];
    mem_access::load_shared<td_sort::granularity>(
        local_input, indices_buffer + tb.thread_index().x * td_sort::mem_vals);

    int32_t reduce_vals[td_sort::mem_vals];
    reduce_vals[0] = local_input[0];

#pragma unroll
    for (int i = 1; i < td_sort::mem_vals; i++) {
        reduce_vals[i] = local_input[i] + reduce_vals[i - 1];
    }

    // Level 2: intra-warp scan of each thread's partial sum.
    int32_t step_1_val = reduce_vals[td_sort::mem_vals - 1];
    // Short span exclusive scan algorithm (less work efficient)
#pragma unroll
    for (int i = 1; i < td_sort::warp_size; i *= 2) {
        int32_t step_val = warp.shfl_up(step_1_val, i);
        step_1_val = (warp.thread_rank() < i) ? step_1_val : step_1_val + step_val;
    }

    // Warp leaders publish their totals for the cross-warp scan.
    if (warp.thread_rank() == td_sort::warp_size - 1) {
        mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.meta_group_rank(),
                                                  &step_1_val);
    }

    tb.sync();

    // Level 3: the first warp scans the per-warp totals in place.
    if (warp.meta_group_rank() == 0) {
        int32_t step_2_val = 0;
        if (warp.thread_rank() < td_sort::max_warps) {
            mem_access::load_shared<sizeof(int32_t)>(&step_2_val,
                                                     intermediate_buffer + warp.thread_rank());
        }

#pragma unroll
        for (int i = 1; i < td_sort::warp_size; i *= 2) {
            int32_t step_val = warp.shfl_up(step_2_val, i);
            step_2_val = (warp.thread_rank() < i) ? step_2_val : step_2_val + step_val;
        }

        if (warp.thread_rank() < td_sort::max_warps) {
            mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.thread_rank(),
                                                      &step_2_val);
        }
    }

    tb.sync();

    // Each warp past the first offsets by the preceding warps' total.
    int step_2_val = 0;
    if (warp.meta_group_rank() > 0) {
        mem_access::load_shared<sizeof(int32_t)>(&step_2_val,
                                                 intermediate_buffer + warp.meta_group_rank() - 1);
    }

    const int thread_offset = reduce_vals[td_sort::mem_vals - 1];

    // Fold levels 2 and 3 back into the per-thread values (subtracting the
    // thread's own total converts the warp scan contribution to exclusive).
#pragma unroll
    for (int i = 0; i < td_sort::mem_vals; i++) {
        reduce_vals[i] += step_1_val + step_2_val - thread_offset;
    }
    mem_access::store_shared<td_sort::granularity>(
        indices_buffer + tb.thread_index().x * td_sort::mem_vals, reduce_vals);

    if (tb.thread_index().x == 0) {
        indices_buffer[original_tokens] = original_tokens - indices_buffer[original_tokens];
    }
    tb.sync();

    // Place each id at its scanned rank; an id of 0 always maps to slot 0.
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            if (local_vals[i] == 0) {
                int zero = 0;
                mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer, &zero);
            } else {
                int sorted_idx;
                mem_access::load_shared<sizeof(int32_t)>(&sorted_idx,
                                                         indices_buffer + local_vals[i] - 1);
                mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer + sorted_idx,
                                                          local_vals + i);
            }
        }
    }

    tb.sync();

    // Write the sorted row back to global memory.
#pragma unroll
    for (int i = 0; i < VALS_PER_THREAD; i++) {
        const int iter_idx = i * td_sort::threads + tb.thread_index().x;
        if (iter_idx < reserved_tokens) {
            int32_t store_val;
            mem_access::load_shared<sizeof(int32_t)>(&store_val, sorted_indices_buffer + iter_idx);
            mem_access::store_global<sizeof(int32_t)>(data_block + iter_idx, &store_val);
        }
    }
}
|
| 169 |
+
|
| 170 |
+
/*
Dispatch the scan-sort kernel over every (layer, batch) row of `indices`.
reserved_size must fit within 4 values per thread (i.e. at most
4 * td_sort::threads tokens); anything larger trips the assert.
*/
void launch_token_sort(int32_t* indices,
                       int layers,
                       int batch_size,
                       int reserved_size,
                       int original_tokens,
                       cudaStream_t stream)
{
    // Each sort is completely independent, so layers/batch flatten into a
    // single grid dimension.
    const dim3 grid(layers * batch_size);
    const dim3 block(td_sort::threads);

    const int vals_per_thread = (reserved_size + td_sort::threads - 1) / td_sort::threads;

    switch (vals_per_thread) {
        case 1:
            scan_sort<1><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
            break;
        case 2:
            scan_sort<2><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
            break;
        case 3:
            scan_sort<3><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
            break;
        case 4:
            scan_sort<4><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
            break;
        default: assert(false); break;
    }
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
|
| 8 |
+
https://github.com/ptillet/torch-blocksparse/blob/master/csrc/utils.cpp
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#include <torch/extension.h>
|
| 12 |
+
#include <string>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#ifdef _OPENMP
|
| 16 |
+
#include <omp.h>
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
typedef std::vector<std::tuple<int, torch::Tensor>> ret_t;
|
| 20 |
+
|
| 21 |
+
/*
One pass of SDD segmentation: find every max_width x max_width group of
nonzero blocks in `layout`, record the member blocks into `scratch`,
zero them out of `layout`, and append a (max_width, records) entry to
`ret`. Uses a "largest square ending here" DP table in `tmp` over the
compressed (nonzero-only) grid, with ring buffers tracking the last
max_width nonzero columns per row and rows per column. Heads (dim 0) are
independent and processed in parallel under OpenMP.
*/
void segment_blocks(torch::Tensor layout,
                    torch::Tensor idx,
                    torch::Tensor scratch,
                    int max_width,
                    ret_t& ret)
{
    size_t H = layout.size(0);
    size_t M = layout.size(1);
    size_t N = layout.size(2);
    torch::Tensor tmp = torch::zeros_like(layout);

    auto _tmp = tmp.accessor<int, 3>();
    auto _layout = layout.accessor<int, 3>();
    auto _idx = idx.accessor<int, 3>();
    auto _scratch = scratch.accessor<int, 3>();
    // Records emitted so far, per head (indexes scratch's second dim).
    std::vector<int> current(H, 0);

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (size_t h = 0; h < H; h++) {
        // surrounding indices
        std::vector<int> ii_left(max_width, -1);
        std::vector<std::vector<int>> ii_top(max_width, std::vector<int>(N, -1));

        for (size_t m = 0; m < M; m++) {
            for (size_t n = 0; n < N; n++) {
                int v = _layout[h][m][n];
                if (v == 0) continue;
                // Positions of the max_width-th previous nonzero in this
                // row / this column (-1 if none yet).
                int n_left = ii_left[max_width - 1];
                int m_top = ii_top[max_width - 1][n];
                int top = (m_top >= 0) ? _tmp[h][m_top][n] : 0;
                int left = (n_left >= 0) ? _tmp[h][m][n_left] : 0;
                int topleft = (m_top >= 0 && n_left >= 0) ? _tmp[h][m_top][n_left] : 0;
                int width = std::min(left, std::min(top, topleft)) + 1;

                // reset width if blocks cannot be
                // packed together (i.e., there's a 1 "in the middle")
                for (int nn = n_left + 1; nn < n; nn++)
                    if (ii_top[max_width - 1][nn] > ii_top[max_width - 1][n]) width = 1;
                _tmp[h][m][n] = width;

                // update n_left ring buffer
                for (int k = 0; k < max_width - 1; k++) ii_left[k] = ii_left[k + 1];
                ii_left[max_width - 1] = n;

                // update ii_top ring buffer
                for (int k = 0; k < max_width - 1; k++) ii_top[k][n] = ii_top[k + 1][n];
                ii_top[max_width - 1][n] = m;

                // block is too small -- skip
                if (width != max_width) continue;

                // retained blocks are set to zeros and recorded as
                // [head, row, col, block_idx] rows in scratch
                for (size_t km = 0; km < max_width; km++)
                    for (size_t kn = 0; kn < max_width; kn++) {
                        int mm = ii_top[km][n];
                        int nn = ii_left[kn];
                        if (mm < 0 || nn < 0) continue;
                        _layout[h][mm][nn] = 0;
                        _tmp[h][mm][nn] = 0;
                        _scratch[h][current[h]][0] = (int)h;
                        _scratch[h][current[h]][1] = (int)mm;
                        _scratch[h][current[h]][2] = (int)nn;
                        _scratch[h][current[h]][3] = _idx[h][mm][nn];
                        current[h]++;
                    }
            }
        }
    }
    // Concatenate each head's records into a single batch for this width.
    std::vector<torch::Tensor> to_cat;
    for (size_t h = 0; h < H; h++)
        if (current[h] > 0) to_cat.push_back(scratch[h].slice(0, 0, current[h]));
    if (!to_cat.empty()) ret.push_back({max_width, torch::cat(to_cat)});
}
|
| 96 |
+
|
| 97 |
+
// Partition the non-zero blocks of a sparse-attention `layout` (H x M x N,
// int; consumed/zeroed in place by segment_blocks) into square super-blocks.
// Starting at `start_width` and halving each pass, every claimed block is
// recorded as a (head, row, col, flat-index) quadruple; returns the
// accumulated list of (width, records-tensor) pairs.
ret_t sdd_segment(torch::Tensor layout, int start_width)
{
    ret_t ret;

    // block index: enumerate non-zero layout entries in (h, m, n) order
    torch::Tensor idx = torch::zeros_like(layout);
    int current = 0;
    int64_t H = layout.size(0);
    int64_t M = layout.size(1);
    int64_t N = layout.size(2);
    auto _layout = layout.accessor<int, 3>();
    auto _idx = idx.accessor<int, 3>();
    for (int64_t h = 0; h < H; h++)
        for (int64_t m = 0; m < M; m++)
            for (int64_t n = 0; n < N; n++) {
                if (_layout[h][m][n] == 0) continue;
                _idx[h][m][n] = current++;
            }

    // scratch memory: per head, room for at most layout.sum() records of
    // 4 ints each (head, row, col, flat index)
    torch::Tensor scratch = torch::empty({H, layout.sum().item<int>(), 4}, layout.dtype());

    // Greedy coarse-to-fine: wider segmentations claim blocks first; each
    // pass zeroes the layout entries it consumed so later (narrower) passes
    // only see the leftovers.
    for (int max_width = start_width; max_width > 0; max_width /= 2)
        segment_blocks(layout, idx, scratch, max_width, ret);
    return ret;
}
| 123 |
+
|
| 124 |
+
// Python binding for the sparse-attention SDD segmentation helper.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("sdd_segment", &sdd_segment, "SDD segmentation handler");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <c10/cuda/CUDAStream.h>
|
| 7 |
+
#include <torch/extension.h>
|
| 8 |
+
#include <cstdio>
|
| 9 |
+
#include <vector>
|
| 10 |
+
#include "spatial_cuda_layers.h"
|
| 11 |
+
|
| 12 |
+
// Derive the flattened (batch, seq, channel) description of an activation
// tensor. 4-D inputs are treated as channels-last NCHW with the spatial dims
// H*W collapsed into seq_len; other inputs are assumed to already be
// [batch, seq, channel].
ChannelsLastProblem dimension_problem(at::Tensor& input)
{
    ChannelsLastProblem dims;

    if (input.dim() == 4) {
        // In some sense this is unsafe (and a reflection of the assumptions made inside
        // the C10 options checker). Basically, there's no great way to be sure that
        // a tensor is in channels last because a 1x1 image will appear to be in channels
        // last even when it isn't.
        // TORCH_CHECK instead of assert(): assert is compiled out under NDEBUG
        // (typical release builds), which would silently drop this layout guard.
        TORCH_CHECK(input.is_contiguous(at::MemoryFormat::ChannelsLast),
                    "4-D input must be contiguous in channels-last memory format");
        dims.batch_size = input.size(0);
        dims.seq_len = input.size(2) * input.size(3);
        dims.channels = input.size(1);
    } else {
        TORCH_CHECK(input.is_contiguous(), "input must be contiguous");
        dims.batch_size = input.size(0);
        dims.seq_len = input.size(1);
        dims.channels = input.size(2);
    }

    return dims;
}
|
| 34 |
+
|
| 35 |
+
// output = input + bias (bias broadcast over batch and sequence), fp16 only.
// Returns a freshly allocated tensor with input's layout.
at::Tensor seq_unroll_bias_add(at::Tensor& input, at::Tensor& bias)
{
    // TORCH_CHECK instead of assert(): assert vanishes under NDEBUG, and a
    // wrong dtype would make the raw __half reinterpretation below read garbage.
    TORCH_CHECK(input.dtype() == at::kHalf, "seq_unroll_bias_add expects fp16 input");

    // TODO(cmikeh2): Should probably refactor this into a more portable
    // description, since it does generalize for channels-last
    ChannelsLastProblem problem = dimension_problem(input);

    auto output = at::empty_like(input);

    // nullptr "other"/"other_bias": plain bias-add, no residual fusion.
    launch_opt_bias_add((__half*)output.data_ptr(),
                        (const __half*)input.data_ptr(),
                        (const __half*)bias.data_ptr(),
                        nullptr,
                        nullptr,
                        problem.batch_size,
                        problem.seq_len,
                        problem.channels,
                        at::cuda::getCurrentCUDAStream());

    return output;
}
|
| 58 |
+
// output = (input + bias) + other, fused in one kernel launch, fp16 only.
// `other` is a full-size residual tensor added element-wise.
at::Tensor seq_bias_add_add(at::Tensor& input, at::Tensor& bias, at::Tensor& other)
{
    // TORCH_CHECK instead of assert(): assert vanishes under NDEBUG, and a
    // wrong dtype would make the raw __half reinterpretation below read garbage.
    TORCH_CHECK(input.dtype() == at::kHalf, "seq_bias_add_add expects fp16 input");

    // TODO(cmikeh2): Should probably refactor this into a more portable
    // description, since it does generalize for channels-last
    ChannelsLastProblem problem = dimension_problem(input);

    auto output = at::empty_like(input);

    // nullptr "other_bias": residual is added without its own bias term.
    launch_opt_bias_add((__half*)output.data_ptr(),
                        (const __half*)input.data_ptr(),
                        (const __half*)bias.data_ptr(),
                        (const __half*)other.data_ptr(),
                        nullptr,
                        problem.batch_size,
                        problem.seq_len,
                        problem.channels,
                        at::cuda::getCurrentCUDAStream());

    return output;
}
+
|
| 81 |
+
// output = (input + bias) + (other + other_bias), fully fused, fp16 only.
at::Tensor seq_bias_add_bias_add(at::Tensor& input,
                                 at::Tensor& bias,
                                 at::Tensor& other,
                                 at::Tensor& other_bias)
{
    // TORCH_CHECK instead of assert(): assert vanishes under NDEBUG, and a
    // wrong dtype would make the raw __half reinterpretation below read garbage.
    TORCH_CHECK(input.dtype() == at::kHalf, "seq_bias_add_bias_add expects fp16 input");

    // TODO(cmikeh2): Should probably refactor this into a more portable
    // description, since it does generalize for channels-last
    ChannelsLastProblem problem = dimension_problem(input);

    auto output = at::empty_like(input);

    launch_opt_bias_add((__half*)output.data_ptr(),
                        (const __half*)input.data_ptr(),
                        (const __half*)bias.data_ptr(),
                        (const __half*)other.data_ptr(),
                        (const __half*)other_bias.data_ptr(),
                        problem.batch_size,
                        problem.seq_len,
                        problem.channels,
                        at::cuda::getCurrentCUDAStream());

    return output;
}
+
|
| 107 |
+
// Python bindings: fused NHWC bias-add variants (fp16, CUDA).
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("nhwc_bias_add", &seq_unroll_bias_add);
    m.def("nhwc_bias_add_add", &seq_bias_add_add);
    m.def("nhwc_bias_add_bias_add", &seq_bias_add_bias_add);
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

// Native __half arithmetic requires compute capability >= 5.3.
// Fixed: was "#define HALF_PRECISION_AVAILABLE = 1", which expands to "= 1"
// and breaks any "#if HALF_PRECISION_AVAILABLE" test (only #ifdef worked).
#if __CUDA_ARCH__ >= 530
#define HALF_PRECISION_AVAILABLE 1
#endif

#ifdef __HIP_PLATFORM_AMD__
#include <hip/hip_cooperative_groups.h>
#else
#include <cooperative_groups.h>
#endif

#include <cuda.h>
#include <cuda_fp16.h>

/*********** Group Norm Kernels, Structs, and Helpers ************/

// Flattened description of an activation tensor: a channels-last (NHWC)
// image or a [batch, seq, channel] sequence, with any spatial dims
// collapsed into seq_len.
typedef struct {
    int64_t batch_size;
    int64_t seq_len;
    int64_t channels;
} ChannelsLastProblem;

// Fused fp16 bias-add with optional residual ("other") and optional second
// bias; pass nullptr for the optional inputs to skip them.
void launch_opt_bias_add(__half* result,
                         const __half* activation,
                         const __half* bias,
                         const __half* other,
                         const __half* other_bias,
                         int batch_size,
                         int seq_len,
                         int channels,
                         cudaStream_t stream);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu
ADDED
|
@@ -0,0 +1,873 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "custom_cuda_layers.h"
|
| 7 |
+
|
| 8 |
+
const int unroll_factor = 4;
|
| 9 |
+
|
| 10 |
+
// Forward (inverted) dropout, float: out[i] = Xdata[i] * scale * keep[i]
// with scale = 1/(1-ratio); the generated keep-mask is written to `mask`
// for reuse in the backward pass. Processes 4 elements per iteration
// (one curand_uniform4 draw); a trailing loop covers the remainder.
__global__ void dropout_kernel(const int N,
                               const float ratio,
                               float* out,
                               const float* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Philox counter-based RNG: each thread gets an independent subsequence
    // keyed by (seed, thread index, offset).
    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float4 rand = curand_uniform4(&state);
        uint8_t m[unroll_factor];

        // keep element iff its uniform draw exceeds the drop ratio
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        int i = j * unroll_factor;

        mask[i] = (uint8_t)m[0];
        mask[i + 1] = (uint8_t)m[1];
        mask[i + 2] = (uint8_t)m[2];
        mask[i + 3] = (uint8_t)m[3];

        out[i] = Xdata[i] * scale * m[0];
        out[i + 1] = Xdata[i + 1] * scale * m[1];
        out[i + 2] = Xdata[i + 2] * scale * m[2];
        out[i + 3] = Xdata[i + 3] * scale * m[3];
    }
    // Tail: first index past the region covered by the unrolled loop.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            out[i] = Xdata[i] * scale * m;
            mask[i] = m;
        }
    }
}
|
| 58 |
+
|
| 59 |
+
// Forward (inverted) dropout, __half: same contract as the float overload.
// Two paths: __STOCHASTIC_MODE__ does vectorized half2 arithmetic over
// float2-sized loads; the default path up-converts to float, scales, then
// rounds back to half.
__global__ void dropout_kernel(const int N,
                               const float ratio,
                               __half* out,
                               const __half* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);

    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);

#ifdef __STOCHASTIC_MODE__

    const __half2 h_scale = __float2half2_rn(scale);
    // Reinterpret as float2/uint32 so each iteration moves 4 halves and
    // 4 mask bytes per transaction. NOTE(review): presumably the buffers are
    // suitably aligned and N's remainder is handled by the tail loop — confirm.
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);

    uint32_t m_32;
    uint8_t* m = reinterpret_cast<uint8_t*>(&m_32);

    float2 result_f;
    __half2* result_h = reinterpret_cast<__half2*>(&result_f);
    __half2 mask_h[2];
    float2 mask_f[2];

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_f = x_cast[j];
        __half2* x_h = reinterpret_cast<__half2*>(&x_f);

        float4 rand = curand_uniform4(&state);

        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        // Widen the 4 mask bytes to half2 pairs so they can multiply x_h.
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]);

        mask_h[0] = __float22half2_rn(mask_f[0]);
        mask_h[1] = __float22half2_rn(mask_f[1]);

        result_h[0] = x_h[0] * h_scale * mask_h[0];
        result_h[1] = x_h[1] * h_scale * mask_h[1];

        out_cast[j] = result_f;

        mask_cast[j] = m_32;
    }

#else

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;

        // Load 4 halves as two half2, widen to float for the scale multiply.
        const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i);
        float2 vals_half_f[2];
        vals_half_f[0] = __half22float2(vals_half[0]);
        vals_half_f[1] = __half22float2(vals_half[1]);

        uint8_t m[unroll_factor];
        float4 rand = curand_uniform4(&state);
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        out[i] = __float2half(vals_half_f[0].x * scale * m[0]);
        out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]);
        out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]);
        out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]);

        mask[i] = m[0];
        mask[i + 1] = m[1];
        mask[i + 2] = m[2];
        mask[i + 3] = m[3];
    }

#endif
    // Tail: elements past the unrolled region, processed one at a time.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            out[i] = __float2half((float)Xdata[i] * scale * m);
            mask[i] = m;
        }
    }
}
|
| 158 |
+
|
| 159 |
+
// Re-apply a previously generated dropout mask (float): used when
// recomputing activations, so no new random numbers are drawn.
// NOTE(review): `seed` is unused here; the signature mirrors dropout_kernel
// so launch_dropout can invoke either overload with the same arguments.
__global__ void dropout_kernel_bwd(const int N,
                                   const float ratio,
                                   const float* Xdata,
                                   float* out,
                                   uint8_t* mask,
                                   std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;

        out[i] = mask[i] ? Xdata[i] * scale : 0.0;
        out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0;
        out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0;
        out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0;
    }
    // Tail elements not covered by the unrolled loop above.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; }
    }
}
|
| 182 |
+
|
| 183 |
+
// Re-apply a previously generated dropout mask (__half); see the float
// overload for the contract. `seed` is unused (signature symmetry).
__global__ void dropout_kernel_bwd(const int N,
                                   const float ratio,
                                   const __half* Xdata,
                                   __half* out,
                                   uint8_t* mask,
                                   std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);

#ifdef __STOCHASTIC_MODE__

    const __half2 h_scale = __float2half2_rn(scale);

    // Vectorized path: 4 halves / 4 mask bytes per iteration.
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_f = x_cast[j];
        __half2* x_h = reinterpret_cast<__half2*>(&x_f);

        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;

        __half2 mask_h[2];
        float2 mask_f[2];

        // Widen mask bytes to half2 multiplicands.
        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]);

#pragma unroll
        for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]);

        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);

        result_h[0] = x_h[0] * h_scale * mask_h[0];
        result_h[1] = x_h[1] * h_scale * mask_h[1];

        out_cast[j] = result_f;
    }

#else

    // NOTE(review): h_scale/h_zero are not used in this path; the math is
    // done in float and rounded back to half below.
    const __half h_scale = __float2half(scale);
    const __half h_zero = __float2half(0.0);

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        int i = j * unroll_factor;

        const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i);

        uint8_t* m = mask + i;

        float2 vals_half_f[2];

        vals_half_f[0] = __half22float2(vals_half[0]);
        vals_half_f[1] = __half22float2(vals_half[1]);

        out[i] = __float2half(vals_half_f[0].x * scale * m[0]);
        out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]);
        out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]);
        out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]);
    }

#endif
    // Tail elements not covered by the unrolled loop.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            out[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
|
| 260 |
+
|
| 261 |
+
// Host launcher for forward dropout (bwd=false: draw a fresh mask) or mask
// re-application (bwd=true: reuse `mask`). `dim` is the trailing dimension;
// it only influences the launch shape (halve the block, double the grid for
// wide rows — presumably an occupancy tweak, TODO confirm).
template <typename T>
void launch_dropout(T* out,
                    const T* vals,
                    uint8_t* mask,
                    int total_count,
                    int dim,
                    float ratio,
                    cudaStream_t stream,
                    bool bwd)
{
    // Kernels are hand-unrolled for exactly 4 elements per iteration.
    assert(unroll_factor == 4);

    dim3 grid_dim = DS_GET_BLOCKS(total_count / unroll_factor);
    dim3 block_dim = DS_CUDA_NUM_THREADS;

    if (dim > 512) {
        block_dim.x >>= 1;
        grid_dim.x <<= 1;
    }
    // Advance the shared Philox offset so the next launch draws fresh
    // random numbers rather than repeating this launch's subsequences.
    uint64_t inc = total_count / grid_dim.x / block_dim.x;
    std::pair<uint64_t, uint64_t> seed = TrainingContext::Instance().IncrementOffset(inc);
    if (bwd)
        dropout_kernel_bwd<<<grid_dim, block_dim, 0, stream>>>(
            total_count, ratio, vals, out, mask, seed);
    else
        dropout_kernel<<<grid_dim, block_dim, 0, stream>>>(
            total_count, ratio, out, vals, mask, seed);
}

// Explicit instantiations for the two supported element types.
template void launch_dropout(float* out,
                             const float* vals,
                             uint8_t* mask,
                             int total_count,
                             int dim,
                             float ratio,
                             cudaStream_t stream,
                             bool);
template void launch_dropout(__half* out,
                             const __half* vals,
                             uint8_t* mask,
                             int total_count,
                             int dim,
                             float ratio,
                             cudaStream_t stream,
                             bool);
|
| 306 |
+
|
| 307 |
+
// In-place dropout gradient (float): dX[i] *= scale * mask[i].
__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask)
{
    CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; }
}
|
| 311 |
+
|
| 312 |
+
// In-place dropout gradient (__half): dX[i] = dX[i] * scale * mask[i],
// processed 4 halves / 4 mask bytes per iteration via float2/uint32 loads.
__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask)
{
    const __half2 h_scale = __float2half2_rn(scale);
    float2* x_cast = reinterpret_cast<float2*>(Xdata);
    uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask);

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_data = x_cast[j];
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;

        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);

#ifdef __STOCHASTIC_MODE__

        // half2 arithmetic path.
        __half2* x_data_h = reinterpret_cast<__half2*>(&x_data);
        __half2 mask_h[2];
        float2 mask_f[2];

        float* mask_f_data = &mask_f[0].x;
#pragma unroll
        for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]);

        mask_h[0] = __float22half2_rn(mask_f[0]);
        mask_h[1] = __float22half2_rn(mask_f[1]);

        result_h[0] = x_data_h[0] * h_scale * mask_h[0];
        result_h[1] = x_data_h[1] * h_scale * mask_h[1];

#else

        // float math path; h_scale above is unused here.
        __half* x_data_h = reinterpret_cast<__half*>(&x_data);
        float2 result[2];

        result[0].x = (float)x_data_h[0] * scale * m[0];
        result[0].y = (float)x_data_h[1] * scale * m[1];
        result[1].x = (float)x_data_h[2] * scale * m[2];
        result[1].y = (float)x_data_h[3] * scale * m[3];

        result_h[0] = __float22half2_rn(result[0]);
        result_h[1] = __float22half2_rn(result[1]);

#endif
        x_cast[j] = result_f;
    }
    // Tail elements not covered by the unrolled loop.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
|
| 367 |
+
|
| 368 |
+
// Host launcher for the in-place dropout gradient kernels.
template <typename T>
void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream)
{
    // Kernels are hand-unrolled for exactly 4 elements per iteration.
    assert(unroll_factor == 4);

    const float scale = 1. / (1. - ratio);
    dropout_grad_kernel<<<DS_GET_BLOCKS(total_count / unroll_factor),
                          DS_CUDA_NUM_THREADS,
                          0,
                          stream>>>(total_count, scale, vals, mask);
}

// Explicit instantiations for the two supported element types.
template void launch_dropout_grad(float* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
template void launch_dropout_grad(__half* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
|
| 390 |
+
|
| 391 |
+
// Out-of-place dropout gradient (float): out[i] = Xdata[i] * scale * mask[i].
__global__ void dropout_grad_kernel(const int N,
                                    const float scale,
                                    const float* Xdata,
                                    float* out,
                                    uint8_t* mask)
{
    CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; }
}
|
| 399 |
+
|
| 400 |
+
// Out-of-place dropout gradient (__half): out[i] = Xdata[i] * scale * mask[i],
// 4 halves / 4 mask bytes per iteration via float2/uint32 loads.
__global__ void dropout_grad_kernel(const int N,
                                    const float scale,
                                    const __half* Xdata,
                                    __half* out,
                                    uint8_t* mask)
{
    const float2* x_cast = reinterpret_cast<const float2*>(Xdata);
    float2* out_cast = reinterpret_cast<float2*>(out);
    const uint32_t* mask_cast = reinterpret_cast<const uint32_t*>(mask);

    // Reusable 4-half result staged in registers, written back as one float2.
    float2 result_f;
    __half2* result_h = reinterpret_cast<__half2*>(&result_f);

    CUDA_1D_KERNEL_LOOP(j, N / unroll_factor)
    {
        float2 x_data = x_cast[j];
        uint32_t m_32 = mask_cast[j];
        uint8_t* m = (uint8_t*)&m_32;

        __half* x_data_h = reinterpret_cast<__half*>(&x_data);
        float2 result[2];

        result[0].x = (float)x_data_h[0] * scale * m[0];
        result[0].y = (float)x_data_h[1] * scale * m[1];
        result[1].x = (float)x_data_h[2] * scale * m[2];
        result[1].y = (float)x_data_h[3] * scale * m[3];

        result_h[0] = __float22half2_rn(result[0]);
        result_h[1] = __float22half2_rn(result[1]);

        out_cast[j] = result_f;
    }
    // Tail elements not covered by the unrolled loop.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        for (int i = high_index; i < N; i++) {
            out[i] = __float2half((float)Xdata[i] * scale * mask[i]);
        }
    }
}
|
| 440 |
+
|
| 441 |
+
// Host launcher for the out-of-place dropout gradient kernels.
template <typename T>
void launch_dropout_grad(T* vals_out,
                         const T* vals,
                         uint8_t* mask,
                         int total_count,
                         float ratio,
                         cudaStream_t stream)
{
    // Kernels are hand-unrolled for exactly 4 elements per iteration.
    assert(unroll_factor == 4);

    const float scale = 1. / (1. - ratio);
    dropout_grad_kernel<<<DS_GET_BLOCKS(total_count / unroll_factor),
                          DS_CUDA_NUM_THREADS,
                          0,
                          stream>>>(total_count, scale, vals, vals_out, mask);
}
// Explicit instantiations for the two supported element types.
template void launch_dropout_grad(float*,
                                  const float* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
template void launch_dropout_grad(__half*,
                                  const __half* vals,
                                  uint8_t* mask,
                                  int total_count,
                                  float ratio,
                                  cudaStream_t stream);
|
| 469 |
+
|
| 470 |
+
// Fused bias-add + inverted dropout over float data, vectorized as float4.
// N is the number of float4 vectors (total floats / unroll_factor); dim is
// the un-vectorized bias length, so bias is indexed modulo
// dim / unroll_factor in float4 units. One mask byte is written per scalar
// element, packed four at a time through a uint32_t store. `seed` is the
// Philox (seed, offset) pair from the training context.
// Fix vs. original: removed the unused local `tid`.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const float* bias,
                               float* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);  // inverted-dropout scaling
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);

    float4* Xdata_cast = reinterpret_cast<float4*>(Xdata);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);

    // NOTE(review): CUDA_1D_KERNEL_LOOP is a project macro; presumably a
    // grid-stride loop over [0, N) -- confirm in the project headers.
    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);
        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;

        // Keep an element iff its uniform draw exceeds the drop ratio.
        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        float4 x_data = Xdata_cast[j];
        float4 b_data = bias_cast[j % (dim / unroll_factor)];

        x_data.x += b_data.x;
        x_data.y += b_data.y;
        x_data.z += b_data.z;
        x_data.w += b_data.w;

        x_data.x = x_data.x * scale * m[0];
        x_data.y = x_data.y * scale * m[1];
        x_data.z = x_data.z * scale * m[2];
        x_data.w = x_data.w * scale * m[3];

        mask_32[j] = m_32;
        Xdata_cast[j] = x_data;
    }
    // Scalar tail for elements that do not fill a whole vector.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = Xdata[i] + bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            Xdata[i] = x_data * scale * m;
            mask[i] = m;
        }
    }
}
|
| 530 |
+
|
| 531 |
+
// Fused bias-add + inverted dropout over __half data, vectorized as float2
// (= four halves, matching unroll_factor). Arithmetic is done in fp32 after
// unpacking with __half22float2, then rounded back with __float22half2_rn.
// One mask byte is written per scalar element, packed four at a time.
// Fix vs. original: removed the unused local `tid`.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const __half* bias,
                               __half* Xdata,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);

    float2* Xdata_cast = reinterpret_cast<float2*>(Xdata);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);

    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);

        // Reinterpret a float2 load as two packed __half2 values.
        float2 data_f;
        __half2* data_h = reinterpret_cast<__half2*>(&data_f);

        float2 bias_f;
        __half2* bias_h = reinterpret_cast<__half2*>(&bias_f);

        data_f = Xdata_cast[j];
        bias_f = bias_cast[j % (dim / unroll_factor)];

        float2 data_h_0 = __half22float2(data_h[0]);
        float2 data_h_1 = __half22float2(data_h[1]);

        float2 bias_h_0 = __half22float2(bias_h[0]);
        float2 bias_h_1 = __half22float2(bias_h[1]);

        data_h_0.x += bias_h_0.x;
        data_h_0.y += bias_h_0.y;
        data_h_1.x += bias_h_1.x;
        data_h_1.y += bias_h_1.y;

        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;

        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        // NOTE(review): assigning __float2half back into a float field does a
        // half round-trip per component; preserved as-is to keep numerics.
        data_h_0.x = __float2half(data_h_0.x * scale * m[0]);
        data_h_0.y = __float2half(data_h_0.y * scale * m[1]);
        data_h_1.x = __float2half(data_h_1.x * scale * m[2]);
        data_h_1.y = __float2half(data_h_1.y * scale * m[3]);

        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);

        result_h[0] = __float22half2_rn(data_h_0);
        result_h[1] = __float22half2_rn(data_h_1);

        Xdata_cast[j] = result_f;
        mask_32[j] = m_32;
    }
    // Scalar tail for elements that do not fill a whole vector.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = (float)Xdata[i] + (float)bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            Xdata[i] = __float2half(x_data * scale * m);
            mask[i] = m;
        }
    }
}
|
| 610 |
+
|
| 611 |
+
template <typename T>
|
| 612 |
+
void launch_dropout(T* out,
|
| 613 |
+
const T* bias,
|
| 614 |
+
uint8_t* mask,
|
| 615 |
+
int batch,
|
| 616 |
+
int dim,
|
| 617 |
+
float ratio,
|
| 618 |
+
cudaStream_t stream)
|
| 619 |
+
{
|
| 620 |
+
assert(unroll_factor == 4);
|
| 621 |
+
|
| 622 |
+
int total_count = batch * dim / unroll_factor;
|
| 623 |
+
|
| 624 |
+
dim3 grid_dim = DS_GET_BLOCKS(total_count);
|
| 625 |
+
dim3 block_dim = DS_CUDA_NUM_THREADS;
|
| 626 |
+
|
| 627 |
+
uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x;
|
| 628 |
+
std::pair<uint64_t, uint64_t> seed = TrainingContext::Instance().IncrementOffset(inc);
|
| 629 |
+
|
| 630 |
+
dropout_kernel<<<grid_dim, block_dim, 0, stream>>>(
|
| 631 |
+
total_count, dim, ratio, bias, out, mask, seed);
|
| 632 |
+
}
|
| 633 |
+
|
| 634 |
+
template void launch_dropout(float*,
|
| 635 |
+
const float* bias,
|
| 636 |
+
uint8_t* mask,
|
| 637 |
+
int batch,
|
| 638 |
+
int dim,
|
| 639 |
+
float ratio,
|
| 640 |
+
cudaStream_t stream);
|
| 641 |
+
template void launch_dropout(__half*,
|
| 642 |
+
const __half* bias,
|
| 643 |
+
uint8_t* mask,
|
| 644 |
+
int batch,
|
| 645 |
+
int dim,
|
| 646 |
+
float ratio,
|
| 647 |
+
cudaStream_t stream);
|
| 648 |
+
|
| 649 |
+
// Fused bias-add + inverted dropout + residual-add over float data,
// vectorized as float4: out = dropout(input + bias) + residual. The
// residual is added AFTER the dropout scaling, so it is never dropped.
// One mask byte per scalar element, packed four at a time.
// Fix vs. original: removed the unused local `tid`.
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const float* input,
                               const float* residual,
                               const float* bias,
                               float* out,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);

    float4* out_cast = reinterpret_cast<float4*>(out);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);

    const float4* bias_cast = reinterpret_cast<const float4*>(bias);
    const float4* residual_cast = reinterpret_cast<const float4*>(residual);
    const float4* input_cast = reinterpret_cast<const float4*>(input);

    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);

        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;

        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        float4 out_data;
        float4 b_data = bias_cast[j % (dim / unroll_factor)];
        float4 res_data = residual_cast[j];
        float4 inp_data = input_cast[j];

        out_data.x = (b_data.x + inp_data.x);
        out_data.y = (b_data.y + inp_data.y);
        out_data.z = (b_data.z + inp_data.z);
        out_data.w = (b_data.w + inp_data.w);

        out_data.x = out_data.x * scale * m[0];
        out_data.y = out_data.y * scale * m[1];
        out_data.z = out_data.z * scale * m[2];
        out_data.w = out_data.w * scale * m[3];

        out_data.x += res_data.x;
        out_data.y += res_data.y;
        out_data.z += res_data.z;
        out_data.w += res_data.w;

        mask_32[j] = m_32;
        out_cast[j] = out_data;
    }
    // Scalar tail for elements that do not fill a whole vector.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = input[i] + bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            x_data = x_data * scale * m;
            x_data += residual[i];

            out[i] = x_data;
            mask[i] = m;
        }
    }
}
|
| 725 |
+
|
| 726 |
+
// Fused bias-add + inverted dropout + residual-add for __half data,
// vectorized as float2 (= four halves): out = dropout(input + bias) +
// residual, with the residual added after the dropout scaling. Arithmetic is
// done in fp32 and rounded back with __float22half2_rn.
// Fixes vs. original: removed the unused local `tid`, and removed `data_f` /
// `data_h`, which were converted with __half22float2 while uninitialized
// (the converted values were immediately overwritten, so output is
// unchanged, but the uninitialized read is gone).
__global__ void dropout_kernel(const int N,
                               const int dim,
                               const float ratio,
                               const __half* input,
                               const __half* residual,
                               const __half* bias,
                               __half* out,
                               uint8_t* mask,
                               std::pair<uint64_t, uint64_t> seed)
{
    const float scale = 1. / (1. - ratio);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    curandStatePhilox4_32_10_t state;
    curand_init(seed.first, idx, seed.second, &state);

    float2* out_cast = reinterpret_cast<float2*>(out);
    uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask);

    const float2* bias_cast = reinterpret_cast<const float2*>(bias);
    const float2* residual_cast = reinterpret_cast<const float2*>(residual);
    const float2* input_cast = reinterpret_cast<const float2*>(input);

    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 rand = curand_uniform4(&state);

        // Reinterpret each float2 load as two packed __half2 values.
        float2 bias_f;
        __half2* bias_h = reinterpret_cast<__half2*>(&bias_f);

        float2 residual_f;
        __half2* residual_h = reinterpret_cast<__half2*>(&residual_f);

        float2 input_f;
        __half2* input_h = reinterpret_cast<__half2*>(&input_f);

        bias_f = bias_cast[j % (dim / unroll_factor)];
        residual_f = residual_cast[j];
        input_f = input_cast[j];

        float2 bias_h_0 = __half22float2(bias_h[0]);
        float2 bias_h_1 = __half22float2(bias_h[1]);

        float2 residual_h_0 = __half22float2(residual_h[0]);
        float2 residual_h_1 = __half22float2(residual_h[1]);

        float2 input_h_0 = __half22float2(input_h[0]);
        float2 input_h_1 = __half22float2(input_h[1]);

        // fp32 accumulators; every field is written before use.
        float2 data_h_0;
        float2 data_h_1;

        data_h_0.x = (bias_h_0.x + input_h_0.x);
        data_h_0.y = (bias_h_0.y + input_h_0.y);
        data_h_1.x = (bias_h_1.x + input_h_1.x);
        data_h_1.y = (bias_h_1.y + input_h_1.y);

        uint32_t m_32;
        uint8_t* m = (uint8_t*)&m_32;

        m[0] = (uint8_t)(rand.x > ratio);
        m[1] = (uint8_t)(rand.y > ratio);
        m[2] = (uint8_t)(rand.z > ratio);
        m[3] = (uint8_t)(rand.w > ratio);

        // NOTE(review): assigning __float2half back into a float field does a
        // half round-trip per component; preserved as-is to keep numerics.
        data_h_0.x = __float2half(data_h_0.x * scale * m[0]);
        data_h_0.y = __float2half(data_h_0.y * scale * m[1]);
        data_h_1.x = __float2half(data_h_1.x * scale * m[2]);
        data_h_1.y = __float2half(data_h_1.y * scale * m[3]);

        data_h_0.x += residual_h_0.x;
        data_h_0.y += residual_h_0.y;
        data_h_1.x += residual_h_1.x;
        data_h_1.y += residual_h_1.y;

        float2 result_f;
        __half2* result_h = reinterpret_cast<__half2*>(&result_f);

        result_h[0] = __float22half2_rn(data_h_0);
        result_h[1] = __float22half2_rn(data_h_1);

        out_cast[j] = result_f;
        mask_32[j] = m_32;
    }
    // Scalar tail for elements that do not fill a whole vector.
    int high_index =
        ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x;
    if (N > high_index) {
        float4 rand = curand_uniform4(&state);
        float* rand_data = &(rand.x);
        int k = 0;
        for (int i = high_index; i < N; i++) {
            float x_data = (float)input[i] + (float)bias[i % dim];
            uint8_t m = (uint8_t)(rand_data[k++] > ratio);
            x_data = x_data * scale * m;
            x_data += (float)residual[i];

            out[i] = __float2half(x_data);
            mask[i] = m;
        }
    }
}
|
| 831 |
+
|
| 832 |
+
template <typename T>
|
| 833 |
+
void launch_dropout(T* out,
|
| 834 |
+
const T* input,
|
| 835 |
+
const T* residual,
|
| 836 |
+
const T* bias,
|
| 837 |
+
uint8_t* mask,
|
| 838 |
+
int batch,
|
| 839 |
+
int dim,
|
| 840 |
+
float ratio,
|
| 841 |
+
cudaStream_t stream)
|
| 842 |
+
{
|
| 843 |
+
assert(unroll_factor == 4);
|
| 844 |
+
|
| 845 |
+
int total_count = batch * dim / unroll_factor;
|
| 846 |
+
dim3 grid_dim = DS_GET_BLOCKS(total_count);
|
| 847 |
+
dim3 block_dim = DS_CUDA_NUM_THREADS;
|
| 848 |
+
|
| 849 |
+
uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x;
|
| 850 |
+
std::pair<uint64_t, uint64_t> seed = TrainingContext::Instance().IncrementOffset(inc);
|
| 851 |
+
|
| 852 |
+
dropout_kernel<<<grid_dim, block_dim, 0, stream>>>(
|
| 853 |
+
total_count, dim, ratio, input, residual, bias, out, mask, seed);
|
| 854 |
+
}
|
| 855 |
+
|
| 856 |
+
template void launch_dropout(float*,
|
| 857 |
+
const float*,
|
| 858 |
+
const float* residual,
|
| 859 |
+
const float* bias,
|
| 860 |
+
uint8_t* mask,
|
| 861 |
+
int batch,
|
| 862 |
+
int dim,
|
| 863 |
+
float ratio,
|
| 864 |
+
cudaStream_t stream);
|
| 865 |
+
template void launch_dropout(__half*,
|
| 866 |
+
const __half*,
|
| 867 |
+
const __half* residual,
|
| 868 |
+
const __half* bias,
|
| 869 |
+
uint8_t* mask,
|
| 870 |
+
int batch,
|
| 871 |
+
int dim,
|
| 872 |
+
float ratio,
|
| 873 |
+
cudaStream_t stream);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp
ADDED
|
@@ -0,0 +1,1055 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
|
| 8 |
+
#include <cublas_v2.h>
|
| 9 |
+
#include <cuda_fp16.h>
|
| 10 |
+
#include <cuda_runtime.h>
|
| 11 |
+
#include <type_traits>
|
| 12 |
+
#include <unordered_map>
|
| 13 |
+
#include <vector>
|
| 14 |
+
#include "Timer.h"
|
| 15 |
+
#include "context.h"
|
| 16 |
+
#include "cublas_wrappers.h"
|
| 17 |
+
#include "custom_cuda_layers.h"
|
| 18 |
+
#include "ds_transformer_cuda.h"
|
| 19 |
+
|
| 20 |
+
static std::unordered_map<int, std::shared_ptr<void>> s_transformer_layers;
|
| 21 |
+
|
| 22 |
+
const int init_seq_length = 128;
|
| 23 |
+
|
| 24 |
+
// C++ interface
|
| 25 |
+
|
| 26 |
+
// Compute the per-layer scratch workspace size, in ELEMENTS of T (the
// trailing comment marks where a byte count would multiply by sizeof(T)).
// Training adds room for activation buffers; gelu checkpointing adds two
// intermediate-sized buffers on top.
// NOTE(review): intermediate products are computed in size_t but the result
// is narrowed to unsigned on return -- could truncate for very large
// batch*seq*hidden; confirm callers' expectations before widening.
template <typename T>
unsigned get_workspace_size(unsigned maxBatchSize,
                            unsigned seq_len,
                            unsigned hidden_size,
                            unsigned intermediate_size,
                            unsigned heads,
                            bool training,
                            bool gelu_checkpoint)
{
    unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size);
    if (training) {
        workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size);
        // Either the FF intermediate activation or two attention-score
        // buffers, whichever is larger.
        workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size),
                                     2 * (size_t(maxBatchSize) * heads * seq_len * seq_len)));
        if (gelu_checkpoint)
            workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size);
    }
    return workSpacesize;  // * sizeof(T);
}
|
| 45 |
+
|
| 46 |
+
// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4.
|
| 47 |
+
#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
|
| 48 |
+
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
|
| 49 |
+
#define CHECK_INPUT(x) \
|
| 50 |
+
CHECK_CUDA(x); \
|
| 51 |
+
CHECK_CONTIGUOUS(x)
|
| 52 |
+
|
| 53 |
+
template <typename T>
|
| 54 |
+
BertTransformerLayer<T>::BertTransformerLayer(unsigned layer_id,
|
| 55 |
+
unsigned batch_size,
|
| 56 |
+
unsigned hidden_size,
|
| 57 |
+
unsigned num_heads,
|
| 58 |
+
unsigned intermediate_size,
|
| 59 |
+
unsigned seq_length,
|
| 60 |
+
float attn_prob_dropout_ratio,
|
| 61 |
+
float hidden_output_dropout_ratio,
|
| 62 |
+
float layer_norm_eps,
|
| 63 |
+
bool pre_or_postLayerNorm,
|
| 64 |
+
const std::vector<std::array<int, 3>>& gemm_algos,
|
| 65 |
+
bool attn_dropout_checkpoint,
|
| 66 |
+
bool normalize_invertible,
|
| 67 |
+
bool gelu_checkpoint,
|
| 68 |
+
bool stochastic_mode)
|
| 69 |
+
: _layer_id(layer_id),
|
| 70 |
+
_batch_size(batch_size),
|
| 71 |
+
_hidden_size(hidden_size),
|
| 72 |
+
_heads(num_heads),
|
| 73 |
+
_intermediate_size(intermediate_size),
|
| 74 |
+
_seq_length(seq_length),
|
| 75 |
+
_training(true),
|
| 76 |
+
_pre_or_postLayerNorm(pre_or_postLayerNorm),
|
| 77 |
+
_attn_dropout_checkpoint(attn_dropout_checkpoint),
|
| 78 |
+
_normalize_invertible(normalize_invertible),
|
| 79 |
+
_gelu_checkpoint(gelu_checkpoint),
|
| 80 |
+
_stochastic_mode(stochastic_mode),
|
| 81 |
+
_stream(TrainingContext::Instance().GetCurrentStream()),
|
| 82 |
+
_cublasHandle(TrainingContext::Instance().GetCublasHandle()),
|
| 83 |
+
_qkv_linear(typename FeedForward<T>::Config(batch_size * seq_length,
|
| 84 |
+
3 * hidden_size,
|
| 85 |
+
hidden_size,
|
| 86 |
+
gemm_algos[0])),
|
| 87 |
+
_attn_out_linear(typename FeedForward<T>::Config(batch_size * seq_length,
|
| 88 |
+
hidden_size,
|
| 89 |
+
hidden_size,
|
| 90 |
+
gemm_algos[0])),
|
| 91 |
+
_attn_layer_norm(typename Normalize_Layer<T>::Config(batch_size,
|
| 92 |
+
seq_length,
|
| 93 |
+
hidden_size,
|
| 94 |
+
layer_norm_eps,
|
| 95 |
+
true,
|
| 96 |
+
!normalize_invertible)),
|
| 97 |
+
_layer_norm(typename Normalize_Layer<T>::Config(batch_size,
|
| 98 |
+
seq_length,
|
| 99 |
+
hidden_size,
|
| 100 |
+
layer_norm_eps,
|
| 101 |
+
true,
|
| 102 |
+
!normalize_invertible)),
|
| 103 |
+
_ff1(typename FeedForward<T>::Config(batch_size * seq_length,
|
| 104 |
+
_intermediate_size,
|
| 105 |
+
hidden_size,
|
| 106 |
+
gemm_algos[1])),
|
| 107 |
+
_ff2(typename FeedForward<T>::Config(batch_size * seq_length,
|
| 108 |
+
hidden_size,
|
| 109 |
+
_intermediate_size,
|
| 110 |
+
gemm_algos[2])),
|
| 111 |
+
_softmax(typename Softmax<T>::Config(batch_size, num_heads, seq_length)),
|
| 112 |
+
_gelu(typename Gelu<T>::Config(_intermediate_size)),
|
| 113 |
+
_attn_prob_dropout(typename Dropout<T>::Config(attn_prob_dropout_ratio, _seq_length)),
|
| 114 |
+
_attn_output_dropout(typename Dropout<T>::Config(hidden_output_dropout_ratio, _hidden_size)),
|
| 115 |
+
_layer_output_dropout(typename Dropout<T>::Config(hidden_output_dropout_ratio, _hidden_size)),
|
| 116 |
+
_attn_scores(typename StridedBatchGemm<T>::Config(_batch_size * _heads,
|
| 117 |
+
_seq_length,
|
| 118 |
+
_seq_length,
|
| 119 |
+
_hidden_size / _heads,
|
| 120 |
+
(T(1.0) / T(sqrt(_hidden_size / _heads))),
|
| 121 |
+
T(0.0),
|
| 122 |
+
CUBLAS_OP_T,
|
| 123 |
+
CUBLAS_OP_N,
|
| 124 |
+
gemm_algos[3])),
|
| 125 |
+
_attn_context(typename StridedBatchGemm<T>::Config(_batch_size * _heads,
|
| 126 |
+
_hidden_size / _heads,
|
| 127 |
+
_seq_length,
|
| 128 |
+
_seq_length,
|
| 129 |
+
T(1.0),
|
| 130 |
+
T(0.0),
|
| 131 |
+
CUBLAS_OP_N,
|
| 132 |
+
CUBLAS_OP_N,
|
| 133 |
+
gemm_algos[4]))
|
| 134 |
+
{
|
| 135 |
+
assert(_hidden_size % _heads == 0);
|
| 136 |
+
|
| 137 |
+
Initialize();
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template <typename T>
|
| 141 |
+
BertTransformerLayer<T>::~BertTransformerLayer()
|
| 142 |
+
{
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
template <typename T>
|
| 146 |
+
void BertTransformerLayer<T>::Initialize()
|
| 147 |
+
{
|
| 148 |
+
#ifndef __HIP_PLATFORM_AMD__
|
| 149 |
+
if (std::is_same<T, __half>::value) cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH);
|
| 150 |
+
#endif
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
template <typename T>
|
| 154 |
+
void BertTransformerLayer<T>::Forward(unsigned bsz,
|
| 155 |
+
const T* input_ptr,
|
| 156 |
+
const T* input_mask_ptr,
|
| 157 |
+
const T* attn_qkvw_ptr,
|
| 158 |
+
const T* attn_qkvb_ptr,
|
| 159 |
+
const T* attn_ow_ptr,
|
| 160 |
+
const T* attn_ob_ptr,
|
| 161 |
+
const T* attn_nw_ptr,
|
| 162 |
+
const T* attn_nb_ptr,
|
| 163 |
+
const T* inter_w_ptr,
|
| 164 |
+
const T* inter_b_ptr,
|
| 165 |
+
const T* output_w_ptr,
|
| 166 |
+
const T* output_b_ptr,
|
| 167 |
+
const T* norm_w_ptr,
|
| 168 |
+
const T* norm_b_ptr,
|
| 169 |
+
T* out_ptr,
|
| 170 |
+
T* inp_norm_ptr,
|
| 171 |
+
T* q_tf_ptr,
|
| 172 |
+
T* k_tf_ptr,
|
| 173 |
+
T* v_tf_ptr,
|
| 174 |
+
T* soft_out_ptr,
|
| 175 |
+
T* ctx_bufB_ptr,
|
| 176 |
+
T* attn_o_inp_ptr,
|
| 177 |
+
T* add_res_ptr,
|
| 178 |
+
T* ff1_inp_ptr,
|
| 179 |
+
T* gelu_inp_ptr,
|
| 180 |
+
T* ff2_inp_ptr)
|
| 181 |
+
{
|
| 182 |
+
cublasSetStream(_cublasHandle, _stream);
|
| 183 |
+
|
| 184 |
+
if (!_stochastic_mode) cudaStreamSynchronize(_stream);
|
| 185 |
+
|
| 186 |
+
T* workspace = static_cast<T*>(TrainingContext::Instance().GetWorkSpace());
|
| 187 |
+
size_t small_buf_size = bsz * _seq_length * _hidden_size;
|
| 188 |
+
T* buf_0 = workspace;
|
| 189 |
+
T* buf_1 = buf_0 + small_buf_size;
|
| 190 |
+
T* buf_2 = buf_1;
|
| 191 |
+
|
| 192 |
+
if (_normalize_invertible) {
|
| 193 |
+
add_res_ptr = buf_1 + 3 * small_buf_size;
|
| 194 |
+
buf_2 = add_res_ptr;
|
| 195 |
+
}
|
| 196 |
+
if (_gelu_checkpoint) buf_2 += small_buf_size;
|
| 197 |
+
if (_attn_dropout_checkpoint)
|
| 198 |
+
ctx_bufB_ptr =
|
| 199 |
+
(_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size)
|
| 200 |
+
: (buf_1 + 4 * small_buf_size));
|
| 201 |
+
|
| 202 |
+
int bsz_seq = bsz * _seq_length;
|
| 203 |
+
|
| 204 |
+
if (_pre_or_postLayerNorm) {
|
| 205 |
+
if (_layer_norm.UseMean())
|
| 206 |
+
_layer_norm.ForwardCheckpoint(
|
| 207 |
+
bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true);
|
| 208 |
+
|
| 209 |
+
else
|
| 210 |
+
_layer_norm.Forward(
|
| 211 |
+
bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
if (_pre_or_postLayerNorm)
|
| 215 |
+
_qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle);
|
| 216 |
+
else
|
| 217 |
+
_qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle);
|
| 218 |
+
|
| 219 |
+
launch_bias_add_transform_0213<T>(
|
| 220 |
+
q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3);
|
| 221 |
+
|
| 222 |
+
int bsz_heads = bsz * _heads;
|
| 223 |
+
|
| 224 |
+
// attention scores
|
| 225 |
+
_attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle);
|
| 226 |
+
|
| 227 |
+
// Softmax + Mask
|
| 228 |
+
_softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream);
|
| 229 |
+
|
| 230 |
+
// attn prob dropout.
|
| 231 |
+
_attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream);
|
| 232 |
+
|
| 233 |
+
// attention context
|
| 234 |
+
_attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle);
|
| 235 |
+
|
| 236 |
+
launch_transform4d_0213<T>(
|
| 237 |
+
attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1);
|
| 238 |
+
|
| 239 |
+
if (_pre_or_postLayerNorm)
|
| 240 |
+
_attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle);
|
| 241 |
+
else
|
| 242 |
+
_attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle);
|
| 243 |
+
|
| 244 |
+
// attn output dropout.
|
| 245 |
+
if (_pre_or_postLayerNorm)
|
| 246 |
+
_attn_output_dropout.ForwardWithBias(
|
| 247 |
+
bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream);
|
| 248 |
+
else
|
| 249 |
+
_attn_output_dropout.ForwardWithBias(
|
| 250 |
+
bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream);
|
| 251 |
+
|
| 252 |
+
if (_pre_or_postLayerNorm) {
|
| 253 |
+
if (_attn_layer_norm.UseMean())
|
| 254 |
+
_attn_layer_norm.ForwardCheckpoint(
|
| 255 |
+
bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true);
|
| 256 |
+
else
|
| 257 |
+
_attn_layer_norm.Forward(
|
| 258 |
+
bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true);
|
| 259 |
+
} else {
|
| 260 |
+
if (_attn_layer_norm.UseMean())
|
| 261 |
+
_attn_layer_norm.ForwardCheckpoint(
|
| 262 |
+
bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true);
|
| 263 |
+
else
|
| 264 |
+
_attn_layer_norm.Forward(
|
| 265 |
+
bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true);
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
_ff1.Forward(bsz_seq,
|
| 269 |
+
ff1_inp_ptr,
|
| 270 |
+
inter_w_ptr,
|
| 271 |
+
(_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr),
|
| 272 |
+
_cublasHandle);
|
| 273 |
+
|
| 274 |
+
_gelu.ForwardWithBiasAdd(bsz_seq,
|
| 275 |
+
(_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr),
|
| 276 |
+
inter_b_ptr,
|
| 277 |
+
(_gelu_checkpoint ? buf_2 : ff2_inp_ptr),
|
| 278 |
+
_stream);
|
| 279 |
+
|
| 280 |
+
_ff2.Forward(
|
| 281 |
+
bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle);
|
| 282 |
+
|
| 283 |
+
// layer output dropout.
|
| 284 |
+
if (_pre_or_postLayerNorm)
|
| 285 |
+
_layer_output_dropout.ForwardWithBias(
|
| 286 |
+
bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream);
|
| 287 |
+
else
|
| 288 |
+
_layer_output_dropout.ForwardWithBias(
|
| 289 |
+
bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream);
|
| 290 |
+
|
| 291 |
+
if (!_pre_or_postLayerNorm) {
|
| 292 |
+
if (_layer_norm.UseMean())
|
| 293 |
+
_layer_norm.ForwardCheckpoint(
|
| 294 |
+
bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true);
|
| 295 |
+
else
|
| 296 |
+
_layer_norm.Forward(
|
| 297 |
+
bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true);
|
| 298 |
+
}
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
template <typename T>
|
| 302 |
+
void BertTransformerLayer<T>::Backward(unsigned bsz,
|
| 303 |
+
const T* grad_output_ptr,
|
| 304 |
+
const T* input_ptr,
|
| 305 |
+
const T* output_ptr,
|
| 306 |
+
const T* inp_norm_ptr,
|
| 307 |
+
const T* q_tf_ptr,
|
| 308 |
+
const T* k_tf_ptr,
|
| 309 |
+
const T* v_tf_ptr,
|
| 310 |
+
const T* soft_out_ptr,
|
| 311 |
+
const T* ctx_bufB_ptr,
|
| 312 |
+
const T* attn_o_inp_ptr,
|
| 313 |
+
const T* add_res_ptr,
|
| 314 |
+
const T* ff1_inp_ptr,
|
| 315 |
+
const T* gelu_inp_ptr,
|
| 316 |
+
const T* ff2_inp_ptr,
|
| 317 |
+
const T* input_mask_ptr,
|
| 318 |
+
const T* attn_qkvw_ptr,
|
| 319 |
+
const T* attn_ow_ptr,
|
| 320 |
+
const T* attn_nw_ptr,
|
| 321 |
+
const T* attn_nb_ptr,
|
| 322 |
+
const T* inter_w_ptr,
|
| 323 |
+
const T* inter_b_ptr,
|
| 324 |
+
const T* output_w_ptr,
|
| 325 |
+
const T* norm_w_ptr,
|
| 326 |
+
const T* norm_b_ptr,
|
| 327 |
+
|
| 328 |
+
T* grad_input_ptr,
|
| 329 |
+
T* grad_attn_qkvw_ptr,
|
| 330 |
+
T* grad_attn_qkvb_ptr,
|
| 331 |
+
T* grad_attn_ow_ptr,
|
| 332 |
+
T* grad_attn_ob_ptr,
|
| 333 |
+
T* grad_attn_nw_ptr,
|
| 334 |
+
T* grad_attn_nb_ptr,
|
| 335 |
+
T* grad_inter_w_ptr,
|
| 336 |
+
T* grad_inter_b_ptr,
|
| 337 |
+
T* grad_output_w_ptr,
|
| 338 |
+
T* grad_output_b_ptr,
|
| 339 |
+
T* grad_norm_w_ptr,
|
| 340 |
+
T* grad_norm_b_ptr)
|
| 341 |
+
{
|
| 342 |
+
cublasSetStream(_cublasHandle, _stream);
|
| 343 |
+
|
| 344 |
+
if (!_stochastic_mode) cudaStreamSynchronize(_stream);
|
| 345 |
+
|
| 346 |
+
T* workspace = static_cast<T*>(TrainingContext::Instance().GetWorkSpace());
|
| 347 |
+
size_t small_buf_size = bsz * _seq_length * _hidden_size;
|
| 348 |
+
T* buf_0 = workspace;
|
| 349 |
+
T* buf_1 = buf_0 + small_buf_size;
|
| 350 |
+
T* buf_2 = buf_1 + small_buf_size;
|
| 351 |
+
T* buf_3 = buf_2 + small_buf_size;
|
| 352 |
+
|
| 353 |
+
T* ff2_buf = (_gelu_checkpoint ? buf_3 + (bsz * _seq_length * _intermediate_size)
|
| 354 |
+
: buf_3 + small_buf_size);
|
| 355 |
+
T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads);
|
| 356 |
+
|
| 357 |
+
cudaStream_t streams[2] = {_stream, _stream};
|
| 358 |
+
|
| 359 |
+
int bsz_seq = bsz * _seq_length;
|
| 360 |
+
int bsz_heads = bsz * _heads;
|
| 361 |
+
|
| 362 |
+
if (!_pre_or_postLayerNorm) {
|
| 363 |
+
if (_layer_norm.UseMean())
|
| 364 |
+
_layer_norm.Backward(bsz_seq,
|
| 365 |
+
grad_output_ptr,
|
| 366 |
+
norm_w_ptr,
|
| 367 |
+
grad_norm_w_ptr,
|
| 368 |
+
grad_norm_b_ptr,
|
| 369 |
+
streams,
|
| 370 |
+
buf_1,
|
| 371 |
+
inp_norm_ptr);
|
| 372 |
+
|
| 373 |
+
else
|
| 374 |
+
_layer_norm.Backward(bsz_seq,
|
| 375 |
+
grad_output_ptr,
|
| 376 |
+
norm_w_ptr,
|
| 377 |
+
norm_b_ptr,
|
| 378 |
+
grad_norm_w_ptr,
|
| 379 |
+
grad_norm_b_ptr,
|
| 380 |
+
streams,
|
| 381 |
+
buf_1,
|
| 382 |
+
output_ptr);
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
if (_pre_or_postLayerNorm)
|
| 386 |
+
_layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream);
|
| 387 |
+
else
|
| 388 |
+
_layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream);
|
| 389 |
+
|
| 390 |
+
const T* layer_dropout_buf = _layer_output_dropout.HasDropout()
|
| 391 |
+
? buf_0
|
| 392 |
+
: (_pre_or_postLayerNorm ? grad_output_ptr : buf_1);
|
| 393 |
+
|
| 394 |
+
if (_gelu_checkpoint)
|
| 395 |
+
_gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream);
|
| 396 |
+
_ff2.Backward(bsz_seq,
|
| 397 |
+
layer_dropout_buf,
|
| 398 |
+
(_gelu_checkpoint ? buf_2 : ff2_inp_ptr),
|
| 399 |
+
output_w_ptr,
|
| 400 |
+
grad_output_w_ptr,
|
| 401 |
+
grad_output_b_ptr,
|
| 402 |
+
_cublasHandle,
|
| 403 |
+
_stream,
|
| 404 |
+
ff2_buf);
|
| 405 |
+
|
| 406 |
+
_gelu.Backward(
|
| 407 |
+
bsz_seq, ff2_buf, (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream);
|
| 408 |
+
|
| 409 |
+
_ff1.Backward(bsz_seq,
|
| 410 |
+
ff2_buf,
|
| 411 |
+
ff1_inp_ptr,
|
| 412 |
+
inter_w_ptr,
|
| 413 |
+
grad_inter_w_ptr,
|
| 414 |
+
grad_inter_b_ptr,
|
| 415 |
+
_cublasHandle,
|
| 416 |
+
_stream,
|
| 417 |
+
buf_3);
|
| 418 |
+
|
| 419 |
+
if (!_pre_or_postLayerNorm)
|
| 420 |
+
launch_fused_add2<T>(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream);
|
| 421 |
+
|
| 422 |
+
if (_pre_or_postLayerNorm) {
|
| 423 |
+
if (_attn_layer_norm.UseMean())
|
| 424 |
+
_attn_layer_norm.BackwardFusedAdd(bsz_seq,
|
| 425 |
+
buf_3,
|
| 426 |
+
grad_output_ptr,
|
| 427 |
+
attn_nw_ptr,
|
| 428 |
+
grad_attn_nw_ptr,
|
| 429 |
+
grad_attn_nb_ptr,
|
| 430 |
+
streams,
|
| 431 |
+
buf_0,
|
| 432 |
+
add_res_ptr);
|
| 433 |
+
|
| 434 |
+
else
|
| 435 |
+
_attn_layer_norm.BackwardFusedAdd(bsz_seq,
|
| 436 |
+
buf_3,
|
| 437 |
+
grad_output_ptr,
|
| 438 |
+
attn_nw_ptr,
|
| 439 |
+
attn_nb_ptr,
|
| 440 |
+
grad_attn_nw_ptr,
|
| 441 |
+
grad_attn_nb_ptr,
|
| 442 |
+
streams,
|
| 443 |
+
buf_0,
|
| 444 |
+
ff1_inp_ptr);
|
| 445 |
+
} else {
|
| 446 |
+
if (_attn_layer_norm.UseMean())
|
| 447 |
+
_attn_layer_norm.Backward(bsz_seq,
|
| 448 |
+
buf_2,
|
| 449 |
+
attn_nw_ptr,
|
| 450 |
+
grad_attn_nw_ptr,
|
| 451 |
+
grad_attn_nb_ptr,
|
| 452 |
+
streams,
|
| 453 |
+
buf_0,
|
| 454 |
+
add_res_ptr);
|
| 455 |
+
|
| 456 |
+
else
|
| 457 |
+
_attn_layer_norm.Backward(bsz_seq,
|
| 458 |
+
buf_2,
|
| 459 |
+
attn_nw_ptr,
|
| 460 |
+
attn_nb_ptr,
|
| 461 |
+
grad_attn_nw_ptr,
|
| 462 |
+
grad_attn_nb_ptr,
|
| 463 |
+
streams,
|
| 464 |
+
buf_0,
|
| 465 |
+
ff1_inp_ptr);
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
_attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream);
|
| 469 |
+
|
| 470 |
+
T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0;
|
| 471 |
+
|
| 472 |
+
_attn_out_linear.Backward(bsz_seq,
|
| 473 |
+
attn_output_dropout_buf,
|
| 474 |
+
attn_o_inp_ptr,
|
| 475 |
+
attn_ow_ptr,
|
| 476 |
+
grad_attn_ow_ptr,
|
| 477 |
+
grad_attn_ob_ptr,
|
| 478 |
+
_cublasHandle,
|
| 479 |
+
_stream,
|
| 480 |
+
buf_1);
|
| 481 |
+
|
| 482 |
+
launch_transform_0213<T>(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream);
|
| 483 |
+
|
| 484 |
+
if (_attn_prob_dropout.HasDropout()) {
|
| 485 |
+
if (_attn_dropout_checkpoint)
|
| 486 |
+
_attn_prob_dropout.Forward(
|
| 487 |
+
bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true);
|
| 488 |
+
|
| 489 |
+
_attn_context.Backward(bsz_heads,
|
| 490 |
+
buf_2,
|
| 491 |
+
v_tf_ptr,
|
| 492 |
+
(_attn_dropout_checkpoint ? ctx_bufB_ptr_recomp : ctx_bufB_ptr),
|
| 493 |
+
_cublasHandle,
|
| 494 |
+
buf_3,
|
| 495 |
+
ff2_buf);
|
| 496 |
+
} else
|
| 497 |
+
_attn_context.Backward(
|
| 498 |
+
bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf);
|
| 499 |
+
|
| 500 |
+
_attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream);
|
| 501 |
+
|
| 502 |
+
_softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream);
|
| 503 |
+
|
| 504 |
+
_attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1);
|
| 505 |
+
|
| 506 |
+
launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3);
|
| 507 |
+
|
| 508 |
+
if (_pre_or_postLayerNorm)
|
| 509 |
+
_qkv_linear.Backward(bsz_seq,
|
| 510 |
+
ff2_buf,
|
| 511 |
+
inp_norm_ptr,
|
| 512 |
+
attn_qkvw_ptr,
|
| 513 |
+
grad_attn_qkvw_ptr,
|
| 514 |
+
grad_attn_qkvb_ptr,
|
| 515 |
+
_cublasHandle,
|
| 516 |
+
_stream,
|
| 517 |
+
buf_2);
|
| 518 |
+
else
|
| 519 |
+
_qkv_linear.Backward(bsz_seq,
|
| 520 |
+
ff2_buf,
|
| 521 |
+
input_ptr,
|
| 522 |
+
attn_qkvw_ptr,
|
| 523 |
+
grad_attn_qkvw_ptr,
|
| 524 |
+
grad_attn_qkvb_ptr,
|
| 525 |
+
_cublasHandle,
|
| 526 |
+
_stream,
|
| 527 |
+
buf_2);
|
| 528 |
+
|
| 529 |
+
if (_pre_or_postLayerNorm) {
|
| 530 |
+
if (_layer_norm.UseMean())
|
| 531 |
+
_layer_norm.BackwardFusedAdd(bsz_seq,
|
| 532 |
+
buf_2,
|
| 533 |
+
buf_0,
|
| 534 |
+
norm_w_ptr,
|
| 535 |
+
grad_norm_w_ptr,
|
| 536 |
+
grad_norm_b_ptr,
|
| 537 |
+
streams,
|
| 538 |
+
grad_input_ptr,
|
| 539 |
+
input_ptr);
|
| 540 |
+
|
| 541 |
+
else
|
| 542 |
+
_layer_norm.BackwardFusedAdd(bsz_seq,
|
| 543 |
+
buf_2,
|
| 544 |
+
buf_0,
|
| 545 |
+
norm_w_ptr,
|
| 546 |
+
norm_b_ptr,
|
| 547 |
+
grad_norm_w_ptr,
|
| 548 |
+
grad_norm_b_ptr,
|
| 549 |
+
streams,
|
| 550 |
+
grad_input_ptr,
|
| 551 |
+
inp_norm_ptr);
|
| 552 |
+
} else
|
| 553 |
+
launch_fused_add2<T>(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream);
|
| 554 |
+
}
|
| 555 |
+
|
| 556 |
+
template <typename T>
|
| 557 |
+
void BertTransformerLayer<T>::SetTrainingMode(bool training)
|
| 558 |
+
{
|
| 559 |
+
// Dropout will be skipped when not in training model.
|
| 560 |
+
_attn_prob_dropout.SetTrainingMode(training);
|
| 561 |
+
_attn_output_dropout.SetTrainingMode(training);
|
| 562 |
+
_layer_output_dropout.SetTrainingMode(training);
|
| 563 |
+
}
|
| 564 |
+
|
| 565 |
+
template <typename T>
|
| 566 |
+
void BertTransformerLayer<T>::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr,
|
| 567 |
+
uint8_t* attn_output_dropout_mask_ptr,
|
| 568 |
+
uint8_t* layer_output_dropout_mask_ptr,
|
| 569 |
+
T* attn_layer_norm_var,
|
| 570 |
+
T* attn_layer_norm_mean,
|
| 571 |
+
T* layer_norm_var,
|
| 572 |
+
T* layer_norm_mean)
|
| 573 |
+
{
|
| 574 |
+
_attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr);
|
| 575 |
+
_attn_output_dropout.SetMask(attn_output_dropout_mask_ptr);
|
| 576 |
+
_layer_output_dropout.SetMask(layer_output_dropout_mask_ptr);
|
| 577 |
+
|
| 578 |
+
_attn_layer_norm.SetVar(attn_layer_norm_var);
|
| 579 |
+
_attn_layer_norm.SetMean(attn_layer_norm_mean);
|
| 580 |
+
_layer_norm.SetVar(layer_norm_var);
|
| 581 |
+
_layer_norm.SetMean(layer_norm_mean);
|
| 582 |
+
}
|
| 583 |
+
|
| 584 |
+
template <typename T>
|
| 585 |
+
void BertTransformerLayer<T>::SetSeqLength(unsigned seq_len)
|
| 586 |
+
{
|
| 587 |
+
_seq_length = seq_len;
|
| 588 |
+
|
| 589 |
+
_softmax.SetSeqLength(_seq_length);
|
| 590 |
+
_attn_prob_dropout.SetDimension(_seq_length);
|
| 591 |
+
_attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads);
|
| 592 |
+
_attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length);
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
template <typename T>
|
| 596 |
+
int create_transformer_layer(unsigned layer_id,
|
| 597 |
+
unsigned batch_size,
|
| 598 |
+
unsigned hidden_dim,
|
| 599 |
+
unsigned num_heads,
|
| 600 |
+
unsigned intermediate_size,
|
| 601 |
+
float attn_dropout_ratio,
|
| 602 |
+
float hidden_dropout_ratio,
|
| 603 |
+
float layer_norm_eps,
|
| 604 |
+
int seed,
|
| 605 |
+
bool pre_or_postLayerNorm,
|
| 606 |
+
bool test_gemm,
|
| 607 |
+
bool attn_dropout_checkpoint,
|
| 608 |
+
bool normalize_invertible,
|
| 609 |
+
bool gelu_checkpoint,
|
| 610 |
+
bool stochastic_mode)
|
| 611 |
+
{
|
| 612 |
+
TrainingContext::Instance().SetSeed(seed);
|
| 613 |
+
TrainingContext::Instance().TestGemmFP16(
|
| 614 |
+
test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads);
|
| 615 |
+
|
| 616 |
+
auto layer =
|
| 617 |
+
std::make_shared<BertTransformerLayer<T>>(layer_id,
|
| 618 |
+
batch_size,
|
| 619 |
+
hidden_dim,
|
| 620 |
+
num_heads,
|
| 621 |
+
intermediate_size,
|
| 622 |
+
init_seq_length,
|
| 623 |
+
attn_dropout_ratio,
|
| 624 |
+
hidden_dropout_ratio,
|
| 625 |
+
layer_norm_eps,
|
| 626 |
+
pre_or_postLayerNorm,
|
| 627 |
+
TrainingContext::Instance().GetGemmAlgos(),
|
| 628 |
+
attn_dropout_checkpoint,
|
| 629 |
+
normalize_invertible,
|
| 630 |
+
gelu_checkpoint,
|
| 631 |
+
stochastic_mode);
|
| 632 |
+
|
| 633 |
+
s_transformer_layers[layer_id] = layer;
|
| 634 |
+
|
| 635 |
+
std::string dtype = (std::is_same<T, __half>::value) ? "half" : "float";
|
| 636 |
+
|
| 637 |
+
std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]."
|
| 638 |
+
<< std::endl;
|
| 639 |
+
|
| 640 |
+
return 0;
|
| 641 |
+
}
|
| 642 |
+
|
| 643 |
+
template <typename T>
|
| 644 |
+
std::vector<torch::Tensor> ds_transformer_forward(unsigned layer_id,
|
| 645 |
+
const torch::Tensor& input,
|
| 646 |
+
const torch::Tensor& input_mask,
|
| 647 |
+
const torch::Tensor& attn_qkvw,
|
| 648 |
+
const torch::Tensor& attn_qkvb,
|
| 649 |
+
const torch::Tensor& attn_ow,
|
| 650 |
+
const torch::Tensor& attn_ob,
|
| 651 |
+
const torch::Tensor& attn_nw,
|
| 652 |
+
const torch::Tensor& attn_nb,
|
| 653 |
+
const torch::Tensor& inter_w,
|
| 654 |
+
const torch::Tensor& inter_b,
|
| 655 |
+
const torch::Tensor& output_w,
|
| 656 |
+
const torch::Tensor& output_b,
|
| 657 |
+
const torch::Tensor& norm_w,
|
| 658 |
+
const torch::Tensor& norm_b,
|
| 659 |
+
bool training_mode,
|
| 660 |
+
bool prelayernorm,
|
| 661 |
+
bool attn_dropout_checkpoint,
|
| 662 |
+
bool normalize_invertible,
|
| 663 |
+
bool gelu_checkpoint)
|
| 664 |
+
{
|
| 665 |
+
CHECK_INPUT(input);
|
| 666 |
+
CHECK_INPUT(input_mask);
|
| 667 |
+
CHECK_INPUT(attn_qkvw);
|
| 668 |
+
CHECK_INPUT(attn_qkvb);
|
| 669 |
+
CHECK_INPUT(attn_ow);
|
| 670 |
+
CHECK_INPUT(attn_ob);
|
| 671 |
+
CHECK_INPUT(attn_nw);
|
| 672 |
+
CHECK_INPUT(attn_nb);
|
| 673 |
+
CHECK_INPUT(inter_w);
|
| 674 |
+
CHECK_INPUT(inter_b);
|
| 675 |
+
CHECK_INPUT(output_w);
|
| 676 |
+
CHECK_INPUT(output_b);
|
| 677 |
+
CHECK_INPUT(norm_w);
|
| 678 |
+
CHECK_INPUT(norm_b);
|
| 679 |
+
|
| 680 |
+
unsigned bsz = input.size(0);
|
| 681 |
+
|
| 682 |
+
const T* input_ptr = (const T*)input.data_ptr();
|
| 683 |
+
const T* input_mask_ptr = (const T*)input_mask.data_ptr();
|
| 684 |
+
const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr();
|
| 685 |
+
const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr();
|
| 686 |
+
const T* attn_ow_ptr = (const T*)attn_ow.data_ptr();
|
| 687 |
+
const T* attn_ob_ptr = (const T*)attn_ob.data_ptr();
|
| 688 |
+
const T* attn_nw_ptr = (const T*)attn_nw.data_ptr();
|
| 689 |
+
const T* attn_nb_ptr = (const T*)attn_nb.data_ptr();
|
| 690 |
+
const T* inter_w_ptr = (const T*)inter_w.data_ptr();
|
| 691 |
+
const T* inter_b_ptr = (const T*)inter_b.data_ptr();
|
| 692 |
+
const T* output_w_ptr = (const T*)output_w.data_ptr();
|
| 693 |
+
const T* output_b_ptr = (const T*)output_b.data_ptr();
|
| 694 |
+
const T* norm_w_ptr = (const T*)norm_w.data_ptr();
|
| 695 |
+
const T* norm_b_ptr = (const T*)norm_b.data_ptr();
|
| 696 |
+
|
| 697 |
+
auto output = torch::empty_like(input);
|
| 698 |
+
T* out_ptr = (T*)output.data_ptr();
|
| 699 |
+
|
| 700 |
+
auto options = torch::TensorOptions()
|
| 701 |
+
.dtype(input.options().dtype())
|
| 702 |
+
.layout(torch::kStrided)
|
| 703 |
+
.device(torch::kCUDA)
|
| 704 |
+
.requires_grad(true);
|
| 705 |
+
|
| 706 |
+
auto uint8_options = torch::TensorOptions()
|
| 707 |
+
.dtype(torch::kInt8)
|
| 708 |
+
.layout(torch::kStrided)
|
| 709 |
+
.device(torch::kCUDA)
|
| 710 |
+
.requires_grad(false);
|
| 711 |
+
|
| 712 |
+
std::shared_ptr<BertTransformerLayer<T>> layer =
|
| 713 |
+
std::static_pointer_cast<BertTransformerLayer<T>>(s_transformer_layers[layer_id]);
|
| 714 |
+
|
| 715 |
+
unsigned seq_len = layer->GetSeqLength();
|
| 716 |
+
if (input.size(1) != seq_len) {
|
| 717 |
+
seq_len = input.size(1);
|
| 718 |
+
layer->SetSeqLength(seq_len);
|
| 719 |
+
}
|
| 720 |
+
|
| 721 |
+
auto workspace = torch::empty({get_workspace_size<T>(bsz,
|
| 722 |
+
seq_len,
|
| 723 |
+
layer->GetHiddenSize(),
|
| 724 |
+
layer->GetIntermediateSize(),
|
| 725 |
+
layer->GetNumHeads(),
|
| 726 |
+
layer->IsTrainingMode(),
|
| 727 |
+
layer->GeluCheckpoint())},
|
| 728 |
+
options);
|
| 729 |
+
TrainingContext::Instance().SetWorkSpace((T*)workspace.data_ptr());
|
| 730 |
+
|
| 731 |
+
auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output);
|
| 732 |
+
auto add_res = (normalize_invertible ? inp_norm : torch::empty_like(input));
|
| 733 |
+
auto attn_o_inp = torch::empty_like(input);
|
| 734 |
+
auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options);
|
| 735 |
+
|
| 736 |
+
auto attn_prob_dropout_mask =
|
| 737 |
+
torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options);
|
| 738 |
+
auto attn_output_dropout_mask =
|
| 739 |
+
torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options);
|
| 740 |
+
auto layer_output_dropout_mask =
|
| 741 |
+
torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options);
|
| 742 |
+
|
| 743 |
+
auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options);
|
| 744 |
+
auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options);
|
| 745 |
+
auto layer_norm_var = torch::empty({(bsz * seq_len)}, options);
|
| 746 |
+
auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options);
|
| 747 |
+
|
| 748 |
+
T* inp_norm_ptr = (T*)inp_norm.data_ptr();
|
| 749 |
+
T* add_res_ptr = (T*)add_res.data_ptr();
|
| 750 |
+
T* q_tf_ptr = (T*)qkv_tf.data_ptr();
|
| 751 |
+
T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr();
|
| 752 |
+
T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr();
|
| 753 |
+
T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr();
|
| 754 |
+
|
| 755 |
+
torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options);
|
| 756 |
+
torch::Tensor gelu_inp =
|
| 757 |
+
(gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options));
|
| 758 |
+
auto ff1_inp = torch::empty_like(input);
|
| 759 |
+
T* ff2_inp_ptr = (T*)ff2_inp.data_ptr();
|
| 760 |
+
T* gelu_inp_ptr = (T*)gelu_inp.data_ptr();
|
| 761 |
+
T* ff1_inp_ptr = (T*)ff1_inp.data_ptr();
|
| 762 |
+
|
| 763 |
+
torch::Tensor soft_out =
|
| 764 |
+
torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options);
|
| 765 |
+
torch::Tensor ctx_bufB =
|
| 766 |
+
(attn_dropout_checkpoint
|
| 767 |
+
? soft_out
|
| 768 |
+
: torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options));
|
| 769 |
+
T* soft_out_ptr = (T*)soft_out.data_ptr();
|
| 770 |
+
T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr();
|
| 771 |
+
|
| 772 |
+
layer->SetTrainingMode(training_mode);
|
| 773 |
+
layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(),
|
| 774 |
+
(uint8_t*)attn_output_dropout_mask.data_ptr(),
|
| 775 |
+
(uint8_t*)layer_output_dropout_mask.data_ptr(),
|
| 776 |
+
(T*)attn_layer_norm_var.data_ptr(),
|
| 777 |
+
(T*)attn_layer_norm_mean.data_ptr(),
|
| 778 |
+
(T*)layer_norm_var.data_ptr(),
|
| 779 |
+
(T*)layer_norm_mean.data_ptr());
|
| 780 |
+
|
| 781 |
+
layer->Forward(bsz,
|
| 782 |
+
input_ptr,
|
| 783 |
+
input_mask_ptr,
|
| 784 |
+
attn_qkvw_ptr,
|
| 785 |
+
attn_qkvb_ptr,
|
| 786 |
+
attn_ow_ptr,
|
| 787 |
+
attn_ob_ptr,
|
| 788 |
+
attn_nw_ptr,
|
| 789 |
+
attn_nb_ptr,
|
| 790 |
+
inter_w_ptr,
|
| 791 |
+
inter_b_ptr,
|
| 792 |
+
output_w_ptr,
|
| 793 |
+
output_b_ptr,
|
| 794 |
+
norm_w_ptr,
|
| 795 |
+
norm_b_ptr,
|
| 796 |
+
out_ptr,
|
| 797 |
+
inp_norm_ptr,
|
| 798 |
+
q_tf_ptr,
|
| 799 |
+
k_tf_ptr,
|
| 800 |
+
v_tf_ptr,
|
| 801 |
+
soft_out_ptr,
|
| 802 |
+
ctx_bufB_ptr,
|
| 803 |
+
attn_o_inp_ptr,
|
| 804 |
+
add_res_ptr,
|
| 805 |
+
ff1_inp_ptr,
|
| 806 |
+
gelu_inp_ptr,
|
| 807 |
+
ff2_inp_ptr);
|
| 808 |
+
|
| 809 |
+
return {output,
|
| 810 |
+
inp_norm,
|
| 811 |
+
qkv_tf,
|
| 812 |
+
soft_out,
|
| 813 |
+
ctx_bufB,
|
| 814 |
+
attn_o_inp,
|
| 815 |
+
add_res,
|
| 816 |
+
ff1_inp,
|
| 817 |
+
gelu_inp,
|
| 818 |
+
ff2_inp,
|
| 819 |
+
attn_prob_dropout_mask,
|
| 820 |
+
attn_output_dropout_mask,
|
| 821 |
+
layer_output_dropout_mask,
|
| 822 |
+
attn_layer_norm_var,
|
| 823 |
+
attn_layer_norm_mean,
|
| 824 |
+
layer_norm_var,
|
| 825 |
+
layer_norm_mean};
|
| 826 |
+
}
|
| 827 |
+
|
| 828 |
+
template <typename T>
|
| 829 |
+
std::vector<torch::Tensor> ds_transformer_backward(unsigned layer_id,
|
| 830 |
+
const torch::Tensor& grad_output,
|
| 831 |
+
const torch::Tensor& output,
|
| 832 |
+
const torch::Tensor& inp_norm,
|
| 833 |
+
const torch::Tensor& qkv_tf,
|
| 834 |
+
const torch::Tensor& soft_out,
|
| 835 |
+
const torch::Tensor& ctx_bufB,
|
| 836 |
+
const torch::Tensor& attn_o_inp,
|
| 837 |
+
const torch::Tensor& add_res,
|
| 838 |
+
const torch::Tensor& ff1_inp,
|
| 839 |
+
const torch::Tensor& gelu_inp,
|
| 840 |
+
const torch::Tensor& ff2_inp,
|
| 841 |
+
const torch::Tensor& attn_prob_dropout_mask,
|
| 842 |
+
const torch::Tensor& attn_output_dropout_mask,
|
| 843 |
+
const torch::Tensor& layer_output_dropout_mask,
|
| 844 |
+
const torch::Tensor& attn_layer_norm_var,
|
| 845 |
+
const torch::Tensor& attn_layer_norm_mean,
|
| 846 |
+
const torch::Tensor& layer_norm_var,
|
| 847 |
+
const torch::Tensor& layer_norm_mean,
|
| 848 |
+
const torch::Tensor& input,
|
| 849 |
+
const torch::Tensor& input_mask,
|
| 850 |
+
const torch::Tensor& attn_qkvw,
|
| 851 |
+
const torch::Tensor& attn_qkvb,
|
| 852 |
+
const torch::Tensor& attn_ow,
|
| 853 |
+
const torch::Tensor& attn_ob,
|
| 854 |
+
const torch::Tensor& attn_nw,
|
| 855 |
+
const torch::Tensor& attn_nb,
|
| 856 |
+
const torch::Tensor& inter_w,
|
| 857 |
+
const torch::Tensor& inter_b,
|
| 858 |
+
const torch::Tensor& output_w,
|
| 859 |
+
const torch::Tensor& output_b,
|
| 860 |
+
const torch::Tensor& norm_w,
|
| 861 |
+
const torch::Tensor& norm_b)
|
| 862 |
+
{
|
| 863 |
+
auto g_output = grad_output.contiguous();
|
| 864 |
+
CHECK_INPUT(g_output);
|
| 865 |
+
CHECK_INPUT(output);
|
| 866 |
+
CHECK_INPUT(inp_norm);
|
| 867 |
+
CHECK_INPUT(qkv_tf);
|
| 868 |
+
CHECK_INPUT(add_res);
|
| 869 |
+
CHECK_INPUT(soft_out);
|
| 870 |
+
CHECK_INPUT(ctx_bufB);
|
| 871 |
+
CHECK_INPUT(attn_o_inp);
|
| 872 |
+
CHECK_INPUT(ff1_inp);
|
| 873 |
+
CHECK_INPUT(gelu_inp);
|
| 874 |
+
CHECK_INPUT(ff2_inp);
|
| 875 |
+
CHECK_INPUT(input);
|
| 876 |
+
CHECK_INPUT(input_mask);
|
| 877 |
+
CHECK_INPUT(attn_qkvw);
|
| 878 |
+
CHECK_INPUT(attn_qkvb);
|
| 879 |
+
CHECK_INPUT(attn_ow);
|
| 880 |
+
CHECK_INPUT(attn_ob);
|
| 881 |
+
CHECK_INPUT(attn_nw);
|
| 882 |
+
CHECK_INPUT(attn_nb);
|
| 883 |
+
CHECK_INPUT(inter_w);
|
| 884 |
+
CHECK_INPUT(inter_b);
|
| 885 |
+
CHECK_INPUT(output_w);
|
| 886 |
+
CHECK_INPUT(output_b);
|
| 887 |
+
CHECK_INPUT(norm_w);
|
| 888 |
+
CHECK_INPUT(norm_b);
|
| 889 |
+
|
| 890 |
+
unsigned bsz = g_output.size(0);
|
| 891 |
+
|
| 892 |
+
std::shared_ptr<BertTransformerLayer<T>> layer =
|
| 893 |
+
std::static_pointer_cast<BertTransformerLayer<T>>(s_transformer_layers[layer_id]);
|
| 894 |
+
|
| 895 |
+
unsigned seq_len = layer->GetSeqLength();
|
| 896 |
+
if (g_output.size(1) != seq_len) {
|
| 897 |
+
seq_len = g_output.size(1);
|
| 898 |
+
layer->SetSeqLength(seq_len);
|
| 899 |
+
}
|
| 900 |
+
auto options = torch::TensorOptions()
|
| 901 |
+
.dtype(g_output.options().dtype())
|
| 902 |
+
.layout(torch::kStrided)
|
| 903 |
+
.device(torch::kCUDA)
|
| 904 |
+
.requires_grad(true);
|
| 905 |
+
auto workspace = torch::empty({get_workspace_size<T>(bsz,
|
| 906 |
+
seq_len,
|
| 907 |
+
layer->GetHiddenSize(),
|
| 908 |
+
layer->GetIntermediateSize(),
|
| 909 |
+
layer->GetNumHeads(),
|
| 910 |
+
layer->IsTrainingMode(),
|
| 911 |
+
layer->GeluCheckpoint())},
|
| 912 |
+
options);
|
| 913 |
+
TrainingContext::Instance().SetWorkSpace((T*)workspace.data_ptr());
|
| 914 |
+
|
| 915 |
+
auto grad_input = torch::empty_like(input);
|
| 916 |
+
auto grad_attn_qkvw = torch::empty_like(attn_qkvw);
|
| 917 |
+
auto grad_attn_qkvb = torch::empty_like(attn_qkvb);
|
| 918 |
+
auto grad_attn_ow = torch::empty_like(attn_ow);
|
| 919 |
+
auto grad_attn_ob = torch::empty_like(attn_ob);
|
| 920 |
+
auto grad_attn_nw = torch::empty_like(attn_nw);
|
| 921 |
+
auto grad_attn_nb = torch::empty_like(attn_nb);
|
| 922 |
+
auto grad_inter_w = torch::empty_like(inter_w);
|
| 923 |
+
auto grad_inter_b = torch::empty_like(inter_b);
|
| 924 |
+
auto grad_output_w = torch::empty_like(output_w);
|
| 925 |
+
auto grad_output_b = torch::empty_like(output_b);
|
| 926 |
+
auto grad_norm_w = torch::empty_like(norm_w);
|
| 927 |
+
auto grad_norm_b = torch::empty_like(norm_b);
|
| 928 |
+
|
| 929 |
+
// inputs.
|
| 930 |
+
const T* grad_output_ptr = (const T*)g_output.data_ptr();
|
| 931 |
+
const T* input_ptr = (const T*)input.data_ptr();
|
| 932 |
+
const T* output_ptr = (const T*)output.data_ptr();
|
| 933 |
+
const T* inp_norm_ptr = (const T*)inp_norm.data_ptr();
|
| 934 |
+
const T* q_tf_ptr = (const T*)qkv_tf.data_ptr();
|
| 935 |
+
const T* add_res_ptr = (const T*)add_res.data_ptr();
|
| 936 |
+
const T* k_tf_ptr =
|
| 937 |
+
q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr();
|
| 938 |
+
const T* v_tf_ptr =
|
| 939 |
+
k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr();
|
| 940 |
+
const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr();
|
| 941 |
+
const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr();
|
| 942 |
+
const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr();
|
| 943 |
+
const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr();
|
| 944 |
+
const T* soft_out_ptr = (const T*)soft_out.data_ptr();
|
| 945 |
+
const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr();
|
| 946 |
+
const T* input_mask_ptr = (const T*)input_mask.data_ptr();
|
| 947 |
+
const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr();
|
| 948 |
+
const T* attn_ow_ptr = (const T*)attn_ow.data_ptr();
|
| 949 |
+
const T* attn_nw_ptr = (const T*)attn_nw.data_ptr();
|
| 950 |
+
const T* attn_nb_ptr = (const T*)attn_nb.data_ptr();
|
| 951 |
+
const T* inter_w_ptr = (const T*)inter_w.data_ptr();
|
| 952 |
+
const T* inter_b_ptr = (const T*)inter_b.data_ptr();
|
| 953 |
+
const T* output_w_ptr = (const T*)output_w.data_ptr();
|
| 954 |
+
const T* norm_w_ptr = (const T*)norm_w.data_ptr();
|
| 955 |
+
const T* norm_b_ptr = (const T*)norm_b.data_ptr();
|
| 956 |
+
|
| 957 |
+
// outputs.
|
| 958 |
+
T* grad_input_ptr = (T*)grad_input.data_ptr();
|
| 959 |
+
T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr();
|
| 960 |
+
T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr();
|
| 961 |
+
T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr();
|
| 962 |
+
T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr();
|
| 963 |
+
T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr();
|
| 964 |
+
T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr();
|
| 965 |
+
T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr();
|
| 966 |
+
T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr();
|
| 967 |
+
T* grad_output_w_ptr = (T*)grad_output_w.data_ptr();
|
| 968 |
+
T* grad_output_b_ptr = (T*)grad_output_b.data_ptr();
|
| 969 |
+
T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr();
|
| 970 |
+
T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr();
|
| 971 |
+
|
| 972 |
+
layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(),
|
| 973 |
+
(uint8_t*)attn_output_dropout_mask.data_ptr(),
|
| 974 |
+
(uint8_t*)layer_output_dropout_mask.data_ptr(),
|
| 975 |
+
(T*)attn_layer_norm_var.data_ptr(),
|
| 976 |
+
(T*)attn_layer_norm_mean.data_ptr(),
|
| 977 |
+
(T*)layer_norm_var.data_ptr(),
|
| 978 |
+
(T*)layer_norm_mean.data_ptr());
|
| 979 |
+
|
| 980 |
+
layer->Backward(bsz,
|
| 981 |
+
grad_output_ptr,
|
| 982 |
+
input_ptr,
|
| 983 |
+
output_ptr,
|
| 984 |
+
inp_norm_ptr,
|
| 985 |
+
q_tf_ptr,
|
| 986 |
+
k_tf_ptr,
|
| 987 |
+
v_tf_ptr,
|
| 988 |
+
soft_out_ptr,
|
| 989 |
+
ctx_bufB_ptr,
|
| 990 |
+
attn_o_inp_ptr,
|
| 991 |
+
add_res_ptr,
|
| 992 |
+
ff1_inp_ptr,
|
| 993 |
+
gelu_inp_ptr,
|
| 994 |
+
ff2_inp_ptr,
|
| 995 |
+
input_mask_ptr,
|
| 996 |
+
attn_qkvw_ptr,
|
| 997 |
+
attn_ow_ptr,
|
| 998 |
+
attn_nw_ptr,
|
| 999 |
+
attn_nb_ptr,
|
| 1000 |
+
inter_w_ptr,
|
| 1001 |
+
inter_b_ptr,
|
| 1002 |
+
output_w_ptr,
|
| 1003 |
+
norm_w_ptr,
|
| 1004 |
+
norm_b_ptr,
|
| 1005 |
+
|
| 1006 |
+
grad_input_ptr,
|
| 1007 |
+
grad_attn_qkvw_ptr,
|
| 1008 |
+
grad_attn_qkvb_ptr,
|
| 1009 |
+
grad_attn_ow_ptr,
|
| 1010 |
+
grad_attn_ob_ptr,
|
| 1011 |
+
grad_attn_nw_ptr,
|
| 1012 |
+
grad_attn_nb_ptr,
|
| 1013 |
+
grad_inter_w_ptr,
|
| 1014 |
+
grad_inter_b_ptr,
|
| 1015 |
+
grad_output_w_ptr,
|
| 1016 |
+
grad_output_b_ptr,
|
| 1017 |
+
grad_norm_w_ptr,
|
| 1018 |
+
grad_norm_b_ptr);
|
| 1019 |
+
|
| 1020 |
+
return {grad_input,
|
| 1021 |
+
grad_attn_qkvw,
|
| 1022 |
+
grad_attn_qkvb,
|
| 1023 |
+
grad_attn_ow,
|
| 1024 |
+
grad_attn_ob,
|
| 1025 |
+
grad_attn_nw,
|
| 1026 |
+
grad_attn_nb,
|
| 1027 |
+
grad_inter_w,
|
| 1028 |
+
grad_inter_b,
|
| 1029 |
+
grad_output_w,
|
| 1030 |
+
grad_output_b,
|
| 1031 |
+
grad_norm_w,
|
| 1032 |
+
grad_norm_b};
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
|
| 1036 |
+
{
|
| 1037 |
+
m.def("forward_fp32",
|
| 1038 |
+
&ds_transformer_forward<float>,
|
| 1039 |
+
"DeepSpeed Transformer forward with fp32 (CUDA)");
|
| 1040 |
+
m.def("forward_fp16",
|
| 1041 |
+
&ds_transformer_forward<__half>,
|
| 1042 |
+
"DeepSpeed Transformer forward with fp16 (CUDA)");
|
| 1043 |
+
m.def("backward_fp32",
|
| 1044 |
+
&ds_transformer_backward<float>,
|
| 1045 |
+
"DeepSpeed Transformer backward with fp32 (CUDA)");
|
| 1046 |
+
m.def("backward_fp16",
|
| 1047 |
+
&ds_transformer_backward<__half>,
|
| 1048 |
+
"DeepSpeed Transformer backward with fp16 (CUDA)");
|
| 1049 |
+
m.def("create_transformer_layer_fp32",
|
| 1050 |
+
&create_transformer_layer<float>,
|
| 1051 |
+
"Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)");
|
| 1052 |
+
m.def("create_transformer_layer_fp16",
|
| 1053 |
+
&create_transformer_layer<__half>,
|
| 1054 |
+
"Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)");
|
| 1055 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "custom_cuda_layers.h"
|
| 7 |
+
|
| 8 |
+
/* Tanh approximation of GELU (Hendrycks & Gimpel):
     gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
   Computed in fp32 even for half tensors to avoid overflow inside tanh. */
inline __device__ float gelu(const float x)
{
    // sqrt(2/pi) written out to full float precision.
    const float sqrt_param = 0.79788456080286535587989211986876f;
    const float mul_param = 0.044715;
    return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x)));
}
|
| 14 |
+
|
| 15 |
+
/* Derivative of the tanh-approximation GELU above (backward pass).
   With u = sqrt(2/pi) * (x + 0.044715 * x^3) and t = tanh(u):
     d/dx gelu(x) = 0.5*(1 + t)                                  (dg1)
                  + 0.5*x*sqrt(2/pi)*(1 - t^2)                   (dg2)
                  + dg2 * 3 * 0.044715 * x^2                     (dg3)
   i.e. dg2/dg3 carry the sech^2(u) * du/dx chain-rule factor. */
inline __device__ float d_gelu(const float x)
{
    const float sqrt_param = 0.79788456080286535587989211986876f;
    const float mul_param = 0.044715;

    float x2mul = x * x * mul_param;
    float tan_h = tanhf(sqrt_param * (x + x * x2mul));
    float dg1 = 0.5f * (1.0f + tan_h);
    float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h);  // 1 - t^2 == sech^2(u)
    float dg3 = dg2 * 3 * x2mul;                              // derivative of the x^3 term
    return (dg1 + dg2 + dg3);
}
|
| 27 |
+
|
| 28 |
+
/*
|
| 29 |
+
Fused bias add with GELU
|
| 30 |
+
|
| 31 |
+
Loads a vector of 4 elements each iteration, for stride
|
| 32 |
+
iterations. It was written with the intention to launch 256-thread
|
| 33 |
+
threadblocks, so to launch for bert-large, we would set ITERATIONS
|
| 34 |
+
to 4. This is currently done automatically as a heuristic, setting
|
| 35 |
+
the number of iterations as blocks of 1024.
|
| 36 |
+
|
| 37 |
+
For FP16, the values are loaded from memory as __half, but converted
|
| 38 |
+
to FP32 for the arithmetic itself, to prevent numerous overflow on
|
| 39 |
+
the intermediate hyperbolic tangent, since there's no intrinsic
|
| 40 |
+
that computes it directly.
|
| 41 |
+
*/
|
| 42 |
+
|
| 43 |
+
/* Element-wise GELU over a matrix of `row_stride` float4 vectors per row.
   One threadblock per row (blockIdx.x); each thread walks the row in
   blockDim.x-sized strides for `iterations` steps.  `row_stride` is the
   row width in float4 units (elements / 4). */
__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations)
{
    int row = blockIdx.x;
    int id = threadIdx.x;
    int loop_stride = blockDim.x;

    // Vectorized 128-bit accesses: 4 floats per load/store.
    const float4* input_cast = reinterpret_cast<const float4*>(input);
    float4* vals_cast = reinterpret_cast<float4*>(vals);

    for (int i = 0; i < iterations; i++) {
        // Guard the tail when row_stride is not a multiple of blockDim.x.
        if (i * loop_stride + id < row_stride) {
            float4 data = input_cast[row * row_stride + i * loop_stride + id];

            data.x = gelu(data.x);
            data.y = gelu(data.y);
            data.z = gelu(data.z);
            data.w = gelu(data.w);

            vals_cast[row * row_stride + i * loop_stride + id] = data;
        }
    }
}
|
| 65 |
+
|
| 66 |
+
/* __half overload: each float2 access moves 4 halves (two __half2).
   Values are widened to fp32 for the tanh arithmetic (no half tanh
   intrinsic) and narrowed back with round-to-nearest.  The body compiles
   to a no-op when half precision support is unavailable. */
__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations)
{
#ifdef HALF_PRECISION_AVAILABLE
    int row = blockIdx.x;
    int id = threadIdx.x;
    int loop_stride = blockDim.x;

    // 64-bit vector accesses carrying 4 __half values each.
    const float2* input_cast = reinterpret_cast<const float2*>(input);
    float2* vals_cast = reinterpret_cast<float2*>(vals);

    for (int i = 0; i < iterations; i++) {
        if (i * loop_stride + id < row_stride) {
            float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id];

            // Reinterpret the 4 packed halves as two __half2 pairs.
            __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec);

            float2 low_data = __half22float2(vals_half[0]);
            float2 high_data = __half22float2(vals_half[1]);

            low_data.x = gelu(low_data.x);
            low_data.y = gelu(low_data.y);
            high_data.x = gelu(high_data.x);
            high_data.y = gelu(high_data.y);

            vals_half[0] = __float22half2_rn(low_data);
            vals_half[1] = __float22half2_rn(high_data);

            vals_cast[row * row_stride + i * loop_stride + id] = vals_vec;
        }
    }
#endif
}
|
| 98 |
+
|
| 99 |
+
/* Fused bias-add + GELU for fp32.  Layout matches gelu_kernel: one block
   per row, float4-vectorized, `row_stride` in float4 units.  The bias is
   indexed without the row offset, so the same bias vector is broadcast
   across every row. */
__global__ void fused_bias_gelu(const float* input,
                                const float* bias,
                                float* vals,
                                int row_stride,
                                int iterations)
{
    int row = blockIdx.x;
    int id = threadIdx.x;
    int loop_stride = blockDim.x;

    const float4* input_cast = reinterpret_cast<const float4*>(input);
    float4* vals_cast = reinterpret_cast<float4*>(vals);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);

    for (int i = 0; i < iterations; i++) {
        if (i * loop_stride + id < row_stride) {
            float4 data = input_cast[row * row_stride + i * loop_stride + id];
            // Per-column bias, shared by all rows.
            float4 bias_data = bias_cast[i * loop_stride + id];

            data.x += bias_data.x;
            data.y += bias_data.y;
            data.z += bias_data.z;
            data.w += bias_data.w;

            data.x = gelu(data.x);
            data.y = gelu(data.y);
            data.z = gelu(data.z);
            data.w = gelu(data.w);

            vals_cast[row * row_stride + i * loop_stride + id] = data;
        }
    }
}
|
| 132 |
+
|
| 133 |
+
/* Fused bias-add + GELU, __half overload.  Same broadcast-bias layout as
   the fp32 version; each float2 access carries 4 halves, widened to fp32
   for the add + tanh math and narrowed back with round-to-nearest. */
__global__ void fused_bias_gelu(const __half* input,
                                const __half* bias,
                                __half* vals,
                                int row_stride,
                                int iterations)
{
#ifdef HALF_PRECISION_AVAILABLE
    int row = blockIdx.x;
    int id = threadIdx.x;
    int loop_stride = blockDim.x;

    const float2* input_cast = reinterpret_cast<const float2*>(input);
    float2* vals_cast = reinterpret_cast<float2*>(vals);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);

    for (int i = 0; i < iterations; i++) {
        if (i * loop_stride + id < row_stride) {
            float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id];
            // Per-column bias, shared by all rows.
            float2 bias_vec = bias_cast[i * loop_stride + id];

            __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec);
            __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec);

            // Widen both operands to fp32 before the arithmetic.
            float2 low_data = __half22float2(vals_half[0]);
            float2 high_data = __half22float2(vals_half[1]);

            float2 low_bias = __half22float2(bias_half[0]);
            float2 high_bias = __half22float2(bias_half[1]);

            low_data.x += low_bias.x;
            low_data.y += low_bias.y;
            high_data.x += high_bias.x;
            high_data.y += high_bias.y;

            low_data.x = gelu(low_data.x);
            low_data.y = gelu(low_data.y);
            high_data.x = gelu(high_data.x);
            high_data.y = gelu(high_data.y);

            vals_half[0] = __float22half2_rn(low_data);
            vals_half[1] = __float22half2_rn(high_data);

            vals_cast[row * row_stride + i * loop_stride + id] = vals_vec;
        }
    }
#endif
}
|
| 180 |
+
|
| 181 |
+
/* GELU backward (fp32): scales the upstream gradient in place,
     d_output *= d_gelu(gelu_input + bias)
   `gelu_input` is the pre-activation (pre-bias) value saved from the
   forward pass; the bias is re-added here rather than stored. */
__global__ void d_gelu_func(float* d_output,
                            const float* gelu_input,
                            const float* bias,
                            int row_stride,
                            int iterations)
{
    int row = blockIdx.x;
    int id = threadIdx.x;
    int loop_stride = blockDim.x;

    float4* d_output_cast = reinterpret_cast<float4*>(d_output);
    const float4* gelu_input_cast = reinterpret_cast<const float4*>(gelu_input);
    const float4* bias_cast = reinterpret_cast<const float4*>(bias);

    for (int i = 0; i < iterations; i++) {
        if (i * loop_stride + id < row_stride) {
            float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id];
            float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id];
            float4 bias_data = bias_cast[i * loop_stride + id];

            // Reconstruct the forward-pass activation input (input + bias).
            gelu_input_data.x += bias_data.x;
            gelu_input_data.y += bias_data.y;
            gelu_input_data.z += bias_data.z;
            gelu_input_data.w += bias_data.w;

            output_data.x *= d_gelu(gelu_input_data.x);
            output_data.y *= d_gelu(gelu_input_data.y);
            output_data.z *= d_gelu(gelu_input_data.z);
            output_data.w *= d_gelu(gelu_input_data.w);

            // In-place update of the gradient buffer.
            d_output_cast[row * row_stride + i * loop_stride + id] = output_data;
        }
    }
}
|
| 215 |
+
|
| 216 |
+
/* GELU backward, __half overload.  Same in-place gradient scaling as the
   fp32 version, with all arithmetic widened to fp32 and results rounded
   back to half.  Compiles to a no-op without half support. */
__global__ void d_gelu_func(__half* d_output,
                            const __half* gelu_input,
                            const __half* bias,
                            int row_stride,
                            int iterations)
{
#ifdef HALF_PRECISION_AVAILABLE
    int row = blockIdx.x;
    int id = threadIdx.x;
    int loop_stride = blockDim.x;

    float2* d_output_cast = reinterpret_cast<float2*>(d_output);
    const float2* gelu_input_cast = reinterpret_cast<const float2*>(gelu_input);
    const float2* bias_cast = reinterpret_cast<const float2*>(bias);

#pragma unroll
    for (int i = 0; i < iterations; i++) {
        if (i * loop_stride + id < row_stride) {
            float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id];
            float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id];
            float2 bias_vec = bias_cast[i * loop_stride + id];

            // View each 4-half vector as two __half2 pairs.
            __half2* output_data_half = reinterpret_cast<__half2*>(&output_data);
            __half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data);
            __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec);

            float2 output_half_0 = __half22float2(output_data_half[0]);
            float2 output_half_1 = __half22float2(output_data_half[1]);

            float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]);
            float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]);

            float2 bias_half_0 = __half22float2(bias_half[0]);
            float2 bias_half_1 = __half22float2(bias_half[1]);

            // Reconstruct the forward-pass activation input (input + bias).
            gelu_input_half_0.x += bias_half_0.x;
            gelu_input_half_0.y += bias_half_0.y;
            gelu_input_half_1.x += bias_half_1.x;
            gelu_input_half_1.y += bias_half_1.y;

            output_half_0.x *= d_gelu(gelu_input_half_0.x);
            output_half_0.y *= d_gelu(gelu_input_half_0.y);
            output_half_1.x *= d_gelu(gelu_input_half_1.x);
            output_half_1.y *= d_gelu(gelu_input_half_1.y);

            float2 result;
            __half2* result_half2 = reinterpret_cast<__half2*>(&result);

            result_half2[0] = __float22half2_rn(output_half_0);
            result_half2[1] = __float22half2_rn(output_half_1);

            d_output_cast[row * row_stride + i * loop_stride + id] = result;
        }
    }
#endif
}
|
| 272 |
+
|
| 273 |
+
template <typename T>
|
| 274 |
+
void launch_bias_gelu(const T* input,
|
| 275 |
+
const T* bias,
|
| 276 |
+
T* output,
|
| 277 |
+
int intermediate_size,
|
| 278 |
+
int batch_size,
|
| 279 |
+
cudaStream_t stream)
|
| 280 |
+
{
|
| 281 |
+
int iterations = (intermediate_size + 1023) / 1024;
|
| 282 |
+
int threads = (intermediate_size - 1) / (iterations * 4) + 1;
|
| 283 |
+
dim3 block_dims(threads);
|
| 284 |
+
dim3 grid_dims(batch_size);
|
| 285 |
+
|
| 286 |
+
fused_bias_gelu<<<grid_dims, block_dims, 0, stream>>>(
|
| 287 |
+
input, bias, output, intermediate_size / 4, iterations);
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
template <typename T>
|
| 291 |
+
void launch_gelu(const T* input,
|
| 292 |
+
T* output,
|
| 293 |
+
int intermediate_size,
|
| 294 |
+
int batch_size,
|
| 295 |
+
cudaStream_t stream)
|
| 296 |
+
{
|
| 297 |
+
int iterations = (intermediate_size + 1023) / 1024;
|
| 298 |
+
int threads = (intermediate_size - 1) / (iterations * 4) + 1;
|
| 299 |
+
dim3 block_dims(threads);
|
| 300 |
+
dim3 grid_dims(batch_size);
|
| 301 |
+
|
| 302 |
+
gelu_kernel<<<grid_dims, block_dims, 0, stream>>>(
|
| 303 |
+
input, output, intermediate_size / 4, iterations);
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
// Explicit instantiations for the two supported element types.
template void launch_bias_gelu<float>(const float*, const float*, float*, int, int, cudaStream_t);
template void launch_bias_gelu<__half>(const __half*,
                                       const __half*,
                                       __half*,
                                       int,
                                       int,
                                       cudaStream_t);

template void launch_gelu<float>(const float*, float*, int, int, cudaStream_t);
template void launch_gelu<__half>(const __half*, __half*, int, int, cudaStream_t);
|
| 316 |
+
|
| 317 |
+
template <typename T>
|
| 318 |
+
void launch_d_gelu(T* d_output,
|
| 319 |
+
const T* input,
|
| 320 |
+
const T* bias,
|
| 321 |
+
int intermediate_size,
|
| 322 |
+
int batch_size,
|
| 323 |
+
cudaStream_t stream)
|
| 324 |
+
{
|
| 325 |
+
int iterations = (intermediate_size + 1023) / 1024;
|
| 326 |
+
int threads = (intermediate_size - 1) / (iterations * 4) + 1;
|
| 327 |
+
dim3 block_dims(threads);
|
| 328 |
+
dim3 grid_dims(batch_size);
|
| 329 |
+
|
| 330 |
+
d_gelu_func<<<grid_dims, block_dims, 0, stream>>>(
|
| 331 |
+
d_output, input, bias, intermediate_size / 4, iterations);
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
// Explicit instantiations for the two supported element types.
template void launch_d_gelu<float>(float*, const float*, const float*, int, int, cudaStream_t);
template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, cudaStream_t);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "general_kernels.h"
|
| 7 |
+
|
| 8 |
+
namespace cg = cooperative_groups;
|
| 9 |
+
|
| 10 |
+
/* Column-wise sum of a [rows x width] matrix, accumulated in fp32.
   Each TILE_DIM x TILE_DIM block covers TILE_DIM columns: threads first
   accumulate strided partial sums down their column, deposit them into a
   transposed shared-memory tile (padded +1 to avoid bank conflicts), and
   then each tile row is reduced with warp shuffles so that lane 0 of each
   partition holds the final column sum. */
template <typename T>
__global__ void column_sum_reduce(const T* __restrict__ inp,
                                  T* __restrict__ out,
                                  int rows,
                                  int width)
{
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);

    // Global column index handled by this thread.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    int y_stride = width * TILE_DIM;

    float localSum = 0;

    // Loop across matrix height
    if (idx < width) {
        int offset = threadIdx.y * width + idx;
        for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
            localSum += (float)inp[offset];
            offset += y_stride;
        }
    }

    // Transposed store: partials of one column land in one tile row.
    tile[threadIdx.x][threadIdx.y] = localSum;

    __syncthreads();

    // Sum the shared buffer.
    float sum = tile[threadIdx.y][threadIdx.x];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    // Shuffle reduction within each TILE_DIM-wide partition.
    for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i);

    if (threadIdx.x == 0) {
        int pos = blockIdx.x * TILE_DIM + threadIdx.y;
        if (pos < width) out[pos] = sum;
    }
}
|
| 54 |
+
|
| 55 |
+
/* Launcher for the column-sum reduction above (used to reduce a gradient
   matrix into a bias gradient).  Declared generically, defined only for
   float and __half via the explicit specializations below. */
template <typename T>
void launch_fuse_transpose_bias_kernel(const T* inp,
                                       T* out,
                                       int rows,
                                       int cols,
                                       cudaStream_t stream);

template <>
void launch_fuse_transpose_bias_kernel<float>(const float* inp,
                                              float* out,
                                              int rows,
                                              int cols,
                                              cudaStream_t stream)
{
    // assert(rows % TILE_DIM == 0);
    // assert(cols % TILE_DIM == 0);

    // One TILE_DIM-wide block per group of TILE_DIM columns.
    dim3 grid_dim((cols - 1) / TILE_DIM + 1);
    dim3 block_dim(TILE_DIM, TILE_DIM);

    column_sum_reduce<float><<<grid_dim, block_dim, 0, stream>>>(inp, out, rows, cols);
}

template <>
void launch_fuse_transpose_bias_kernel<__half>(const __half* inp,
                                               __half* out,
                                               int rows,
                                               int cols,
                                               cudaStream_t stream)
{
    // assert(rows % TILE_DIM == 0);
    // assert(cols % TILE_DIM == 0);

    dim3 grid_dim((cols - 1) / TILE_DIM + 1);
    dim3 block_dim(TILE_DIM, TILE_DIM);

    column_sum_reduce<__half><<<grid_dim, block_dim, 0, stream>>>(inp, out, rows, cols);
}
|
| 93 |
+
|
| 94 |
+
/* Element-wise out = inp1 + inp2 over N float4 vectors.
   CUDA_1D_KERNEL_LOOP (defined in general_kernels.h — presumably a
   grid-stride loop; confirm against the header) covers all N vectors
   regardless of launch size. */
__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2)
{
    // 128-bit vectorized views of the three buffers.
    const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
    const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
    float4* out_4 = reinterpret_cast<float4*>(out);

    CUDA_1D_KERNEL_LOOP(j, N)
    {
        float4 val;
        float4 inp1_reg = inp1_4[j];
        float4 inp2_reg = inp2_4[j];

        val.x = inp1_reg.x + inp2_reg.x;
        val.y = inp1_reg.y + inp2_reg.y;
        val.z = inp1_reg.z + inp2_reg.z;
        val.w = inp1_reg.w + inp2_reg.w;

        out_4[j] = val;
    }
}
|
| 114 |
+
|
| 115 |
+
/* __half overload of the two-way add: each float2 access moves 4 halves,
   which are widened to fp32 for the addition and rounded back to half.
   The scratch vectors and their __half2 views are hoisted outside the
   loop and reused for every iteration. */
__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2)
{
    float2 inp1_4;
    float2 inp2_4;

    __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
    __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);

    const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
    const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);

    CUDA_1D_KERNEL_LOOP(j, N)
    {
        inp1_4 = inp1_arr[j];
        inp2_4 = inp2_arr[j];

        // Widen each __half2 pair to fp32 before summing.
        float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
        float2 inp1_h_f_1 = __half22float2(inp1_h[1]);

        float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
        float2 inp2_h_f_1 = __half22float2(inp2_h[1]);

        inp1_h_f_0.x += inp2_h_f_0.x;
        inp1_h_f_0.y += inp2_h_f_0.y;
        inp1_h_f_1.x += inp2_h_f_1.x;
        inp1_h_f_1.y += inp2_h_f_1.y;

        float2 val_f;
        __half2* val_h = reinterpret_cast<__half2*>(&val_f);

        val_h[0] = __float22half2_rn(inp1_h_f_0);
        val_h[1] = __float22half2_rn(inp1_h_f_1);

        float2* out_4 = reinterpret_cast<float2*>(out);
        out_4[j] = val_f;
    }
}
|
| 152 |
+
|
| 153 |
+
template <>
|
| 154 |
+
void launch_fused_add2<float>(float* out,
|
| 155 |
+
const float* inp1,
|
| 156 |
+
const float* inp2,
|
| 157 |
+
int batch_size,
|
| 158 |
+
int seq_length,
|
| 159 |
+
int hidden_dim,
|
| 160 |
+
cudaStream_t& stream)
|
| 161 |
+
{
|
| 162 |
+
int total_count = batch_size * seq_length * hidden_dim / 4;
|
| 163 |
+
dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length);
|
| 164 |
+
|
| 165 |
+
dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4);
|
| 166 |
+
|
| 167 |
+
fused_add2_kernel<<<grid_dim, block_dim, 0, stream>>>(total_count, out, inp1, inp2);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
template <>
|
| 171 |
+
void launch_fused_add2<__half>(__half* out,
|
| 172 |
+
const __half* inp1,
|
| 173 |
+
const __half* inp2,
|
| 174 |
+
int batch_size,
|
| 175 |
+
int seq_length,
|
| 176 |
+
int hidden_dim,
|
| 177 |
+
cudaStream_t& stream)
|
| 178 |
+
{
|
| 179 |
+
int total_count = batch_size * seq_length * hidden_dim / 4;
|
| 180 |
+
dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length);
|
| 181 |
+
|
| 182 |
+
dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4);
|
| 183 |
+
|
| 184 |
+
fused_add2_kernel<<<grid_dim, block_dim, 0, stream>>>(total_count, out, inp1, inp2);
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
/* Three-way element-wise add: out = inp1 + inp2 + inp3.
   One block per row, one thread per float4; `row_stride` is the row width
   in float4 units.  The `size` parameter is not used in the body. */
__global__ void fused_add3_kernel(float* out,
                                  const float* inp1,
                                  const float* inp2,
                                  const float* inp3,
                                  int size,
                                  int row_stride)
{
    int row = blockIdx.x;
    int id = threadIdx.x;

    const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
    const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
    const float4* inp3_4 = reinterpret_cast<const float4*>(inp3);

    float4* out_4 = reinterpret_cast<float4*>(out);

    float4 val;
    float4 inp1_reg = inp1_4[row * row_stride + id];
    float4 inp2_reg = inp2_4[row * row_stride + id];
    float4 inp3_reg = inp3_4[row * row_stride + id];

    val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x;
    val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y;
    val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z;
    val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w;

    out_4[row * row_stride + id] = val;
}
|
| 215 |
+
|
| 216 |
+
/* __half overload of the three-way add: 4 halves per float2 access,
   widened to fp32 for the sums and rounded back to half.  The `size`
   parameter is not used in the body. */
__global__ void fused_add3_kernel(__half* out,
                                  const __half* inp1,
                                  const __half* inp2,
                                  const __half* inp3,
                                  int size,
                                  int row_stride)
{
    int row = blockIdx.x;
    int id = threadIdx.x;
    const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
    const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
    const float2* inp3_arr = reinterpret_cast<const float2*>(inp3);

    float2 inp1_4 = inp1_arr[row * row_stride + id];
    float2 inp2_4 = inp2_arr[row * row_stride + id];
    float2 inp3_4 = inp3_arr[row * row_stride + id];

    // View each packed 4-half vector as two __half2 pairs.
    __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
    __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
    __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4);

    float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
    float2 inp1_h_f_1 = __half22float2(inp1_h[1]);

    float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
    float2 inp2_h_f_1 = __half22float2(inp2_h[1]);

    float2 inp3_h_f_0 = __half22float2(inp3_h[0]);
    float2 inp3_h_f_1 = __half22float2(inp3_h[1]);

    // Accumulate into the first operand in fp32.
    inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x);
    inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y);
    inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x);
    inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y);

    float2 val_f;
    __half2* val_h = reinterpret_cast<__half2*>(&val_f);

    val_h[0] = __float22half2_rn(inp1_h_f_0);
    val_h[1] = __float22half2_rn(inp1_h_f_1);

    float2* out_4 = reinterpret_cast<float2*>(out);
    out_4[row * row_stride + id] = val_f;
}
|
| 260 |
+
|
| 261 |
+
template <>
|
| 262 |
+
void launch_fused_add3<float>(float* out,
|
| 263 |
+
const float* inp1,
|
| 264 |
+
const float* inp2,
|
| 265 |
+
const float* inp3,
|
| 266 |
+
int batch_size,
|
| 267 |
+
int seq_length,
|
| 268 |
+
int hidden_size,
|
| 269 |
+
cudaStream_t& stream)
|
| 270 |
+
{
|
| 271 |
+
dim3 grid_dim(batch_size * seq_length);
|
| 272 |
+
|
| 273 |
+
dim3 block_dim(hidden_size / 4);
|
| 274 |
+
|
| 275 |
+
fused_add3_kernel<<<grid_dim, block_dim, 0, stream>>>(
|
| 276 |
+
out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4);
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
template <>
|
| 280 |
+
void launch_fused_add3<__half>(__half* out,
|
| 281 |
+
const __half* inp1,
|
| 282 |
+
const __half* inp2,
|
| 283 |
+
const __half* inp3,
|
| 284 |
+
int batch_size,
|
| 285 |
+
int seq_length,
|
| 286 |
+
int hidden_size,
|
| 287 |
+
cudaStream_t& stream)
|
| 288 |
+
{
|
| 289 |
+
dim3 grid_dim(batch_size * seq_length);
|
| 290 |
+
|
| 291 |
+
dim3 block_dim(hidden_size / 4);
|
| 292 |
+
|
| 293 |
+
fused_add3_kernel<<<grid_dim, block_dim, 0, stream>>>(
|
| 294 |
+
out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4);
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
/* Four-way element-wise add: out = inp1 + inp2 + inp3 + inp4.
   Same layout as fused_add3_kernel: one block per row, one thread per
   float4, `row_stride` in float4 units.  `size` is not used in the body. */
__global__ void fused_add4_kernel(float* out,
                                  const float* inp1,
                                  const float* inp2,
                                  const float* inp3,
                                  const float* inp4,
                                  int size,
                                  int row_stride)
{
    int row = blockIdx.x;
    int id = threadIdx.x;

    const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
    const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
    const float4* inp3_4 = reinterpret_cast<const float4*>(inp3);
    const float4* inp4_4 = reinterpret_cast<const float4*>(inp4);
    float4* out_4 = reinterpret_cast<float4*>(out);

    float4 val;
    float4 inp1_reg = inp1_4[row * row_stride + id];
    float4 inp2_reg = inp2_4[row * row_stride + id];
    float4 inp3_reg = inp3_4[row * row_stride + id];
    float4 inp4_reg = inp4_4[row * row_stride + id];

    val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x;
    val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y;
    val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z;
    val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w;

    out_4[row * row_stride + id] = val;
}
|
| 327 |
+
|
| 328 |
+
/* __half overload of the four-way add: 4 halves per float2 access,
   widened to fp32 for the sums and rounded back to half.  `size` is not
   used in the body. */
__global__ void fused_add4_kernel(__half* out,
                                  const __half* inp1,
                                  const __half* inp2,
                                  const __half* inp3,
                                  const __half* inp4,
                                  int size,
                                  int row_stride)
{
    int row = blockIdx.x;
    int id = threadIdx.x;
    const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
    const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
    const float2* inp3_arr = reinterpret_cast<const float2*>(inp3);
    const float2* inp4_arr = reinterpret_cast<const float2*>(inp4);

    float2 inp1_4 = inp1_arr[row * row_stride + id];
    float2 inp2_4 = inp2_arr[row * row_stride + id];
    float2 inp3_4 = inp3_arr[row * row_stride + id];
    float2 inp4_4 = inp4_arr[row * row_stride + id];

    // View each packed 4-half vector as two __half2 pairs.
    __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
    __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
    __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4);
    __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4);

    float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
    float2 inp1_h_f_1 = __half22float2(inp1_h[1]);

    float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
    float2 inp2_h_f_1 = __half22float2(inp2_h[1]);

    float2 inp3_h_f_0 = __half22float2(inp3_h[0]);
    float2 inp3_h_f_1 = __half22float2(inp3_h[1]);

    float2 inp4_h_f_0 = __half22float2(inp4_h[0]);
    float2 inp4_h_f_1 = __half22float2(inp4_h[1]);

    // Accumulate into the first operand in fp32.
    inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x);
    inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y);
    inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x);
    inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y);

    float2 val_f;
    __half2* val_h = reinterpret_cast<__half2*>(&val_f);

    val_h[0] = __float22half2_rn(inp1_h_f_0);
    val_h[1] = __float22half2_rn(inp1_h_f_1);

    float2* out_4 = reinterpret_cast<float2*>(out);
    out_4[row * row_stride + id] = val_f;
}
|
| 379 |
+
|
| 380 |
+
template <>
|
| 381 |
+
void launch_fused_add4<float>(float* out,
|
| 382 |
+
const float* inp1,
|
| 383 |
+
const float* inp2,
|
| 384 |
+
const float* inp3,
|
| 385 |
+
const float* inp4,
|
| 386 |
+
int batch_size,
|
| 387 |
+
int seq_length,
|
| 388 |
+
int hidden_size,
|
| 389 |
+
cudaStream_t& stream)
|
| 390 |
+
{
|
| 391 |
+
dim3 grid_dim(batch_size * seq_length);
|
| 392 |
+
|
| 393 |
+
dim3 block_dim(hidden_size / 4);
|
| 394 |
+
|
| 395 |
+
fused_add4_kernel<<<grid_dim, block_dim, 0, stream>>>(
|
| 396 |
+
out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4);
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
template <>
|
| 400 |
+
void launch_fused_add4<__half>(__half* out,
|
| 401 |
+
const __half* inp1,
|
| 402 |
+
const __half* inp2,
|
| 403 |
+
const __half* inp3,
|
| 404 |
+
const __half* inp4,
|
| 405 |
+
int batch_size,
|
| 406 |
+
int seq_length,
|
| 407 |
+
int hidden_size,
|
| 408 |
+
cudaStream_t& stream)
|
| 409 |
+
{
|
| 410 |
+
dim3 grid_dim(batch_size * seq_length);
|
| 411 |
+
|
| 412 |
+
dim3 block_dim(hidden_size / 4);
|
| 413 |
+
|
| 414 |
+
fused_add4_kernel<<<grid_dim, block_dim, 0, stream>>>(
|
| 415 |
+
out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4);
|
| 416 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 8 |
+
#include "hip/hip_cooperative_groups.h"
|
| 9 |
+
#else
|
| 10 |
+
#include "cooperative_groups.h"
|
| 11 |
+
#endif
|
| 12 |
+
#include "ds_kernel_utils.h"
|
| 13 |
+
#include "inference_cuda_layers.h"
|
| 14 |
+
#include "memory_access_utils.h"
|
| 15 |
+
|
| 16 |
+
#ifndef __HIP_PLATFORM_AMD__
|
| 17 |
+
#include <cuda_profiler_api.h>
|
| 18 |
+
#endif
|
| 19 |
+
|
| 20 |
+
namespace cg = cooperative_groups;
|
| 21 |
+
|
| 22 |
+
namespace rot_half {
|
| 23 |
+
constexpr int threads = 256;
|
| 24 |
+
} // namespace rot_half
|
| 25 |
+
|
| 26 |
+
// Applies rotary positional embedding (RoPE, "rotate-half" layout) in place to
// the query and key activations. Each head is processed cooperatively by
// `threadsPerHead` threads; each thread owns `granularity` bytes
// (T_per_thread elements) of the head. The element's rotated counterpart lives
// in a different lane, so it is exchanged with a tile shuffle instead of a
// second global load.
template <typename T, int threadsPerHead, int granularity>
__global__ void apply_rotary_pos_half(T* mixed_query,
                                      T* key_layer,
                                      unsigned rotary_dim,
                                      unsigned seq_len,
                                      unsigned seq_offset,
                                      unsigned num_heads,
                                      unsigned head_size,
                                      unsigned total_count,
                                      float rope_theta,
                                      int max_out_tokens)
{
    constexpr int T_per_thread = granularity / sizeof(T);
    constexpr int heads_per_block = rot_half::threads / threadsPerHead;

    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<threadsPerHead> head_group = cg::tiled_partition<threadsPerHead>(tb);

    // Global head index. Queries are packed contiguously; keys are strided by
    // max_out_tokens per (batch, head) slice (the KV-cache row stride).
    const int head_idx = blockIdx.x * heads_per_block + threadIdx.x / threadsPerHead;
    const int cur_seq_idx = head_idx % seq_len;
    const int offset = head_idx * head_size;
    const int k_offset = (cur_seq_idx + (head_idx / seq_len) * max_out_tokens) * head_size;

    // seq_offset shifts the position for tokens already in the cache.
    const int seq_idx = cur_seq_idx + seq_offset;
    const int half_dim = rotary_dim >> 1;
    const int half_dim_threads = half_dim / T_per_thread;

    if (head_idx < total_count) {
        const int base_neuron_idx = head_group.thread_rank() * T_per_thread;

        T q[T_per_thread], k[T_per_thread];
        mem_access::load_global<granularity>(q, mixed_query + offset + base_neuron_idx);
        mem_access::load_global<granularity>(k, key_layer + k_offset + base_neuron_idx);

#pragma unroll
        for (int i = 0; i < T_per_thread; i++) {
            const int neuron_idx = base_neuron_idx + i;
            if (neuron_idx < rotary_dim) {
                // Rotation angle: rope_theta^(-2i/rotary_dim) scaled by position.
                float inv_freq = (float)((neuron_idx % half_dim) * 2) / (float)rotary_dim;
                inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_idx;

                // Rotate-half sign: the second half contributes negatively.
                float rotary_sign = (neuron_idx > (half_dim - 1) ? -1.0 : 1.0);
                float q_rot = conversion::to<float>(q[i]) * rotary_sign;
                float k_rot = conversion::to<float>(k[i]) * rotary_sign;

                // Lane that owns the partner element in the other half.
                const int target_lane = (neuron_idx < half_dim)
                                            ? head_group.thread_rank() + half_dim_threads
                                            : head_group.thread_rank() - half_dim_threads;

                const float q_rot_temp = head_group.shfl(q_rot, target_lane);
                const float k_rot_temp = head_group.shfl(k_rot, target_lane);

                // x' = x*cos(theta) + partner*sin(theta)
                q[i] = conversion::to<T>(conversion::to<float>(q[i]) * cosf(inv_freq) +
                                         q_rot_temp * sinf(inv_freq));
                k[i] = conversion::to<T>(conversion::to<float>(k[i]) * cosf(inv_freq) +
                                         k_rot_temp * sinf(inv_freq));
            }
        }

        mem_access::store_global<granularity>(mixed_query + offset + base_neuron_idx, q);
        mem_access::store_global<granularity>(key_layer + k_offset + base_neuron_idx, k);
    }
}
|
| 89 |
+
|
| 90 |
+
// Expands to a launch of apply_rotary_pos_half for a compile-time
// (threads-per-head, vector-alignment) pair. Relies on `grid`, `block`,
// `stream` and the kernel arguments being in scope at the expansion site.
#define LAUNCH_ROT_POS_EMB_HALF(HEAD_THREADS, ALIGNMENT)                                      \
    apply_rotary_pos_half<T, HEAD_THREADS, ALIGNMENT><<<grid, block, 0, stream>>>(mixed_query, \
                                                                                  key_layer,   \
                                                                                  rotary_dim,  \
                                                                                  seq_len,     \
                                                                                  offset,      \
                                                                                  num_heads,   \
                                                                                  head_size,   \
                                                                                  total_count, \
                                                                                  rope_theta,  \
                                                                                  max_out_tokens);

// Dispatches the runtime threads_per_head value to a compile-time template
// argument. The HIP build additionally allows 64 (AMD wavefronts are 64 lanes
// wide); the CUDA build stops at a 32-lane warp.
#ifdef __HIP_PLATFORM_AMD__
#define LAUNCH_FOR_ALIGNMENT(ALIGNMENT)         \
    if (threads_per_head == 4) {                \
        LAUNCH_ROT_POS_EMB_HALF(4, ALIGNMENT);  \
    } else if (threads_per_head == 8) {         \
        LAUNCH_ROT_POS_EMB_HALF(8, ALIGNMENT);  \
    } else if (threads_per_head == 16) {        \
        LAUNCH_ROT_POS_EMB_HALF(16, ALIGNMENT); \
    } else if (threads_per_head == 32) {        \
        LAUNCH_ROT_POS_EMB_HALF(32, ALIGNMENT); \
    } else if (threads_per_head == 64) {        \
        LAUNCH_ROT_POS_EMB_HALF(64, ALIGNMENT); \
    } else {                                    \
        assert(false);                          \
    }
#else
#define LAUNCH_FOR_ALIGNMENT(ALIGNMENT)         \
    if (threads_per_head == 4) {                \
        LAUNCH_ROT_POS_EMB_HALF(4, ALIGNMENT);  \
    } else if (threads_per_head == 8) {         \
        LAUNCH_ROT_POS_EMB_HALF(8, ALIGNMENT);  \
    } else if (threads_per_head == 16) {        \
        LAUNCH_ROT_POS_EMB_HALF(16, ALIGNMENT); \
    } else if (threads_per_head == 32) {        \
        LAUNCH_ROT_POS_EMB_HALF(32, ALIGNMENT); \
    } else {                                    \
        assert(false);                          \
    }
#endif
|
| 131 |
+
|
| 132 |
+
// Host-side launcher for rotary positional embedding. Selects the widest
// vector access (16/8/4 bytes) that evenly divides half of the rotary
// dimension, derives how many threads must cooperate on one head, and
// dispatches to the matching kernel instantiation.
template <typename T>
void launch_apply_rotary_pos_emb(T* mixed_query,
                                 T* key_layer,
                                 unsigned head_size,
                                 unsigned seq_len,
                                 unsigned rotary_dim,
                                 unsigned offset,
                                 unsigned num_heads,
                                 unsigned batch,
                                 float rope_theta,
                                 cudaStream_t stream,
                                 int max_out_tokens)
{
    const int half_dim = rotary_dim >> 1;

    // Widest alignment (in bytes) such that each lane's vector stays within
    // one half of the rotary dimension.
    int alignment = sizeof(T);
    if (half_dim % (16 / sizeof(T)) == 0) {
        alignment = 16;
    } else if (half_dim % (8 / sizeof(T)) == 0) {
        alignment = 8;
    } else if (half_dim % (4 / sizeof(T)) == 0) {
        alignment = 4;
    } else {
        // Sub-4-byte vectorization is unsupported.
        assert(false);
    }
    const int T_per_elem = alignment / sizeof(T);

    int total_count = batch * num_heads * seq_len;

    const int padded_head_size = next_pow2(head_size);

    // A single warp (times the vector width) must cover an entire head.
    assert(padded_head_size <= hw_warp_size * T_per_elem);

    const int threads_per_head = padded_head_size / T_per_elem;
    const int heads_per_block = rot_half::threads / threads_per_head;

    dim3 block(rot_half::threads);
    dim3 grid((total_count + heads_per_block - 1) / heads_per_block);

    if (alignment == 4) {
        LAUNCH_FOR_ALIGNMENT(4);
    } else if (alignment == 8) {
        LAUNCH_FOR_ALIGNMENT(8);
    } else if (alignment == 16) {
        LAUNCH_FOR_ALIGNMENT(16);
    } else {
        assert(false);
    }
}
|
| 181 |
+
|
| 182 |
+
// Explicit template instantiations for the element types exposed by the
// inference API (bfloat16 only when the toolkit provides it).
#define INSTANTIATE_LAUNCH_ROTARY_POS_EMB(T)                 \
    template void launch_apply_rotary_pos_emb<T>(T*,         \
                                                 T*,         \
                                                 unsigned,   \
                                                 unsigned,   \
                                                 unsigned,   \
                                                 unsigned,   \
                                                 unsigned,   \
                                                 unsigned,   \
                                                 float,      \
                                                 cudaStream_t, \
                                                 int);

INSTANTIATE_LAUNCH_ROTARY_POS_EMB(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_ROTARY_POS_EMB(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_ROTARY_POS_EMB(__half);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#include "inference_cuda_layers.h"
|
| 8 |
+
|
| 9 |
+
#define MAX_QUANTIZE_GROUPING 1024
|
| 10 |
+
|
| 11 |
+
#define loop_unroll 1
|
| 12 |
+
#define loop_unroll_bits 1
|
| 13 |
+
|
| 14 |
+
// Dequantizes an int8 tensor whose quantization groups were computed over
// weight partitions that have since been merged; `merge_count` is log2 of the
// number of merged partitions. One block handles one row (blockIdx.x), with
// threads striding across the `output_size` columns.
template <typename T>
__global__ void dequantize_kernel(T* output,
                                  const int8_t* input,
                                  const float* qscale,
                                  int output_size,
                                  int hidden_dim,
                                  int groups,
                                  int merge_count)
{
    // Width of one pre-merge partition and the element span of one scale.
    unsigned merge_hidden = hidden_dim >> merge_count;
    unsigned quantization_stride = (merge_hidden * output_size) / groups;

    unsigned bid = blockIdx.x;
    unsigned tid = threadIdx.x;

    while (tid < output_size) {
        // Which pre-merge partition this row belongs to.
        unsigned w_index = bid / merge_hidden;
        unsigned q_index = tid + bid * output_size;

        auto q = input[q_index];

        // Map the (row, col) position back to the scale index of its original
        // pre-merge quantization group.
        unsigned merge_hidden_total = w_index * merge_hidden;
        unsigned scale_index =
            ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride)
             << merge_count) +
            w_index;

        float scale_data = qscale[scale_index];

        output[q_index] = conversion::to<T>(scale_data * (float)q);
        tid += blockDim.x;
    }
}
|
| 47 |
+
|
| 48 |
+
template <typename T>
|
| 49 |
+
void launch_dequantize(T* output,
|
| 50 |
+
const int8_t* input,
|
| 51 |
+
const float* qscale,
|
| 52 |
+
unsigned output_size,
|
| 53 |
+
unsigned hidden_dim,
|
| 54 |
+
unsigned groups,
|
| 55 |
+
unsigned merge_count,
|
| 56 |
+
cudaStream_t stream)
|
| 57 |
+
{
|
| 58 |
+
unsigned threads = 1024;
|
| 59 |
+
dim3 block_dims(threads);
|
| 60 |
+
dim3 grid_dims(hidden_dim);
|
| 61 |
+
|
| 62 |
+
dequantize_kernel<<<grid_dims, block_dims, 0, stream>>>(
|
| 63 |
+
output, input, qscale, output_size, hidden_dim, groups, merge_count);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
// Explicit instantiations of the merged-partition dequantize launcher.
#define INSTANTIATE_DEQUANTIZE_MERGE(T) \
    template void launch_dequantize<T>( \
        T*, const int8_t*, const float*, unsigned, unsigned, unsigned, unsigned, cudaStream_t);

INSTANTIATE_DEQUANTIZE_MERGE(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_DEQUANTIZE_MERGE(__nv_bfloat16);
#endif
INSTANTIATE_DEQUANTIZE_MERGE(__half);
|
| 75 |
+
|
| 76 |
+
// NOTE(review): intentionally empty. The vectorized dequantize path below
// repacks four 8-bit values per thread for 16-bit element types; this float
// overload appears to exist only so overload resolution succeeds when the
// templated launcher is instantiated for float — confirm it is never launched.
__global__ void dequantize_kernel(float* output,
                                  const int8_t* input,
                                  const float* qscale,
                                  int hidden_dim,
                                  unsigned merge_hidden,
                                  int cnt)
{
}
|
| 84 |
+
|
| 85 |
+
// Vectorized dequantize for 16-bit element types: each thread loads four
// packed int8 values as one 32-bit word (via a float), scales them, and
// writes four converted T elements as one float2. Grid is
// (groups, rows-per-group); one scale per quantization group (blockIdx.x).
template <typename T>
__global__ void dequantize_kernel(T* output,
                                  const int8_t* input,
                                  const float* qscale,
                                  unsigned hidden_dim,
                                  unsigned merge_hidden,
                                  int cnt)
{
    // Flattened row index across the (group, row) grid.
    unsigned bid = blockIdx.x * gridDim.y + blockIdx.y;
    unsigned tid = threadIdx.x;

    // One dequantization scale per group.
    float local_scale = qscale[blockIdx.x];

    // Reinterpret so each access moves 4 int8 in and 4 T (as float2) out.
    const float* input_cast = reinterpret_cast<const float*>(input);
    float2* output_cast = reinterpret_cast<float2*>(output);

    input_cast += bid * merge_hidden;
    output_cast += bid * merge_hidden;

    // Up to `cnt` block-strided passes cover merge_hidden vectors per row;
    // tid only advances while in range, so excess passes are no-ops.
    for (int c = 0; c < cnt; c++) {
        if (tid < merge_hidden) {
            float q = input_cast[tid];
            int8_t* q_int8 = (int8_t*)&q;

            float2 q_f;
            T* q_h = (T*)&q_f;

            q_h[0] = conversion::to<T>(local_scale * (float)q_int8[0]);
            q_h[1] = conversion::to<T>(local_scale * (float)q_int8[1]);
            q_h[2] = conversion::to<T>(local_scale * (float)q_int8[2]);
            q_h[3] = conversion::to<T>(local_scale * (float)q_int8[3]);
            output_cast[tid] = q_f;
            tid += blockDim.x;
        }
    }
}
|
| 121 |
+
|
| 122 |
+
template <typename T>
|
| 123 |
+
void launch_dequantize(T* output,
|
| 124 |
+
const int8_t* input,
|
| 125 |
+
const float* qscale,
|
| 126 |
+
unsigned output_size,
|
| 127 |
+
unsigned hidden_dim,
|
| 128 |
+
unsigned groups,
|
| 129 |
+
cudaStream_t stream)
|
| 130 |
+
{
|
| 131 |
+
unsigned threads = 1024;
|
| 132 |
+
hidden_dim /= 4;
|
| 133 |
+
unsigned thd_cnt = (hidden_dim - 1) / threads + 1;
|
| 134 |
+
|
| 135 |
+
assert(output_size % groups == 0);
|
| 136 |
+
unsigned blocks = output_size / groups;
|
| 137 |
+
|
| 138 |
+
dim3 block_dims(threads);
|
| 139 |
+
dim3 grid_dims(groups, blocks);
|
| 140 |
+
|
| 141 |
+
dequantize_kernel<<<grid_dims, block_dims, 0, stream>>>(
|
| 142 |
+
output, input, qscale, hidden_dim, hidden_dim, thd_cnt);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
// Explicit instantiations of the non-merged (vectorized) dequantize launcher.
#define INSTANTIATE_DEQUANTIZE_NO_MERGE(T) \
    template void launch_dequantize<T>( \
        T*, const int8_t*, const float*, unsigned, unsigned, unsigned, cudaStream_t);

INSTANTIATE_DEQUANTIZE_NO_MERGE(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_DEQUANTIZE_NO_MERGE(__nv_bfloat16);
#endif
INSTANTIATE_DEQUANTIZE_NO_MERGE(__half);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu
ADDED
|
@@ -0,0 +1,710 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#include "inference_cuda_layers.h"
|
| 8 |
+
#include "memory_access_utils.h"
|
| 9 |
+
|
| 10 |
+
namespace cg = cooperative_groups;
|
| 11 |
+
#define MAX_CAP 4
|
| 12 |
+
#define MAX_SEQ 2048
|
| 13 |
+
|
| 14 |
+
// only used to avoid compilation error due to lack of definition.
|
| 15 |
+
#ifndef BF16_AVAILABLE
|
| 16 |
+
using __nv_bfloat162 = __half2;
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
// Tanh approximation of GELU:
//   gelu(x) = x/2 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
inline __device__ float gelu(const float x)
{
    constexpr float kSqrt2OverPi = 0.79788456080286535587989211986876f;
    constexpr float kCubicCoeff = 0.044715;
    const float inner = kSqrt2OverPi * (x + kCubicCoeff * x * x * x);
    return x * 0.5f * (1.0f + tanhf(inner));
}
|
| 25 |
+
|
| 26 |
+
/*
In-place gelu(biasAdd(x)) for channels last.
Each thread processes 16 bytes (values_per_access elements); the launcher sizes
the grid so every element is covered exactly once.
*/
template <typename T>
__global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size)
{
    // Input restriction: intermediate_size % vals_per_access == 0
    constexpr int granularity = 16;
    constexpr int values_per_access = granularity / sizeof(T);
    const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;

    if (offset < total_count) {
        T data[values_per_access];
        T data_bias[values_per_access];
        mem_access::load_global<granularity>(data, input + offset);
        // Bias repeats every intermediate_size elements; the predicate guards
        // the load when no bias tensor was supplied (bias == nullptr).
        mem_access::load_global<granularity>(
            data_bias, bias + (offset % intermediate_size), bias != nullptr);

#pragma unroll
        for (int i = 0; i < values_per_access; i++) {
            // Accumulate in fp32 for accuracy regardless of T.
            float data_f = conversion::to<float>(data[i]);
            float bias_f = conversion::to<float>(data_bias[i]);
            data[i] = conversion::to<T>(gelu(data_f + bias_f));
        }

        mem_access::store_global<granularity>(input + offset, data);
    }
}
|
| 54 |
+
|
| 55 |
+
template <typename T>
|
| 56 |
+
void launch_bias_gelu(T* input,
|
| 57 |
+
const T* bias,
|
| 58 |
+
int intermediate_size,
|
| 59 |
+
int batch_size,
|
| 60 |
+
cudaStream_t stream)
|
| 61 |
+
{
|
| 62 |
+
constexpr int threads = 1024;
|
| 63 |
+
constexpr int granularity = 16;
|
| 64 |
+
|
| 65 |
+
const int total_count = batch_size * intermediate_size;
|
| 66 |
+
const int elems_per_block = threads * (granularity / sizeof(T));
|
| 67 |
+
dim3 block_dims(threads);
|
| 68 |
+
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
|
| 69 |
+
|
| 70 |
+
fused_bias_gelu<<<grid_dims, block_dims, 0, stream>>>(
|
| 71 |
+
input, bias, total_count, intermediate_size);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
// Explicit instantiations of the bias+GeLU launcher.
#define INSTANTIATE_LAUNCH_BIAS_GELU(T) \
    template void launch_bias_gelu<T>(T*, const T*, int, int, cudaStream_t);

INSTANTIATE_LAUNCH_BIAS_GELU(float)
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_BIAS_GELU(__nv_bfloat16)
#endif
INSTANTIATE_LAUNCH_BIAS_GELU(__half)
|
| 82 |
+
|
| 83 |
+
/*
In-place channels-last bias add.
Identical structure to fused_bias_gelu but without the activation: each thread
loads 16 bytes, adds the (broadcast) bias in fp32, and stores in place.
*/
template <typename T>
__global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size)
{
    // Input restriction: intermediate_size % vals_per_access == 0
    constexpr int granularity = 16;
    constexpr int values_per_access = granularity / sizeof(T);
    const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;

    if (offset < total_count) {
        T data[values_per_access];
        T data_bias[values_per_access];
        mem_access::load_global<granularity>(data, input + offset);
        // Bias repeats every intermediate_size elements; the predicate guards
        // the load when bias == nullptr.
        mem_access::load_global<granularity>(
            data_bias, bias + (offset % intermediate_size), bias != nullptr);

#pragma unroll
        for (int i = 0; i < values_per_access; i++) {
            float data_f = conversion::to<float>(data[i]);
            float bias_f = conversion::to<float>(data_bias[i]);
            data[i] = conversion::to<T>(data_f + bias_f);
        }

        mem_access::store_global<granularity>(input + offset, data);
    }
}
|
| 111 |
+
|
| 112 |
+
template <typename T>
|
| 113 |
+
void launch_bias_add(T* input,
|
| 114 |
+
const T* bias,
|
| 115 |
+
int intermediate_size,
|
| 116 |
+
int batch_size,
|
| 117 |
+
cudaStream_t stream)
|
| 118 |
+
{
|
| 119 |
+
constexpr int threads = 1024;
|
| 120 |
+
constexpr int granularity = 16;
|
| 121 |
+
|
| 122 |
+
const int total_count = batch_size * intermediate_size;
|
| 123 |
+
const int elems_per_block = threads * (granularity / sizeof(T));
|
| 124 |
+
dim3 block_dims(threads);
|
| 125 |
+
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
|
| 126 |
+
|
| 127 |
+
fused_bias_add<<<grid_dims, block_dims, 0, stream>>>(
|
| 128 |
+
input, bias, total_count, intermediate_size);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
// Explicit instantiations of the bias-add launcher.
#define INSTANTIATE_LAUNCH_BIAS_ADD(T) \
    template void launch_bias_add<T>(T*, const T*, int, int, cudaStream_t);

INSTANTIATE_LAUNCH_BIAS_ADD(float)
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_BIAS_ADD(__nv_bfloat16)
#endif
INSTANTIATE_LAUNCH_BIAS_ADD(__half)
|
| 139 |
+
|
| 140 |
+
// float specialization of the fused bias + residual update. All pointers are
// viewed as float4, so `total_count` and `intermediate_size` are in units of
// 4-float vectors (the launcher pre-divides by 4).
__global__ void fused_bias_residual(float* residual,
                                    const float* hidden_state,
                                    const float* attn,
                                    const float* bias,
                                    const float* attn_bias,
                                    const int total_count,
                                    const int intermediate_size,
                                    const float mp_scale,
                                    const bool preln)
{
    float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
    const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
    const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
    const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
    const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
    const int offset = blockIdx.x * blockDim.x + threadIdx.x;

    if (offset < total_count) {
        float4 res_fl4 = res_fl4_ptr[offset];
        const float4 hs_fl4 = hs_fl4_ptr[offset];
        const float4 attn_fl4 = attn_fl4_ptr[offset];
        // Biases broadcast across rows: indexed modulo the vectorized row width.
        const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
        const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
        if (preln) {
            // residual = (residual + attention + bias + attention_bias) *
            // mp_scale + hidden_state
            res_fl4.x =
                (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x);
            res_fl4.y =
                (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y);
            res_fl4.z =
                (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z);
            res_fl4.w =
                (res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w);
        } else {
            // residual += hidden_state + bias
            res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x;
            res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y;
            res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z;
            res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w;
        }
        res_fl4_ptr[offset] = res_fl4;
    }
}
|
| 184 |
+
|
| 185 |
+
// 16-bit (half / bfloat16) version of the fused bias + residual update. Each
// thread moves one float2 (= four 16-bit elements, handled as two T2 pairs
// widened to fp32 for the arithmetic). Counts are in 4-element vector units.
template <typename T>
__global__ void fused_bias_residual(T* residual,
                                    const T* hidden_state,
                                    const T* attn,
                                    const T* bias,
                                    const T* attn_bias,
                                    const int total_count,
                                    const int intermediate_size,
                                    const float mp_scale,
                                    const bool preln)
{
    // Pick the matching packed-pair type for T (__half2 or __nv_bfloat162).
    using T2 =
        typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
    float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
    const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
    const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
    const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
    const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
    const int offset = blockIdx.x * blockDim.x + threadIdx.x;

    if (offset < total_count) {
        float2 res_fl2 = res_fl2_ptr[offset];
        const float2 hs_fl2 = hs_fl2_ptr[offset];
        const float2 attn_fl2 = attn_fl2_ptr[offset];
        // Biases broadcast across rows: indexed modulo the vectorized row width.
        const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
        const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];

        // Reinterpret each float2 as two packed 16-bit pairs.
        T2* res_half2 = reinterpret_cast<T2*>(&res_fl2);
        const T2* hs_half2 = reinterpret_cast<const T2*>(&hs_fl2);
        const T2* attn_half2 = reinterpret_cast<const T2*>(&attn_fl2);
        const T2* bias_half2 = reinterpret_cast<const T2*>(&bias_fl2);
        const T2* attn_bias_half2 = reinterpret_cast<const T2*>(&attn_bias_fl2);

        // Widen everything to fp32 before the arithmetic.
        float2 res_low = conversion::to<float2>(res_half2[0]);
        float2 res_high = conversion::to<float2>(res_half2[1]);

        const float2 hs_low = conversion::to<float2>(hs_half2[0]);
        const float2 hs_high = conversion::to<float2>(hs_half2[1]);

        const float2 attn_low = conversion::to<float2>(attn_half2[0]);
        const float2 attn_high = conversion::to<float2>(attn_half2[1]);

        const float2 bias_low = conversion::to<float2>(bias_half2[0]);
        const float2 bias_high = conversion::to<float2>(bias_half2[1]);

        const float2 attn_bias_low = conversion::to<float2>(attn_bias_half2[0]);
        const float2 attn_bias_high = conversion::to<float2>(attn_bias_half2[1]);

        if (preln) {
            // residual = (residual + attention + bias + attention_bias) *
            // mp_scale + hidden_state
            res_low.x =
                (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x;
            res_low.y =
                (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y;
            res_high.x =
                (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x;
            res_high.y =
                (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y;
        } else {
            // residual += hidden_state + bias
            res_low.x = (res_low.x + hs_low.x + bias_low.x);
            res_low.y = (res_low.y + hs_low.y + bias_low.y);
            res_high.x = (res_high.x + hs_high.x + bias_high.x);
            res_high.y = (res_high.y + hs_high.y + bias_high.y);
        }
        // Narrow back to T and write the packed vector in one store.
        res_half2[0] = conversion::to<T2>(res_low);
        res_half2[1] = conversion::to<T2>(res_high);

        res_fl2_ptr[offset] = res_fl2;
    }
}
|
| 257 |
+
|
| 258 |
+
// Host launcher for the fused bias + residual kernel.
// Each thread processes 4 elements, so work is sized in total/4 units.
template <typename T>
void launch_bias_residual(T* residual,
                          T* hidden_state,
                          T* attn,
                          T* bias,
                          T* attn_bias,
                          int batch,
                          int hidden_dim,
                          int mp_size,
                          bool preln,
                          cudaStream_t stream)
{
    constexpr int threads = 1024;
    const int total_count = batch * hidden_dim / 4;

    dim3 block_dims(threads);
    dim3 grid_dims((total_count - 1) / threads + 1);  // (batch_size);

    fused_bias_residual<<<grid_dims, block_dims, 0, stream>>>(residual,
                                                              hidden_state,
                                                              attn,
                                                              bias,
                                                              attn_bias,
                                                              total_count,
                                                              hidden_dim / 4,
                                                              1.0 / mp_size,
                                                              preln);
}

#define INSTANTIATE_LAUNCH_BIAS_RESIDUAL(T) \
    template void launch_bias_residual<T>(T*, T*, T*, T*, T*, int, int, int, bool, cudaStream_t);

INSTANTIATE_LAUNCH_BIAS_RESIDUAL(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_BIAS_RESIDUAL(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_BIAS_RESIDUAL(__half);
|
| 293 |
+
|
| 294 |
+
// fp32 specialization: one float4 (4 elements) per thread.
// residual = hidden_state + attention + (residual [+ attn_bias] + bias) * mp_scale
__global__ void gptj_residual_add(float* residual,
                                  const float* hidden_state,
                                  const float* attn,
                                  const float* bias,
                                  const float* attn_bias,
                                  const int total_count,
                                  const int intermediate_size,
                                  const float mp_scale)
{
    float4* res_vec = reinterpret_cast<float4*>(residual);
    const float4* hs_vec = reinterpret_cast<const float4*>(hidden_state);
    const float4* attn_vec = reinterpret_cast<const float4*>(attn);
    const float4* bias_vec = reinterpret_cast<const float4*>(bias);
    const float4* attn_bias_vec = reinterpret_cast<const float4*>(attn_bias);

    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total_count) return;

    float4 res = res_vec[idx];
    const float4 hs = hs_vec[idx];
    const float4 atn = attn_vec[idx];
    // Bias repeats per row; intermediate_size is in float4 units.
    const float4 b = bias_vec[idx % intermediate_size];

    if (attn_bias) {
        // Fold the attention bias into the residual first.
        const float4 ab = attn_bias_vec[idx % intermediate_size];
        res.x += ab.x;
        res.y += ab.y;
        res.z += ab.z;
        res.w += ab.w;
    }

    // residual = hidden_state + attention + (residual + bias) * mp_scale
    res.x = hs.x + atn.x + (res.x + b.x) * mp_scale;
    res.y = hs.y + atn.y + (res.y + b.y) * mp_scale;
    res.z = hs.z + atn.z + (res.z + b.z) * mp_scale;
    res.w = hs.w + atn.w + (res.w + b.w) * mp_scale;

    res_vec[idx] = res;
}
|
| 333 |
+
|
| 334 |
+
// 16-bit specialization (__half / __nv_bfloat16): each thread handles one
// float2, which packs 4 scalar values as two T2 (vector-of-2) elements.
// residual = hidden_state + attention + (residual [+ attn_bias] + bias) * mp_scale
template <typename T>
__global__ void gptj_residual_add(T* residual,
                                  const T* hidden_state,
                                  const T* attn,
                                  const T* bias,
                                  const T* attn_bias,
                                  const int total_count,
                                  const int intermediate_size,
                                  const float mp_scale)
{
    using T2 =
        typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
    // Vectorized (float2 = 4 x 16-bit values) views of all buffers.
    float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
    const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
    const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
    const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
    const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
    const int offset = blockIdx.x * blockDim.x + threadIdx.x;

    if (offset < total_count) {
        float2 res_fl2 = res_fl2_ptr[offset];
        const float2 hs_fl2 = hs_fl2_ptr[offset];
        const float2 attn_fl2 = attn_fl2_ptr[offset];
        // Bias vectors repeat per row; intermediate_size is in float2 units.
        const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];

        // Reinterpret each float2 as two packed T2 values (low/high halves).
        T2* res_half2 = reinterpret_cast<T2*>(&res_fl2);
        const T2* hs_half2 = reinterpret_cast<const T2*>(&hs_fl2);
        const T2* attn_half2 = reinterpret_cast<const T2*>(&attn_fl2);
        const T2* bias_half2 = reinterpret_cast<const T2*>(&bias_fl2);

        // Up-cast everything to fp32 for the arithmetic.
        float2 res_low = conversion::to<float2>(res_half2[0]);
        float2 res_high = conversion::to<float2>(res_half2[1]);

        const float2 hs_low = conversion::to<float2>(hs_half2[0]);
        const float2 hs_high = conversion::to<float2>(hs_half2[1]);

        const float2 attn_low = conversion::to<float2>(attn_half2[0]);
        const float2 attn_high = conversion::to<float2>(attn_half2[1]);

        const float2 bias_low = conversion::to<float2>(bias_half2[0]);
        const float2 bias_high = conversion::to<float2>(bias_half2[1]);

        if (attn_bias) {
            const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
            const T2* attn_bias_half2 = reinterpret_cast<const T2*>(&attn_bias_fl2);
            const float2 attn_bias_low = conversion::to<float2>(attn_bias_half2[0]);
            const float2 attn_bias_high = conversion::to<float2>(attn_bias_half2[1]);
            // residual += attention_bias
            res_low.x += attn_bias_low.x;
            res_low.y += attn_bias_low.y;
            res_high.x += attn_bias_high.x;
            res_high.y += attn_bias_high.y;
        }
        // residual = hidden_state + attention + (residual + bias) * mp_scale
        res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale;
        res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale;
        res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale;
        res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale;

        // Down-cast back to the packed 16-bit representation and store.
        res_half2[0] = conversion::to<T2>(res_low);
        res_half2[1] = conversion::to<T2>(res_high);

        res_fl2_ptr[offset] = res_fl2;
    }
}
|
| 399 |
+
|
| 400 |
+
// Host launcher for the GPT-J style residual add.
// The kernel works on 4-element vectors, so counts are divided by 4.
template <typename T>
void launch_gptj_residual_add(T* residual,
                              T* hidden_state,
                              T* attn,
                              T* bias,
                              T* attn_bias,
                              int hidden_dim,
                              int batch,
                              int mp_size,
                              cudaStream_t stream)
{
    constexpr int threads = 1024;
    const int total_count = batch * hidden_dim / 4;

    dim3 block_dims(threads);
    dim3 grid_dims((total_count - 1) / threads + 1);  // (batch_size);

    gptj_residual_add<<<grid_dims, block_dims, 0, stream>>>(
        residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size);
}

#define INSTANTIATE_GPT_RES_ADD(T) \
    template void launch_gptj_residual_add<T>(T*, T*, T*, T*, T*, int, int, int, cudaStream_t);

INSTANTIATE_GPT_RES_ADD(float);
INSTANTIATE_GPT_RES_ADD(__half);
#ifdef BF16_AVAILABLE
INSTANTIATE_GPT_RES_ADD(__nv_bfloat16);
#endif
|
| 427 |
+
|
| 428 |
+
// Per-element blend of the MoE output with the residual:
//   mlp_out = mlp_out * coef2 + residual * coef1
// where `coef` holds two contiguous [hidden_dim] vectors (coef1, then coef2).
// One block per sequence position; threads stride across the hidden dim.
template <typename T>
__global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim)
{
    constexpr int granularity = 16;  // bytes per vectorized access
    constexpr int vals_per_access = granularity / sizeof(T);

    T* res_row = residual + blockIdx.x * hidden_dim;
    T* mlp_row = mlp_out + blockIdx.x * hidden_dim;

    for (unsigned col = threadIdx.x * vals_per_access; col < hidden_dim;
         col += blockDim.x * vals_per_access) {
        T mlp_vals[vals_per_access];
        T res_vals[vals_per_access];
        T scale_res[vals_per_access];
        T scale_mlp[vals_per_access];

        mem_access::load_global<granularity>(mlp_vals, mlp_row + col);
        mem_access::load_global<granularity>(res_vals, res_row + col);
        mem_access::load_global<granularity>(scale_res, coef + col);
        mem_access::load_global<granularity>(scale_mlp, coef + col + hidden_dim);

#pragma unroll
        for (int v = 0; v < vals_per_access; v++) {
            mlp_vals[v] = mlp_vals[v] * scale_mlp[v] + res_vals[v] * scale_res[v];
        }

        mem_access::store_global<granularity>(mlp_row + col, mlp_vals);
    }
}
|
| 457 |
+
|
| 458 |
+
// Host launcher: one block per token, 1024 threads striding the hidden dim.
template <typename T>
void launch_moe_res_matmul(T* residual,
                           T* coef,
                           T* mlp_out,
                           int seq_len,
                           int hidden_dim,
                           cudaStream_t stream)
{
    moe_res_matmul<<<dim3(seq_len), dim3(1024), 0, stream>>>(
        residual, coef, mlp_out, seq_len, hidden_dim);
}

#define INSTANTIATE_LAUNCH_MOE_RES_MATMUL(T) \
    template void launch_moe_res_matmul<T>(T*, T*, T*, int, int, cudaStream_t);

INSTANTIATE_LAUNCH_MOE_RES_MATMUL(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_MOE_RES_MATMUL(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_MOE_RES_MATMUL(__half);
|
| 480 |
+
|
| 481 |
+
// Copies each row of `output` (head_size float4 chunks) into the wider
// `padded_output` row (padded_head_size chunks), zero-filling the tail.
// head_size / padded_head_size are given in float4 units (8 x 16-bit values),
// as converted by the pad_data launcher.
template <typename T>
__global__ void pad_data_kernel(T* padded_output, T* output, int head_size, int padded_head_size)
{
    using T2 =
        typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
    float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
    float4* output_cast = reinterpret_cast<float4*>(output);
    // One row per (blockIdx.x, threadIdx.y) pair; threadIdx.x walks the chunks.
    int bid = blockIdx.x * (blockDim.y) + threadIdx.y;
    int idx = threadIdx.x;
    padded_output_cast += (bid * padded_head_size);
    output_cast += (bid * head_size);
    // Build a float4 whose packed 16-bit lanes are all zero (4 T2 per float4).
    float4 ZERO;
    const T2 zero_h = conversion::to<T2>(0.f);
    T2* ZERO_h = reinterpret_cast<T2*>(&ZERO);
#pragma unroll
    for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
    // NOTE(review): `bid` is not bounds-checked and the launcher rounds the
    // row count up to a multiple of blockDim.y (16), so rows past the end are
    // written out of bounds unless bsz is a multiple of 16 — confirm callers.
    if (idx < head_size)
        padded_output_cast[idx] = output_cast[idx];
    else
        padded_output_cast[idx] = ZERO;
}
|
| 502 |
+
|
| 503 |
+
// fp32 overload: intentionally left empty. Presumably the head-padding path
// is only exercised for 16-bit types — TODO(review): confirm no fp32 caller
// relies on this performing any padding.
__global__ void pad_data_kernel(float* padded_output,
                                float* output,
                                int head_size,
                                int padded_head_size)
{
}
|
| 509 |
+
|
| 510 |
+
// Host launcher for pad_data_kernel.
// Sizes are converted to float4 units (/ 8) below, so head_size and
// padded_head_size must both be divisible by 8. The grid covers bsz rounded
// up to a multiple of 16 rows (see the kernel's bounds note).
template <typename T>
void pad_data(T* padded_output,
              T* output,
              int bsz,
              int head_size,
              int padded_head_size,
              cudaStream_t stream)
{
    dim3 grid_dim((bsz - 1) / 16 + 1);
    dim3 block_dim(padded_head_size / 8, 16);
    pad_data_kernel<<<grid_dim, block_dim, 0, stream>>>(
        padded_output, output, head_size / 8, padded_head_size / 8);
}

#define INSTANTIATE_PAD_DATA(T) template void pad_data(T*, T*, int, int, int, cudaStream_t stream);

INSTANTIATE_PAD_DATA(float);
INSTANTIATE_PAD_DATA(__half);
#ifdef BF16_AVAILABLE
INSTANTIATE_PAD_DATA(__nv_bfloat16);
#endif
|
| 531 |
+
|
| 532 |
+
// Copies a [bsz, seq_len, head_size] tensor into a zero-padded
// [bsz, padded_seq_len, padded_head_size] buffer. head_size and
// padded_head_size are given in float4 units (8 x 16-bit values).
template <typename T>
__global__ void pad_head_seq_kernel(T* padded_output,
                                    T* output,
                                    int seq_len,
                                    int padded_seq_len,
                                    int head_size,
                                    int padded_head_size)
{
    using T2 =
        typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
    float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
    float4* output_cast = reinterpret_cast<float4*>(output);
    int bsz = blockIdx.x;                               // batch index
    int bid = blockIdx.y * (blockDim.y) + threadIdx.y;  // sequence index
    int idx = threadIdx.x;                              // chunk within the head
    padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size;
    output_cast += (bsz * seq_len + bid) * head_size;
    // Build a float4 whose packed 16-bit lanes are all zero (4 T2 per float4).
    float4 ZERO;
    const T2 zero_h = conversion::to<T2>(0.f);
    T2* ZERO_h = reinterpret_cast<T2*>(&ZERO);
#pragma unroll
    for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;

    // Positions beyond seq_len or head_size (but inside the padded extents)
    // are zero-filled.
    if (idx < head_size && bid < seq_len)
        padded_output_cast[idx] = output_cast[idx];
    else
        padded_output_cast[idx] = ZERO;
}
|
| 560 |
+
|
| 561 |
+
// fp32 overload: intentionally left empty. Presumably this padding path is
// only used for 16-bit types — TODO(review): confirm no fp32 caller expects
// actual padding here.
__global__ void pad_head_seq_kernel(float* padded_output,
                                    float* output,
                                    int seq_len,
                                    int padded_seq_len,
                                    int head_size,
                                    int padded_head_size)
{
}
|
| 569 |
+
|
| 570 |
+
// Host launcher for pad_head_seq_kernel.
// Sizes are converted to float4 units (/ 8), so head_size and
// padded_head_size must be divisible by 8; grid.y is padded_seq_len / 16, so
// padded_seq_len must be a multiple of 16.
template <typename T>
void pad_head_seq(T* padded_output,
                  T* output,
                  int bsz,
                  int seq_len,
                  int padded_seq_len,
                  int head_size,
                  int padded_head_size,
                  cudaStream_t stream)
{
    dim3 grid_dim(bsz, padded_seq_len / 16);
    dim3 block_dim(padded_head_size / 8, 16);
    pad_head_seq_kernel<<<grid_dim, block_dim, 0, stream>>>(
        padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8);
}

#define INSTANTIATE_PAD_HEAD_SEQ(T) \
    template void pad_head_seq<T>(T*, T*, int, int, int, int, int, cudaStream_t);

INSTANTIATE_PAD_HEAD_SEQ(__half);
#ifdef BF16_AVAILABLE
INSTANTIATE_PAD_HEAD_SEQ(__nv_bfloat16);
#endif
INSTANTIATE_PAD_HEAD_SEQ(float);
|
| 594 |
+
|
| 595 |
+
// TODO(cmikeh2): evaluate different GeLU performance
// Exact (erf-based) GeLU: x * Phi(x), with Phi the standard normal CDF.
__device__ __forceinline__ float old_gelu(float val)
{
    constexpr float rsqrt_2 = 0.707106769084930419922;  // 1 / sqrt(2)
    const float half_val = val * 0.5f;
    return half_val * (1.0f + erff(val * rsqrt_2));
}
|
| 602 |
+
|
| 603 |
+
// Launch configuration for the fused gated-activation kernels below.
namespace fused_geglu {
constexpr int threads = 256;     // threads per block
constexpr int steps = 2;         // independent tiles processed per block
constexpr int granularity = 16;  // bytes moved per vectorized access
}  // namespace fused_geglu
|
| 608 |
+
|
| 609 |
+
// SiLU (swish): x * sigmoid(x), computed as x / (1 + e^{-x}).
__device__ __forceinline__ float silu(float val)
{
    const float denom = 1.0f + expf(-val);
    return val / denom;
}
|
| 610 |
+
|
| 611 |
+
// Fused gated activation (GEGLU / SiLU-gate). Each row of `activation` holds
// 2 * base_channels values: the first half is the hidden state and the second
// half is the gate. Produces, per channel c:
//   output[c] = (act[c] + bias[c]) * act_fn(act[c + base_channels] + bias[c + base_channels])
// with act_fn = GeLU when useGelu, otherwise SiLU.
template <typename T, bool useGelu>
__global__ void fused_gate_activation(T* output,
                                      const T* activation,
                                      const T* bias,
                                      int base_channels,
                                      int output_stride,
                                      int total_elems)
{
    constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
    constexpr int T_per_step = T_per_access * fused_geglu::threads;
    constexpr int T_per_block = T_per_step * fused_geglu::steps;

    const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access;

#pragma unroll
    for (int i = 0; i < fused_geglu::steps; i++) {
        T activation_buffer_1[T_per_access];
        T activation_buffer_2[T_per_access];
        T bias_buffer_1[T_per_access];
        T bias_buffer_2[T_per_access];

        const int iter_id = id + T_per_step * i;
        if (iter_id < total_elems) {
            // Map the flat element id onto (token, channel) in the gated layout;
            // each input row is base_channels * 2 wide.
            const int channel_id = iter_id % base_channels;
            const int seq_id = iter_id / base_channels;
            const int seq_offset = seq_id * base_channels * 2;

            mem_access::load_global<fused_geglu::granularity>(activation_buffer_1,
                                                              activation + seq_offset + channel_id);
            mem_access::load_global<fused_geglu::granularity>(
                activation_buffer_2, activation + seq_offset + channel_id + base_channels);
            // Bias loads are predicated on bias != nullptr; presumably the
            // helper fills zeros when the predicate is false (matches the
            // guarded out-of-bounds loads elsewhere in this file) — confirm in
            // memory_access_utils.h.
            mem_access::load_global<fused_geglu::granularity>(
                bias_buffer_1, bias + channel_id, bias != nullptr);
            mem_access::load_global<fused_geglu::granularity>(
                bias_buffer_2, bias + channel_id + base_channels, bias != nullptr);

            // Since the GeLU is going to happen at float, might as well
            // convert
#pragma unroll
            for (int v = 0; v < T_per_access; v++) {
                T hidden_state = activation_buffer_1[v] + bias_buffer_1[v];
                T pre_gate = activation_buffer_2[v] + bias_buffer_2[v];
                float pre_gate_f = conversion::to<float>(pre_gate);
                float gate_f = (useGelu) ? old_gelu(pre_gate_f) : silu(pre_gate_f);
                T gate = conversion::to<T>(gate_f);
                activation_buffer_1[v] = hidden_state * gate;
            }

            mem_access::store_global<fused_geglu::granularity>(
                output + seq_id * output_stride + channel_id, activation_buffer_1);
        }
    }
}
|
| 664 |
+
|
| 665 |
+
// Host launcher for the fused gated activation.
// `bias` may be nullptr (the kernel predicates its bias loads on it).
template <typename T>
void launch_gated_activation(T* output,
                             const T* activation,
                             const T* bias,
                             int rows,
                             int output_stride,
                             int elems_per_row,
                             bool use_gelu,
                             cudaStream_t stream)
{
    /*
    Fused bias GEGLU is a variant of the gated activation functions.
    The input here is a matrix of [batch, seq_len, 2 * intermediate_dim]
    where the second half of the channels act as GeLU gates for the first
    half.
    */

    // Re-derive the kernel's per-block work size.
    constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
    constexpr int T_per_step = T_per_access * fused_geglu::threads;
    constexpr int T_per_block = T_per_step * fused_geglu::steps;

    // Only the first half of each row is produced: rows * (elems_per_row / 2)
    // total output elements.
    const int base_channels = elems_per_row / 2;
    const int total_elems = base_channels * rows;

    dim3 block(fused_geglu::threads);
    dim3 grid((total_elems + T_per_block - 1) / T_per_block);

    // The gate nonlinearity is a compile-time template parameter, hence the
    // runtime branch over two instantiations.
    if (use_gelu) {
        fused_gate_activation<T, true><<<grid, block, 0, stream>>>(
            output, activation, bias, base_channels, output_stride, total_elems);
    } else {
        fused_gate_activation<T, false><<<grid, block, 0, stream>>>(
            output, activation, bias, base_channels, output_stride, total_elems);
    }
}

#define INSTANTIATE_LAUNCH_GATED_ACTIVATION(T) \
    template void launch_gated_activation(     \
        T*, const T*, const T*, int, int, int, bool, cudaStream_t);

INSTANTIATE_LAUNCH_GATED_ACTIVATION(__half);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_GATED_ACTIVATION(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_GATED_ACTIVATION(float);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu
ADDED
|
@@ -0,0 +1,503 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#include "ds_kernel_utils.h"
|
| 8 |
+
#include "inference_cuda_layers.h"
|
| 9 |
+
#include "memory_access_utils.h"
|
| 10 |
+
#include "reduction_utils.h"
|
| 11 |
+
|
| 12 |
+
namespace cg = cooperative_groups;
|
| 13 |
+
using rop = reduce::ROpType;
|
| 14 |
+
|
| 15 |
+
namespace ln {
|
| 16 |
+
constexpr int granularity = 16;
|
| 17 |
+
} // namespace ln
|
| 18 |
+
|
| 19 |
+
/*
Primary layer norm implementation. Assumes elems_per_row % 8
is equal to 0.

Args:
    output: buffer for output data
    vals: buffer for input data
    gamma: gain for normalization
    beta: bias for normalization
    epsilon: numeric stability
    elems_per_row: number of elements each block will normalize
*/
template <typename T, int unRoll, int threadsPerGroup, int maxThreads>
__global__ void fused_ln(T* output,
                         const T* vals,
                         const T* gamma,
                         const T* beta,
                         float epsilon,
                         int elems_per_row)
{
    constexpr int T_per_load = ln::granularity / sizeof(T);

    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);

    // X-dimension of the block
    const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
                             (tb.thread_index().y * elems_per_row);
    const int thread_offset = tb.thread_index().x * T_per_load;
    const int base_offset = block_offset + thread_offset;
    const int stride = blockDim.x * T_per_load;

    float sum = reduce::init<rop::Add, float>();

    const T* input_base = vals + base_offset;

    T local_buffer[unRoll * T_per_load];

    // Fix: the original wrote "#pragma unRoll" (capital R) throughout this
    // kernel; nvcc does not recognize that pragma and ignores it, silently
    // losing the unroll hint. Corrected to the standard "#pragma unroll".
#pragma unroll
    for (int i = 0; i < unRoll; i++) {
        T* iteration_buffer = local_buffer + i * T_per_load;

        // Out-of-bounds lanes use the predicated form of the load.
        mem_access::load_global<ln::granularity>(
            iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row);

#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
            sum = reduce::element<rop::Add>(sum, vals_up_cast);
        }
    }

    // Group-wide reduction for the mean.
    reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, sum);
    const float mean = sum / elems_per_row;

    float mean_diff = reduce::init<rop::Add, float>();

#pragma unroll
    for (int i = 0; i < unRoll; i++) {
#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            // Using a 0 value here skews the variance, have to if-guard
            if (thread_offset + i * stride < elems_per_row) {
                float diff = (conversion::to<float>(local_buffer[i * T_per_load + j]) - mean);
                mean_diff = reduce::element<rop::Add>(mean_diff, diff * diff);
            }
        }
    }

    // Group-wide reduction for the variance, then 1/sqrt(var + eps).
    reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, mean_diff);
    const float variance = mean_diff / elems_per_row;
    const float denom = __frsqrt_rn(variance + epsilon);

    T* block_output = output + block_offset;

#pragma unroll
    for (int i = 0; i < unRoll; i++) {
        T* iteration_buffer = local_buffer + i * T_per_load;
        const int iter_idx = i * stride + thread_offset;
        const bool do_loads = iter_idx < elems_per_row;

        T gamma_local[T_per_load], beta_local[T_per_load];

        mem_access::load_global<ln::granularity>(gamma_local, gamma + iter_idx, do_loads);
        mem_access::load_global<ln::granularity>(beta_local, beta + iter_idx, do_loads);

#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            // Normalize, then apply the affine transform in fp32.
            float val = conversion::to<float>(iteration_buffer[j]);
            val = (val - mean) * denom;
            val =
                val * conversion::to<float>(gamma_local[j]) + conversion::to<float>(beta_local[j]);
            iteration_buffer[j] = conversion::to<T>(val);
        }

        if (do_loads) {
            mem_access::store_global<ln::granularity>(block_output + iter_idx, iteration_buffer);
        }
    }
}
|
| 122 |
+
|
| 123 |
+
// Helper: instantiate and launch fused_ln with the chosen tiling parameters.
#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \
    fused_ln<T, unRollFactor, threadsPerGroup, maxThreads>         \
        <<<grid, block, 0, stream>>>(output, vals, gamma, beta, epsilon, elems_per_row);

// Host-side dispatcher for fused_ln: picks a thread-group size and unroll
// factor from elems_per_row, then launches one group per row (small rows may
// share a block).
template <typename T>
void launch_fused_ln(T* output,
                     const T* vals,
                     const T* gamma,
                     const T* beta,
                     float epsilon,
                     int rows,
                     int elems_per_row,
                     cudaStream_t stream)
{
    // 8 for __half, 4 for float
    constexpr int T_per_load = ln::granularity / sizeof(T);

    constexpr int maxThreads = 256;

    // For float, unRoll 4; for __half, unRoll 2
    constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;

    // Rows of <= 128 elements can pack multiple row-groups into one block.
    const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
    const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;

    // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
    // warp-sized blocks rather than stepping up to 64/96 threads
    const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
    const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;

    const int groups_per_block_max =
        is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
    const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
    // ceil(rows / groups_per_block)
    const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;

    dim3 block(threadsPerGroup, groups_per_block);
    dim3 grid(groups_launch);

    const int elems_per_step = threadsPerGroup * h_per_step;
    const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;

    if (is_subblock_schedule) {
        // <=128
        if (threadsPerGroup == 1) {
            LAUNCH_FUSED_LN(1, 1, maxThreads);
        } else if (threadsPerGroup == 2) {
            LAUNCH_FUSED_LN(1, 2, maxThreads);
        } else if (threadsPerGroup == 4) {
            LAUNCH_FUSED_LN(1, 4, maxThreads);
        } else if (threadsPerGroup == 8) {
            LAUNCH_FUSED_LN(1, 8, maxThreads);
        } else if (threadsPerGroup == 16) {
            LAUNCH_FUSED_LN(1, 16, maxThreads);
        }
    } else if (external_unRoll == 1) {
        // 129 - 4096 elems
        // (this can launch with 1-7 warps as well)
        LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads);
    } else if (external_unRoll == 2) {
        // 4097 - 8192 elems
        LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads);
    } else if (external_unRoll == 3) {
        // 8193 - 12288 elems
        LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads);
    } else if (external_unRoll == 4) {
        // 12289 - 16384 elems
        LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads);
    }
}

#define INSTANTIATE_FUSED_LN(T) \
    template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t);

INSTANTIATE_FUSED_LN(__half);
#ifdef BF16_AVAILABLE
INSTANTIATE_FUSED_LN(__nv_bfloat16);
#endif
INSTANTIATE_FUSED_LN(float);
|
| 201 |
+
|
| 202 |
+
/*
|
| 203 |
+
Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8
|
| 204 |
+
is equal to 0.
|
| 205 |
+
|
| 206 |
+
TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual
|
| 207 |
+
need to be fused into compute-bound producer operations.
|
| 208 |
+
|
| 209 |
+
Args:
|
| 210 |
+
output: buffer for output data
|
| 211 |
+
res_output: output of residual addition
|
| 212 |
+
vals: buffer for input data
|
| 213 |
+
residual: residual data
|
| 214 |
+
bias: bias of of input data
|
| 215 |
+
gamma: gain for normalization
|
| 216 |
+
beta: bias for normalization
|
| 217 |
+
epsilon: numeric stability
|
| 218 |
+
elems_per_row: number of elements each block will normalize
|
| 219 |
+
Template arg:
|
| 220 |
+
StoreResidual: controls whether the residual calculation is stored
|
| 221 |
+
or not. When set to false, the input `res_output` is unused.
|
| 222 |
+
*/
|
| 223 |
+
template <typename T, int unRoll, int threadsPerGroup, int maxThreads, bool preLnResidual>
|
| 224 |
+
__global__ void fused_residual_ln(T* output,
|
| 225 |
+
T* res_output,
|
| 226 |
+
const T* vals,
|
| 227 |
+
const T* residual,
|
| 228 |
+
const T* bias,
|
| 229 |
+
const T* gamma,
|
| 230 |
+
const T* beta,
|
| 231 |
+
float epsilon,
|
| 232 |
+
int elems_per_row)
|
| 233 |
+
{
|
| 234 |
+
constexpr int T_per_load = ln::granularity / sizeof(T);
|
| 235 |
+
|
| 236 |
+
cg::thread_block tb = cg::this_thread_block();
|
| 237 |
+
cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
|
| 238 |
+
|
| 239 |
+
// X-dimension of the block
|
| 240 |
+
const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
|
| 241 |
+
(tb.thread_index().y * elems_per_row);
|
| 242 |
+
const int thread_offset = tb.thread_index().x * T_per_load;
|
| 243 |
+
const int base_offset = block_offset + thread_offset;
|
| 244 |
+
const int stride = tb.size() * T_per_load;
|
| 245 |
+
|
| 246 |
+
float sum = reduce::init<rop::Add, float>();
|
| 247 |
+
|
| 248 |
+
const T* input_base = vals + base_offset;
|
| 249 |
+
const T* residual_base = residual + base_offset;
|
| 250 |
+
const T* bias_base = bias + thread_offset;
|
| 251 |
+
|
| 252 |
+
T local_buffer[unRoll * T_per_load];
|
| 253 |
+
|
| 254 |
+
// Unlike a vanilla layernorm, since we're fusing the two adds as well
|
| 255 |
+
// an inner unRoll seems to be less valuable. If anything, a double unRoll
|
| 256 |
+
// makes the most sense if we find we are having performance issues.
|
| 257 |
+
#pragma unRoll
|
| 258 |
+
for (int i = 0; i < unRoll; i++) {
|
| 259 |
+
T* iteration_buffer = local_buffer + i * T_per_load;
|
| 260 |
+
T residual_buffer[T_per_load];
|
| 261 |
+
T bias_buffer[T_per_load];
|
| 262 |
+
|
| 263 |
+
mem_access::load_global<ln::granularity>(
|
| 264 |
+
iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row);
|
| 265 |
+
mem_access::load_global<ln::granularity>(residual_buffer,
|
| 266 |
+
residual_base + i * stride,
|
| 267 |
+
thread_offset + i * stride < elems_per_row);
|
| 268 |
+
mem_access::load_global<ln::granularity>(
|
| 269 |
+
bias_buffer, bias_base + i * stride, thread_offset + i * stride < elems_per_row);
|
| 270 |
+
|
| 271 |
+
#pragma unRoll
|
| 272 |
+
for (int j = 0; j < T_per_load; j++) {
|
| 273 |
+
float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
|
| 274 |
+
float res_up_cast = conversion::to<float>(residual_buffer[j]);
|
| 275 |
+
float bias_up_cast = conversion::to<float>(bias_buffer[j]);
|
| 276 |
+
vals_up_cast = vals_up_cast + bias_up_cast + res_up_cast;
|
| 277 |
+
sum = reduce::element<rop::Add>(sum, vals_up_cast);
|
| 278 |
+
iteration_buffer[j] = conversion::to<T>(vals_up_cast);
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
if (preLnResidual && (thread_offset + i * stride < elems_per_row)) {
|
| 282 |
+
mem_access::store_global<ln::granularity>(res_output + base_offset + i * stride,
|
| 283 |
+
iteration_buffer);
|
| 284 |
+
}
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, sum);
|
| 288 |
+
const float mean = sum / elems_per_row;
|
| 289 |
+
|
| 290 |
+
float mean_diff = reduce::init<rop::Add, float>();
|
| 291 |
+
#pragma unRoll
|
| 292 |
+
for (int i = 0; i < unRoll; i++) {
|
| 293 |
+
#pragma unRoll
|
| 294 |
+
for (int j = 0; j < T_per_load; j++) {
|
| 295 |
+
// Using a 0 value here skews the variance, have to if-guard
|
| 296 |
+
if (thread_offset + i * stride < elems_per_row) {
|
| 297 |
+
float diff = (conversion::to<float>(local_buffer[i * T_per_load + j]) - mean);
|
| 298 |
+
mean_diff = reduce::element<rop::Add>(mean_diff, diff * diff);
|
| 299 |
+
}
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, mean_diff);
|
| 304 |
+
const float variance = mean_diff / elems_per_row;
|
| 305 |
+
const float denom = __frsqrt_rn(variance + epsilon);
|
| 306 |
+
|
| 307 |
+
T* block_output = output + block_offset;
|
| 308 |
+
|
| 309 |
+
#pragma unRoll
|
| 310 |
+
for (int i = 0; i < unRoll; i++) {
|
| 311 |
+
T* iteration_buffer = local_buffer + i * T_per_load;
|
| 312 |
+
const int iter_idx = i * stride + thread_offset;
|
| 313 |
+
const bool do_loads = iter_idx < elems_per_row;
|
| 314 |
+
|
| 315 |
+
T gamma_local[T_per_load], beta_local[T_per_load];
|
| 316 |
+
|
| 317 |
+
mem_access::load_global<ln::granularity>(gamma_local, gamma + iter_idx, do_loads);
|
| 318 |
+
mem_access::load_global<ln::granularity>(beta_local, beta + iter_idx, do_loads);
|
| 319 |
+
|
| 320 |
+
#pragma unRoll
|
| 321 |
+
for (int j = 0; j < T_per_load; j++) {
|
| 322 |
+
// iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute;
|
| 323 |
+
// iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j];
|
| 324 |
+
float val = conversion::to<float>(iteration_buffer[j]);
|
| 325 |
+
val = (val - mean) * denom;
|
| 326 |
+
val =
|
| 327 |
+
val * conversion::to<float>(gamma_local[j]) + conversion::to<float>(beta_local[j]);
|
| 328 |
+
iteration_buffer[j] = conversion::to<T>(val);
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
if (do_loads) {
|
| 332 |
+
mem_access::store_global<ln::granularity>(block_output + iter_idx, iteration_buffer);
|
| 333 |
+
}
|
| 334 |
+
}
|
| 335 |
+
}
|
| 336 |
+
|
| 337 |
+
// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified.
|
| 338 |
+
#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \
|
| 339 |
+
fused_residual_ln<T, unRollFactor, threadsPerGroup, maxThreads, false> \
|
| 340 |
+
<<<grid, block, 0, stream>>>( \
|
| 341 |
+
output, nullptr, vals, residual, bias, gamma, beta, epsilon, elems_per_row);
|
| 342 |
+
|
| 343 |
+
template <typename T>
|
| 344 |
+
void launch_fused_residual_ln(T* output,
|
| 345 |
+
const T* vals,
|
| 346 |
+
const T* residual,
|
| 347 |
+
const T* bias,
|
| 348 |
+
const T* gamma,
|
| 349 |
+
const T* beta,
|
| 350 |
+
float epsilon,
|
| 351 |
+
int rows,
|
| 352 |
+
int elems_per_row,
|
| 353 |
+
cudaStream_t stream)
|
| 354 |
+
{
|
| 355 |
+
// 8 for __half, 4 for float
|
| 356 |
+
constexpr int T_per_load = ln::granularity / sizeof(T);
|
| 357 |
+
|
| 358 |
+
constexpr int maxThreads = 256;
|
| 359 |
+
|
| 360 |
+
// For Flaoat, unRoll 4, for __half, unRoll 2
|
| 361 |
+
constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
|
| 362 |
+
|
| 363 |
+
const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
|
| 364 |
+
const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
|
| 365 |
+
|
| 366 |
+
// Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
|
| 367 |
+
// warp-sized blocks rather than stepping up to 64/96 threads
|
| 368 |
+
const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
|
| 369 |
+
const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
|
| 370 |
+
|
| 371 |
+
const int groups_per_block_max =
|
| 372 |
+
is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
|
| 373 |
+
const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
|
| 374 |
+
const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
|
| 375 |
+
|
| 376 |
+
dim3 block(threadsPerGroup, groups_per_block);
|
| 377 |
+
dim3 grid(groups_launch);
|
| 378 |
+
|
| 379 |
+
const int elems_per_step = threadsPerGroup * h_per_step;
|
| 380 |
+
const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
|
| 381 |
+
|
| 382 |
+
if (is_subblock_schedule) {
|
| 383 |
+
// <=128
|
| 384 |
+
if (threadsPerGroup == 1) {
|
| 385 |
+
LAUNCH_FUSED_RES_LN(1, 1, maxThreads);
|
| 386 |
+
} else if (threadsPerGroup == 2) {
|
| 387 |
+
LAUNCH_FUSED_RES_LN(1, 2, maxThreads);
|
| 388 |
+
} else if (threadsPerGroup == 4) {
|
| 389 |
+
LAUNCH_FUSED_RES_LN(1, 4, maxThreads);
|
| 390 |
+
} else if (threadsPerGroup == 8) {
|
| 391 |
+
LAUNCH_FUSED_RES_LN(1, 8, maxThreads);
|
| 392 |
+
} else if (threadsPerGroup == 16) {
|
| 393 |
+
LAUNCH_FUSED_RES_LN(1, 16, maxThreads);
|
| 394 |
+
}
|
| 395 |
+
} else if (external_unRoll == 1) {
|
| 396 |
+
// 129 - 4096 elems
|
| 397 |
+
// (this can launch with 1-7 warps as well)
|
| 398 |
+
LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads);
|
| 399 |
+
} else if (external_unRoll == 2) {
|
| 400 |
+
// 4097 - 8192 elems
|
| 401 |
+
LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads);
|
| 402 |
+
} else if (external_unRoll == 3) {
|
| 403 |
+
// 8193 - 12288 elems
|
| 404 |
+
LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads);
|
| 405 |
+
} else if (external_unRoll == 4) {
|
| 406 |
+
// 12289 - 16384 elems
|
| 407 |
+
LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads);
|
| 408 |
+
}
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \
|
| 412 |
+
fused_residual_ln<T, unRollFactor, threadsPerGroup, maxThreads, true> \
|
| 413 |
+
<<<grid, block, 0, stream>>>( \
|
| 414 |
+
norm_output, res_output, vals, residual, bias, gamma, beta, epsilon, elems_per_row);
|
| 415 |
+
|
| 416 |
+
template <typename T>
|
| 417 |
+
void launch_fused_residual_ln_store_pre_ln_res(T* norm_output,
|
| 418 |
+
T* res_output,
|
| 419 |
+
const T* vals,
|
| 420 |
+
const T* residual,
|
| 421 |
+
const T* bias,
|
| 422 |
+
const T* gamma,
|
| 423 |
+
const T* beta,
|
| 424 |
+
float epsilon,
|
| 425 |
+
int rows,
|
| 426 |
+
int elems_per_row,
|
| 427 |
+
cudaStream_t stream)
|
| 428 |
+
{
|
| 429 |
+
// 8 for __half, 4 for float
|
| 430 |
+
constexpr int T_per_load = ln::granularity / sizeof(T);
|
| 431 |
+
|
| 432 |
+
constexpr int maxThreads = 256;
|
| 433 |
+
|
| 434 |
+
// For Flaoat, unRoll 4, for __half, unRoll 2
|
| 435 |
+
constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
|
| 436 |
+
|
| 437 |
+
const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
|
| 438 |
+
const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
|
| 439 |
+
|
| 440 |
+
// Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
|
| 441 |
+
// warp-sized blocks rather than stepping up to 64/96 threads
|
| 442 |
+
const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
|
| 443 |
+
const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
|
| 444 |
+
|
| 445 |
+
const int groups_per_block_max =
|
| 446 |
+
is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
|
| 447 |
+
const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
|
| 448 |
+
const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
|
| 449 |
+
|
| 450 |
+
dim3 block(threadsPerGroup, groups_per_block);
|
| 451 |
+
dim3 grid(groups_launch);
|
| 452 |
+
|
| 453 |
+
const int elems_per_step = threadsPerGroup * h_per_step;
|
| 454 |
+
const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
|
| 455 |
+
|
| 456 |
+
if (is_subblock_schedule) {
|
| 457 |
+
// <=128
|
| 458 |
+
if (threadsPerGroup == 1) {
|
| 459 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads);
|
| 460 |
+
} else if (threadsPerGroup == 2) {
|
| 461 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads);
|
| 462 |
+
} else if (threadsPerGroup == 4) {
|
| 463 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads);
|
| 464 |
+
} else if (threadsPerGroup == 8) {
|
| 465 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads);
|
| 466 |
+
} else if (threadsPerGroup == 16) {
|
| 467 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads);
|
| 468 |
+
}
|
| 469 |
+
} else if (external_unRoll == 1) {
|
| 470 |
+
// 129 - 4096 elems
|
| 471 |
+
// (this can launch with 1-7 warps as well)
|
| 472 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads);
|
| 473 |
+
} else if (external_unRoll == 2) {
|
| 474 |
+
// 4097 - 8192 elems
|
| 475 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads);
|
| 476 |
+
} else if (external_unRoll == 3) {
|
| 477 |
+
// 8193 - 12288 elems
|
| 478 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads);
|
| 479 |
+
} else if (external_unRoll == 4) {
|
| 480 |
+
// 12289 - 16384 elems
|
| 481 |
+
LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads);
|
| 482 |
+
}
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
#define INSTANTIATE_RES_LN(T) \
|
| 486 |
+
template void launch_fused_residual_ln<T>( \
|
| 487 |
+
T*, const T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
|
| 488 |
+
|
| 489 |
+
#define INSTANTIATE_PRE_LN_RES(T) \
|
| 490 |
+
template void launch_fused_residual_ln_store_pre_ln_res<T>( \
|
| 491 |
+
T*, T*, const T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
|
| 492 |
+
|
| 493 |
+
INSTANTIATE_RES_LN(__half);
|
| 494 |
+
INSTANTIATE_RES_LN(float);
|
| 495 |
+
#ifdef BF16_AVAILABLE
|
| 496 |
+
INSTANTIATE_RES_LN(__nv_bfloat16);
|
| 497 |
+
#endif
|
| 498 |
+
|
| 499 |
+
INSTANTIATE_PRE_LN_RES(__half);
|
| 500 |
+
INSTANTIATE_PRE_LN_RES(float);
|
| 501 |
+
#ifdef BF16_AVAILABLE
|
| 502 |
+
INSTANTIATE_PRE_LN_RES(__nv_bfloat16);
|
| 503 |
+
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <cuda_fp16.h>
|
| 7 |
+
#include "conversion_utils.h"
|
| 8 |
+
#include "ds_kernel_utils.h"
|
| 9 |
+
#include "memory_access_utils.h"
|
| 10 |
+
|
| 11 |
+
namespace pwise {
|
| 12 |
+
constexpr int granularity = 16;
|
| 13 |
+
constexpr int unroll = 4;
|
| 14 |
+
constexpr int threads = 256;
|
| 15 |
+
} // namespace pwise
|
| 16 |
+
|
| 17 |
+
template <typename T>
|
| 18 |
+
__global__ void vector_add_kernel(T* out, const T* a, const T* b, float gamma, int num_elems)
|
| 19 |
+
{
|
| 20 |
+
constexpr int T_per_access = pwise::granularity / sizeof(T);
|
| 21 |
+
|
| 22 |
+
const int block_offset = blockIdx.x * pwise::threads * pwise::unroll * T_per_access;
|
| 23 |
+
const int thread_offset = threadIdx.x * T_per_access;
|
| 24 |
+
const int total_offset = block_offset + thread_offset;
|
| 25 |
+
constexpr int stride = pwise::threads * T_per_access;
|
| 26 |
+
|
| 27 |
+
#pragma unroll
|
| 28 |
+
for (int i = 0; i < pwise::unroll; i++) {
|
| 29 |
+
T temp_buf_a[T_per_access], temp_buf_b[T_per_access];
|
| 30 |
+
|
| 31 |
+
const int iter_idx = total_offset + i * stride;
|
| 32 |
+
|
| 33 |
+
mem_access::load_global<pwise::granularity>(temp_buf_a, a + iter_idx, iter_idx < num_elems);
|
| 34 |
+
mem_access::load_global<pwise::granularity>(temp_buf_b, b + iter_idx, iter_idx < num_elems);
|
| 35 |
+
|
| 36 |
+
#pragma unroll
|
| 37 |
+
for (int j = 0; j < T_per_access; j++) {
|
| 38 |
+
float up_cast_a = conversion::to<float>(temp_buf_a[j]);
|
| 39 |
+
float up_cast_b = conversion::to<float>(temp_buf_b[j]);
|
| 40 |
+
temp_buf_a[j] = conversion::to<T>((gamma * up_cast_a) + up_cast_b);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
if (iter_idx < num_elems) {
|
| 44 |
+
mem_access::store_global<pwise::granularity>(out + iter_idx, temp_buf_a);
|
| 45 |
+
}
|
| 46 |
+
}
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
template <typename T>
|
| 50 |
+
void launch_vector_add(T* out,
|
| 51 |
+
const T* a,
|
| 52 |
+
const T* b,
|
| 53 |
+
float gamma,
|
| 54 |
+
int num_elems,
|
| 55 |
+
cudaStream_t stream)
|
| 56 |
+
{
|
| 57 |
+
constexpr int T_per_access = pwise::granularity / sizeof(T);
|
| 58 |
+
constexpr int T_per_block = pwise::threads * T_per_access * pwise::unroll;
|
| 59 |
+
|
| 60 |
+
dim3 block(pwise::threads);
|
| 61 |
+
dim3 grid((num_elems + T_per_block - 1) / T_per_block);
|
| 62 |
+
|
| 63 |
+
vector_add_kernel<<<grid, block, 0, stream>>>(out, a, b, gamma, num_elems);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
#define INSTANTIATE_VECTOR_ADD(T) \
|
| 67 |
+
template void launch_vector_add<T>( \
|
| 68 |
+
T * out, const T* a, const T* b, float gamma, int num_elems, cudaStream_t stream);
|
| 69 |
+
|
| 70 |
+
INSTANTIATE_VECTOR_ADD(float)
|
| 71 |
+
INSTANTIATE_VECTOR_ADD(__half)
|
| 72 |
+
#ifdef BF16_AVAILABLE
|
| 73 |
+
INSTANTIATE_VECTOR_ADD(__nv_bfloat16)
|
| 74 |
+
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp
ADDED
|
@@ -0,0 +1,2020 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <c10/cuda/CUDAStream.h>
|
| 7 |
+
#include <torch/extension.h>
|
| 8 |
+
#include <stdexcept>
|
| 9 |
+
#include <vector>
|
| 10 |
+
#include "inference_context.h"
|
| 11 |
+
#include "inference_cublas_wrappers.h"
|
| 12 |
+
#include "inference_cuda_layers.h"
|
| 13 |
+
|
| 14 |
+
std::array<int, 3> gemm_algos = std::array<int, 3>({99, 99, 99});
|
| 15 |
+
|
| 16 |
+
// NOTE: This activation function type enum should be always in sync
|
| 17 |
+
// with the python counterpart, otherwise the casting from python binding
|
| 18 |
+
// will be incorrect.
|
| 19 |
+
enum class ActivationFuncType { UNKNOWN = 0, GELU = 1, ReLU = 2, GATED_GELU = 3, GATED_SILU = 4 };
|
| 20 |
+
|
| 21 |
+
enum class NormType { UNKNOWN = 0, LayerNorm = 1, GroupNorm = 2, RMSNorm = 3 };
|
| 22 |
+
|
| 23 |
+
enum class TransformerType : uint8_t { UNKNOWN = 0, GPTType = 1, BERTType = 2 };
|
| 24 |
+
|
| 25 |
+
// NOTE: this is a temporary and dodgy solution to distinguish GPT and BERT style models
|
| 26 |
+
// based on the dimensions of the corresponding attention mask.
|
| 27 |
+
inline auto infer_transformer_type(at::Tensor& attn_mask) -> TransformerType
|
| 28 |
+
{
|
| 29 |
+
auto attn_mask_num_dims = attn_mask.sizes().size();
|
| 30 |
+
|
| 31 |
+
if (attn_mask_num_dims > 2) {
|
| 32 |
+
return TransformerType::GPTType;
|
| 33 |
+
} else if (attn_mask_num_dims == 2) {
|
| 34 |
+
return TransformerType::BERTType;
|
| 35 |
+
} else {
|
| 36 |
+
return TransformerType::UNKNOWN;
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
// infer stride of attention mask memory layout based on the model type.
|
| 41 |
+
inline auto get_attn_mask_stride(at::Tensor& attn_mask) -> int
|
| 42 |
+
{
|
| 43 |
+
auto trnsfrmr_type = infer_transformer_type(attn_mask);
|
| 44 |
+
|
| 45 |
+
if (trnsfrmr_type == TransformerType::GPTType) {
|
| 46 |
+
return attn_mask.size(2);
|
| 47 |
+
} else if (trnsfrmr_type == TransformerType::BERTType) {
|
| 48 |
+
// Bert style models have always a mask stride of 1.
|
| 49 |
+
return 1;
|
| 50 |
+
} else if (trnsfrmr_type == TransformerType::UNKNOWN) {
|
| 51 |
+
return 0;
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
// this is just to make the compiler happy.
|
| 55 |
+
return 0;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
template <typename T>
|
| 59 |
+
at::Tensor ds_softmax(at::Tensor& attn_scores,
|
| 60 |
+
at::Tensor& attn_mask,
|
| 61 |
+
at::Tensor& alibi,
|
| 62 |
+
bool triangular,
|
| 63 |
+
bool recompute,
|
| 64 |
+
bool local_attention,
|
| 65 |
+
int window_size,
|
| 66 |
+
bool async_op,
|
| 67 |
+
float layer_scale,
|
| 68 |
+
int head_offset,
|
| 69 |
+
int mp_size)
|
| 70 |
+
{
|
| 71 |
+
auto attn_scores_c = attn_scores.contiguous();
|
| 72 |
+
int bsz = attn_scores_c.size(0);
|
| 73 |
+
|
| 74 |
+
int seq_len = attn_scores_c.size(1);
|
| 75 |
+
int len = attn_scores_c.sizes().size();
|
| 76 |
+
if (len > 2) seq_len = attn_scores_c.size(2);
|
| 77 |
+
|
| 78 |
+
int soft_len = attn_scores_c.size(2);
|
| 79 |
+
if (len > 3) soft_len = attn_scores_c.size(3);
|
| 80 |
+
|
| 81 |
+
int heads = 1;
|
| 82 |
+
if (len > 1) heads = attn_scores_c.size(1);
|
| 83 |
+
|
| 84 |
+
auto mask_stride = get_attn_mask_stride(attn_mask);
|
| 85 |
+
|
| 86 |
+
launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(),
|
| 87 |
+
(attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr),
|
| 88 |
+
(alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr),
|
| 89 |
+
layer_scale,
|
| 90 |
+
triangular,
|
| 91 |
+
recompute,
|
| 92 |
+
local_attention,
|
| 93 |
+
window_size,
|
| 94 |
+
bsz,
|
| 95 |
+
heads,
|
| 96 |
+
seq_len,
|
| 97 |
+
soft_len,
|
| 98 |
+
head_offset,
|
| 99 |
+
mask_stride,
|
| 100 |
+
mp_size,
|
| 101 |
+
InferenceContext::Instance().GetCurrentStream(async_op));
|
| 102 |
+
|
| 103 |
+
return attn_scores_c;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
// Allocate (or re-size) the shared inference workspace for this model
// configuration. Pure forwarding wrapper: all sizing logic lives in
// InferenceContext::GenWorkSpace; the element size is fixed by the template
// parameter T via sizeof(T).
template <typename T>
void allocate_workspace(unsigned hidden_dim,
                        unsigned num_heads,
                        unsigned prompt_length,
                        unsigned batch_size,
                        unsigned num_layers,
                        unsigned mp_size = 1,
                        bool external_cache = false,
                        unsigned rank = 0,
                        unsigned max_out_tokens = 1024,
                        unsigned min_out_tokens = 1)
{
    InferenceContext::Instance().GenWorkSpace(num_layers,
                                              num_heads,
                                              batch_size,
                                              prompt_length,
                                              hidden_dim,
                                              mp_size,
                                              external_cache,
                                              sizeof(T),
                                              rank,
                                              max_out_tokens,
                                              min_out_tokens);
}
|
| 130 |
+
|
| 131 |
+
// Einsum "sec,sm->ecm" implemented as a single cuBLAS GEMM (W^T contracted
// against Q over Q's leading dimension). The output aliases the shared
// inference workspace via at::from_blob — it is NOT a fresh allocation, so it
// is only valid until the workspace is reused.
template <typename T>
at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W)
{
    auto options = at::TensorOptions()
                       .dtype(Q.options().dtype())
                       .layout(at::kStrided)
                       .device(at::kCUDA)
                       .requires_grad(false);
    T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
    float alpha = 1;
    float gemm_beta = 0.0;

    /*
    // Reallocate memory if we received a new prompt
    if (!workspace || input.size(1) != 1) {
        allocate_workspace<T>(W.size(1), InferenceContext::Instance().GetMaxTokenLength(),
        Q.size(0), 1, head_size); workspace = (T*)InferenceContext::Instance().GetWorkSpace();
    }
    */

    // Output shape [e, c, m] = [Q.size(1), Q.size(2), W.size(1)], backed by
    // the workspace buffer (no copy).
    auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options);
    unsigned m = W.size(1);
    unsigned n = Q.size(1) * Q.size(2);  // e*c rows collapsed into one GEMM dim
    unsigned k = Q.size(0);              // contraction dim: s
    cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
                   CUBLAS_OP_N,
                   CUBLAS_OP_T,
                   m,
                   n,
                   k,
                   &alpha,
                   &gemm_beta,
                   (T*)W.data_ptr(),
                   (T*)Q.data_ptr(),
                   (T*)O.data_ptr(),
#ifdef __HIP_PLATFORM_AMD__
                   rocblas_gemm_algo_standard);
#else
                   CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
    return O;
}
|
| 173 |
+
|
| 174 |
+
// Unfused attention (tensor overload): computes
//   output = softmax(norm_factor * Q @ K^T + mask) @ V
// as two strided-batched GEMMs with the fused softmax kernel in between.
// `attn_score` is a temporary [bsz, heads, seq_len, soft_len] allocation.
// All reference-to-int parameters are in/out by signature only; nothing here
// writes them back.
template <typename T>
void attention_unfused(at::Tensor& prev_key_cont,
                       at::Tensor& query_cont,
                       at::Tensor& attn_mask,
                       at::Tensor& prev_value_cont,
                       at::Tensor& output,
                       int& bsz,
                       int& seq_len,
                       int& soft_len,
                       int& heads,
                       float& norm_factor,
                       bool triangular,
                       bool recompute,
                       bool local_attention,
                       int window_size)
{
    auto options = at::TensorOptions()
                       .dtype(query_cont.options().dtype())
                       .layout(at::kStrided)
                       .device(at::kCUDA)
                       .requires_grad(false);
    // First GEMM folds the attention scale into alpha.
    float alpha = norm_factor;
    float gemm_beta = 0.0;
    auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options);
    int k = prev_value_cont.size(2) / heads;  // per-head feature size

    auto mask_stride = get_attn_mask_stride(attn_mask);

    cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
                    InferenceContext::Instance().GetCurrentStream());
    // scores = alpha * K @ Q per (batch * head) slice.
    cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(),
                                soft_len,
                                seq_len,
                                k,
                                &alpha,
                                &gemm_beta,
                                (T*)prev_key_cont.data_ptr(),
                                (T*)query_cont.data_ptr(),
                                (T*)attn_score.data_ptr(),
                                CUBLAS_OP_N,
                                CUBLAS_OP_N,
                                soft_len * k,
                                seq_len * k,
                                seq_len * soft_len,
                                bsz * heads,
#ifdef __HIP_PLATFORM_AMD__
                                rocblas_gemm_algo_standard);
#else
                                CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
    // In-place masked softmax over the score tensor; no ALiBi in this path.
    launch_attn_softmax_v2((T*)attn_score.data_ptr(),
                           (T*)(attn_mask.sizes().size() > 1 ? attn_mask.data_ptr() : nullptr),
                           (T*)nullptr,
                           1.0,
                           triangular,
                           recompute,
                           local_attention,
                           window_size,
                           bsz,
                           heads,
                           seq_len,
                           soft_len,
                           0,
                           mask_stride,
                           1,
                           InferenceContext::Instance().GetCurrentStream(false));
    // Second GEMM is unscaled: output = probs @ V.
    alpha = 1.0;
    cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(),
                                k,
                                seq_len,
                                soft_len,
                                &alpha,
                                &gemm_beta,
                                (T*)prev_value_cont.data_ptr(),
                                (T*)attn_score.data_ptr(),
                                (T*)output.data_ptr(),
                                CUBLAS_OP_N,
                                CUBLAS_OP_N,
                                soft_len * k,
                                seq_len * soft_len,
                                seq_len * k,
                                bsz * heads,
#ifdef __HIP_PLATFORM_AMD__
                                rocblas_gemm_algo_standard);
#else
                                CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
}
|
| 262 |
+
|
| 263 |
+
// Legacy softmax-context entry point: runs unfused attention against the
// provided (already materialized) key/value tensors and returns
// {output, prev_key, prev_value}. Note: `new_key`, `merging` and `no_masking`
// are accepted but unused in this body; `new_value` only gates the
// triangular/recompute flags (assumed to signal "prompt vs. decode" — TODO
// confirm against callers).
template <typename T>
std::vector<at::Tensor> ds_softmax_context1(at::Tensor& query,
                                            at::Tensor& prev_key,
                                            at::Tensor& new_key,
                                            at::Tensor& attn_mask,
                                            at::Tensor& prev_value,
                                            at::Tensor& new_value,
                                            int heads,
                                            float norm_factor,
                                            bool merging,
                                            bool triangular,
                                            bool local_attention,
                                            int window_size,
                                            bool no_masking)
{
    auto query_cont = query.contiguous();
    auto prev_key_cont = prev_key.contiguous();
    auto prev_value_cont = prev_value.contiguous();

    // new_value with fewer than 2 dims is treated as "no new tokens".
    int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0);

    // Attn_Score [ batch Head Sequence-length Softmax-length]

    int bsz = query_cont.size(0);
    int seq_len = query_cont.size(1);
    int soft_len = prev_value.size(1);

    auto options = at::TensorOptions()
                       .dtype(query_cont.options().dtype())
                       .layout(at::kStrided)
                       .device(at::kCUDA)
                       .requires_grad(false);

    auto output =
        at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options);
    attention_unfused<T>(prev_key_cont,
                         query_cont,
                         attn_mask, //(no_masking ? nullptr : (T*)attn_mask.data_ptr()),
                         prev_value_cont,
                         output,
                         bsz,
                         seq_len,
                         soft_len,
                         heads,
                         norm_factor,
                         (triangular && (new_size == 0)),
                         (new_size == 0),
                         local_attention,
                         window_size);

    return {output, prev_key, prev_value};
}
|
| 315 |
+
|
| 316 |
+
// Internal softmax helper operating on a raw score pointer (typically a
// workspace buffer) rather than a tensor. Head offset and mp_size are fixed
// at 0 and 1; runs on the current ATen CUDA stream rather than the
// InferenceContext stream.
template <typename T>
void ds_softmax_internal(T* attn_scores,
                         at::Tensor& attn_mask,
                         at::Tensor& alibi,
                         float& layer_scale,
                         bool triangular,
                         bool recompute,
                         bool local_attention,
                         int window_size,
                         int bsz,
                         int seq_len,
                         int soft_len,
                         int heads)
{
    auto mask_stride = get_attn_mask_stride(attn_mask);

    // Mask / alibi with fewer than 2 dims are treated as absent.
    launch_attn_softmax_v2((T*)attn_scores,
                           (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr),
                           (alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr),
                           layer_scale,
                           triangular,
                           recompute,
                           local_attention,
                           window_size,
                           bsz,
                           heads,
                           seq_len,
                           soft_len,
                           0,
                           mask_stride,
                           1,
                           at::cuda::getCurrentCUDAStream());
}
|
| 349 |
+
|
| 350 |
+
// Unfused attention (raw-pointer overload) over the persistent KV cache.
// Keys/values are laid out with a per-head stride of
// GetMaxTokenLength() * k, so each batched GEMM strides over the full cache
// capacity rather than the live token count. Scores are staged in a
// dedicated workspace buffer.
template <typename T>
void attention_unfused(T* prev_key_cont,
                       T* query_cont,
                       at::Tensor& attn_mask,
                       T* prev_value_cont,
                       T* output,
                       unsigned& bsz,
                       int& k,
                       unsigned& seq_len,
                       unsigned& soft_len,
                       int& heads,
                       float& norm_factor,
                       bool triangular,
                       bool recompute,
                       bool local_attention,
                       int window_size,
                       at::Tensor& alibi,
                       int layer_id)
{
    // With ALiBi present, scores are pre-divided by max(1, layer_id) and the
    // softmax kernel re-applies the scale — presumably a fp16 overflow guard;
    // TODO confirm. Note std::max(1, layer_id) is integer-valued.
    float layer_scale = alibi.sizes().size() > 1 ? std::max(1, layer_id) : 1.0;
    // norm_factor is applied twice (squared) here, folded into the GEMM alpha.
    float alpha = norm_factor * norm_factor / layer_scale;
    float gemm_beta = 0.0;
    T* workspace = (T*)InferenceContext::Instance().GetAttentionUnfusedWorkspace();

    cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
                    InferenceContext::Instance().GetCurrentStream());
    // scores = alpha * K^T @ Q; batch stride on K spans the whole cache.
    cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(),
                                soft_len,
                                seq_len,
                                k,
                                &alpha,
                                &gemm_beta,
                                (T*)prev_key_cont,
                                (T*)query_cont,
                                workspace,
                                CUBLAS_OP_T,
                                CUBLAS_OP_N,
                                InferenceContext::Instance().GetMaxTokenLength() * k,
                                seq_len * k,
                                seq_len * soft_len,
                                bsz * heads,
#ifdef __HIP_PLATFORM_AMD__
                                rocblas_gemm_algo_standard);
#else
                                CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
    ds_softmax_internal<T>(workspace,
                           attn_mask,
                           alibi,
                           layer_scale,
                           triangular,
                           recompute,
                           local_attention,
                           window_size,
                           bsz,
                           seq_len,
                           soft_len,
                           heads);
    // output = probs @ V (unscaled).
    alpha = 1.0;
    cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(),
                                k,
                                seq_len,
                                soft_len,
                                &alpha,
                                &gemm_beta,
                                (T*)prev_value_cont,
                                workspace,
                                (T*)output,
                                CUBLAS_OP_N,
                                CUBLAS_OP_N,
                                InferenceContext::Instance().GetMaxTokenLength() * k,
                                seq_len * soft_len,
                                seq_len * k,
                                bsz * heads,
#ifdef __HIP_PLATFORM_AMD__
                                rocblas_gemm_algo_standard);
#else
                                CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
}
|
| 430 |
+
|
| 431 |
+
// Reset the inference token counter so the next request starts fresh.
void reset_cache()
{
    InferenceContext::Instance().reset_tokens();
}
|
| 432 |
+
|
| 433 |
+
// Main fused softmax-context path over a packed QKV tensor. Splits QKV into
// the workspace-resident query buffer and the persistent KV cache (applying
// rotary embeddings), runs unfused attention, and re-lays the result out as
// [bsz, seq_len, hidden_dim]. Returns {output, prev_key, prev_value}, where
// the key/value tensors are zero-copy views into the cache.
//
// Workspace layout (offsets in elements of T; presumably fixed by
// GenWorkSpace — TODO confirm): output at 4*buf_size, query at 5*buf_size,
// per-layer KV cache at `offset`, values `value_offset` past keys.
template <typename T>
std::vector<at::Tensor> ds_softmax_context(at::Tensor& query_key_value,
                                           at::Tensor& attn_mask,
                                           int rotary_dim,
                                           bool rotate_half,
                                           bool rotate_every_two,
                                           int heads,
                                           int num_kv,
                                           float norm_factor,
                                           bool triangular,
                                           bool local_attention,
                                           int window_size,
                                           bool no_masking,
                                           unsigned layer_id,
                                           unsigned num_layers,
                                           at::Tensor& alibi,
                                           float rope_theta)
{
    unsigned bsz = query_key_value.size(0);
    unsigned seq_len = query_key_value.size(1);
    // Packed layout holds one Q head plus 2 KV heads per group (MQA/GQA when
    // num_kv > 0, otherwise standard MHA).
    int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? num_kv : heads));
    unsigned hidden_dim = heads * k;

    bool is_prompt = (seq_len > 1);

    if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len);
    unsigned soft_len = InferenceContext::Instance().current_tokens();

    auto options = at::TensorOptions()
                       .dtype(query_key_value.options().dtype())
                       .layout(at::kStrided)
                       .device(at::kCUDA)
                       .requires_grad(false);

    T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
    size_t buf_size = bsz * seq_len * hidden_dim;
    auto output = torch::from_blob(workspace + 4 * buf_size, {bsz, seq_len, hidden_dim}, options);

    auto query_cont = workspace + 5 * buf_size;
    size_t offset =
        10 * (hidden_dim * bsz * InferenceContext::Instance().GetMaxTokenLength()) +
        layer_id * 2 * bsz * InferenceContext::Instance().GetMaxTokenLength() * hidden_dim;
    unsigned all_tokens = soft_len;
    // During decode, append the new token at position soft_len - 1.
    auto kv_cache = workspace + offset + (hidden_dim / heads) * (is_prompt ? 0 : soft_len - 1);
    size_t value_offset = bsz * InferenceContext::Instance().GetMaxTokenLength() * hidden_dim;

    T* temp_buf = (T*)output.data_ptr() + at::numel(output);
    // Split packed QKV into query buffer + KV cache, applying rotary
    // embedding inside the kernel unless rotate_half defers it below.
    launch_bias_add_transform_0213<T>((T*)query_cont,
                                      kv_cache,
                                      kv_cache + value_offset,
                                      (T*)query_key_value.data_ptr(),
                                      nullptr,
                                      bsz,
                                      seq_len,
                                      (is_prompt ? 0 : soft_len - 1),
                                      soft_len,
                                      hidden_dim,
                                      heads,
                                      (num_kv > 0 ? num_kv : heads),
                                      rotary_dim,
                                      rotate_half,
                                      rotate_every_two,
                                      InferenceContext::Instance().GetCurrentStream(),
                                      3,
                                      InferenceContext::Instance().GetMaxTokenLength(),
                                      rope_theta);
    // GPT-NeoX-style rotation runs as a separate kernel pass.
    if (rotary_dim > 0 && rotate_half)
        launch_apply_rotary_pos_emb(query_cont,
                                    kv_cache,
                                    k,
                                    seq_len,
                                    rotary_dim,
                                    (is_prompt ? 0 : soft_len - 1),
                                    heads,
                                    bsz,
                                    rope_theta,
                                    InferenceContext::Instance().GetCurrentStream(),
                                    InferenceContext::Instance().GetMaxTokenLength());

    attention_unfused<T>(workspace + offset,
                         (T*)query_cont,
                         attn_mask,
                         workspace + offset + value_offset,
                         temp_buf,
                         bsz,
                         k,
                         seq_len,
                         all_tokens,
                         heads,
                         norm_factor,
                         (triangular && is_prompt),
                         is_prompt,
                         local_attention,
                         window_size,
                         alibi,
                         layer_id);
    // Transpose [bsz, heads, seq, k] back to [bsz, seq, hidden_dim].
    launch_transform4d_0213<T>((T*)output.data_ptr(),
                               temp_buf,
                               bsz,
                               heads,
                               seq_len,
                               output.size(2),
                               InferenceContext::Instance().GetCurrentStream(false),
                               1);

    // Only advance the shared token counter once per model pass.
    if (layer_id == num_layers - 1) InferenceContext::Instance().advance_tokens();
    // Zero-copy strided views of the cache; strides span the full cache
    // capacity (GetMaxTokenLength), not just the live tokens.
    auto prev_key = torch::from_blob(workspace + offset,
                                     {bsz, heads, all_tokens, k},
                                     {hidden_dim * InferenceContext::Instance().GetMaxTokenLength(),
                                      k * InferenceContext::Instance().GetMaxTokenLength(),
                                      k,
                                      1},
                                     options);

    auto prev_value =
        torch::from_blob(workspace + offset + value_offset,
                         {bsz, heads, all_tokens, k},
                         {hidden_dim * InferenceContext::Instance().GetMaxTokenLength(),
                          k * InferenceContext::Instance().GetMaxTokenLength(),
                          k,
                          1},
                         options);

    return {output, prev_key, prev_value};
}
|
| 558 |
+
|
| 559 |
+
template <typename T>
|
| 560 |
+
at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias)
|
| 561 |
+
{
|
| 562 |
+
auto input_cont = input.contiguous();
|
| 563 |
+
|
| 564 |
+
int bsz = input_cont.size(0) * input_cont.size(1);
|
| 565 |
+
int intermediate_size = input_cont.size(2);
|
| 566 |
+
|
| 567 |
+
launch_bias_gelu((T*)input_cont.data_ptr(),
|
| 568 |
+
(T*)bias.data_ptr(),
|
| 569 |
+
intermediate_size,
|
| 570 |
+
bsz,
|
| 571 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 572 |
+
return input_cont;
|
| 573 |
+
}
|
| 574 |
+
|
| 575 |
+
// Dtype dispatcher for the gated-activation kernel: runs the launch only when
// `activation` matches T_TYPE, reading rows/out_channels/channels/
// activation_type from the enclosing scope.
#define DISPATCH_GATED_ACT(T_TYPE, C_TYPE)                                \
    if (activation.options().dtype() == torch::T_TYPE) {                  \
        launch_gated_activation((C_TYPE*)output.data_ptr(),               \
                                (const C_TYPE*)activation.data_ptr(),     \
                                (const C_TYPE*)bias.data_ptr(),           \
                                rows,                                     \
                                out_channels,                             \
                                channels,                                 \
                                activation_type == ActivationFuncType::GATED_GELU, \
                                InferenceContext::Instance().GetCurrentStream());  \
    }

// Gated activation (GEGLU / SiLU-gated): the channel dimension is split in
// half, one half gating the other, so the output has channels/2 features.
at::Tensor ds_gated_activation(at::Tensor& activation, at::Tensor& bias, int actFun)
{
    /*
    Used in FF of Stable diffusion
    */

    const ActivationFuncType activation_type = static_cast<ActivationFuncType>(actFun);

    // Only the two gated variants are supported by the kernel.
    assert(activation_type == ActivationFuncType::GATED_GELU ||
           activation_type == ActivationFuncType::GATED_SILU);

    const int batch_size = activation.size(0);
    const int seq_len = activation.size(1);
    const int channels = activation.size(2);

    const int rows = batch_size * seq_len;
    // Dimensionality is cut in half
    const int out_channels = channels / 2;

    auto output = at::empty({batch_size, seq_len, out_channels}, activation.options());

    DISPATCH_GATED_ACT(kFloat, float);
    DISPATCH_GATED_ACT(kHalf, __half);
#ifdef BF16_AVAILABLE
    DISPATCH_GATED_ACT(kBFloat16, __nv_bfloat16);
#endif

    return output;
}
|
| 616 |
+
|
| 617 |
+
template <typename T>
|
| 618 |
+
at::Tensor ds_bias_relu(at::Tensor& input, at::Tensor& bias)
|
| 619 |
+
{
|
| 620 |
+
auto input_cont = input.contiguous();
|
| 621 |
+
|
| 622 |
+
int bsz = input_cont.size(0) * input_cont.size(1);
|
| 623 |
+
int intermediate_size = input_cont.size(2);
|
| 624 |
+
|
| 625 |
+
launch_bias_relu((T*)input_cont.data_ptr(),
|
| 626 |
+
(T*)bias.data_ptr(),
|
| 627 |
+
intermediate_size,
|
| 628 |
+
bsz,
|
| 629 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 630 |
+
return input_cont;
|
| 631 |
+
}
|
| 632 |
+
|
| 633 |
+
template <typename T>
|
| 634 |
+
at::Tensor ds_bias_add(at::Tensor& input, at::Tensor& bias)
|
| 635 |
+
{
|
| 636 |
+
auto input_cont = input.contiguous();
|
| 637 |
+
|
| 638 |
+
int bsz = input_cont.size(0) * input_cont.size(1);
|
| 639 |
+
int hidden_size = input_cont.size(2);
|
| 640 |
+
|
| 641 |
+
launch_bias_add((T*)input_cont.data_ptr(),
|
| 642 |
+
(T*)bias.data_ptr(),
|
| 643 |
+
hidden_size,
|
| 644 |
+
bsz,
|
| 645 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 646 |
+
return input_cont;
|
| 647 |
+
}
|
| 648 |
+
|
| 649 |
+
// NOTE(review): the kernel launch here is commented out, so this function
// currently returns a contiguous copy of `input` UNCHANGED — residual and
// bias are ignored. Presumably disabled deliberately upstream; confirm
// before relying on it.
template <typename T>
at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias)
{
    auto input_cont = input.contiguous();
    auto residual_cont = residual.contiguous();

    int bsz = input_cont.size(0) * input_cont.size(1);
    // launch_bias_residual((T*)input_cont.data_ptr(),
    //                      (T*)residual_cont.data_ptr(),
    //                      (T*)bias.data_ptr(),
    //                      bsz,
    //                      input_cont.size(2),
    //                      (bias.size(0) > 1),
    //                      InferenceContext::Instance().GetCurrentStream());
    return input_cont;
}
|
| 665 |
+
|
| 666 |
+
// Dtype dispatcher for the fused LayerNorm kernel; reads epsilon/rows/
// elems_per_row and the input/gamma/beta/output tensors from the enclosing
// scope.
#define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE)                   \
    if (input.options().dtype() == torch::T_TYPE) {           \
        launch_fused_ln((C_TYPE*)output.data_ptr(),           \
                        (const C_TYPE*)input.data_ptr(),      \
                        (const C_TYPE*)gamma.data_ptr(),      \
                        (const C_TYPE*)beta.data_ptr(),       \
                        epsilon,                              \
                        rows,                                 \
                        elems_per_row,                        \
                        InferenceContext::Instance().GetCurrentStream()); \
    }

// LayerNorm over the last dimension of a 3-D input; returns a new tensor.
at::Tensor ds_layer_norm(at::Tensor& input, at::Tensor& gamma, at::Tensor& beta, float epsilon)
{
    const int rows = input.size(0) * input.size(1);
    const int elems_per_row = input.size(2);
    auto output = at::empty_like(input);

    DISPATCH_LAYER_NORM(kFloat, float);
    DISPATCH_LAYER_NORM(kHalf, __half);
#ifdef BF16_AVAILABLE
    DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16);
#endif

    return output;
}
|
| 692 |
+
|
| 693 |
+
// Dtype dispatcher for plain RMSNorm (no residual: the residual-input and
// residual-output pointers are passed as nullptr).
#define DISPATCH_RMS_NORM(T_TYPE, C_TYPE)                     \
    if (input.options().dtype() == torch::T_TYPE) {           \
        launch_rms_norm((C_TYPE*)output.data_ptr(),           \
                        (C_TYPE*)nullptr,                     \
                        (const C_TYPE*)input.data_ptr(),      \
                        (const C_TYPE*)nullptr,               \
                        (const C_TYPE*)gamma.data_ptr(),      \
                        epsilon,                              \
                        rows,                                 \
                        elems_per_row,                        \
                        InferenceContext::Instance().GetCurrentStream()); \
    }

// RMSNorm over the last dimension; accepts 2-D or 3-D input and returns a
// new tensor of the same shape.
at::Tensor ds_rms_norm(at::Tensor& input, at::Tensor& gamma, float epsilon)
{
    // Get number of dims of tensor
    int num_dims = input.dim();
    const int rows = (num_dims == 2) ? input.size(0) : input.size(0) * input.size(1);
    const int elems_per_row = (num_dims == 2) ? input.size(1) : input.size(2);

    auto output = at::empty_like(input);

    DISPATCH_RMS_NORM(kFloat, float);
    DISPATCH_RMS_NORM(kHalf, __half);
#ifdef BF16_AVAILABLE
    DISPATCH_RMS_NORM(kBFloat16, __nv_bfloat16);
#endif

    return output;
}
|
| 723 |
+
|
| 724 |
+
// Dtype dispatcher for pre-RMSNorm: the kernel also receives the residual
// and writes the (input + residual) sum into res_out.
#define DISPATCH_PRE_RMS_NORM(T_TYPE, C_TYPE)                 \
    if (input.options().dtype() == torch::T_TYPE) {           \
        launch_rms_norm((C_TYPE*)output.data_ptr(),           \
                        (C_TYPE*)res_out.data_ptr(),          \
                        (const C_TYPE*)input.data_ptr(),      \
                        (const C_TYPE*)residual.data_ptr(),   \
                        (const C_TYPE*)gamma.data_ptr(),      \
                        epsilon,                              \
                        rows,                                 \
                        elems_per_row,                        \
                        InferenceContext::Instance().GetCurrentStream()); \
    }

// Pre-RMSNorm: returns {normed_output, residual_output}; accepts 2-D or
// 3-D input.
std::vector<at::Tensor> ds_pre_rms_norm(at::Tensor& input,
                                        at::Tensor& residual,
                                        at::Tensor& gamma,
                                        float epsilon)
{
    // Get number of dims of tensor
    int num_dims = input.dim();
    const int rows = (num_dims == 2) ? input.size(0) : input.size(0) * input.size(1);
    const int elems_per_row = (num_dims == 2) ? input.size(1) : input.size(2);

    auto output = at::empty_like(input);
    auto res_out = at::empty_like(residual);

    DISPATCH_PRE_RMS_NORM(kFloat, float);
    DISPATCH_PRE_RMS_NORM(kHalf, __half);
#ifdef BF16_AVAILABLE
    DISPATCH_PRE_RMS_NORM(kBFloat16, __nv_bfloat16);
#endif

    return {output, res_out};
}
|
| 758 |
+
|
| 759 |
+
template <typename T>
|
| 760 |
+
void ds_layer_norm_internal(T* workspace,
|
| 761 |
+
at::Tensor& input,
|
| 762 |
+
at::Tensor& gamma,
|
| 763 |
+
at::Tensor& beta,
|
| 764 |
+
float epsilon)
|
| 765 |
+
{
|
| 766 |
+
int bsz = input.size(0) * input.size(1);
|
| 767 |
+
launch_fused_ln(workspace,
|
| 768 |
+
(const T*)input.data_ptr(),
|
| 769 |
+
(const T*)gamma.data_ptr(),
|
| 770 |
+
(const T*)beta.data_ptr(),
|
| 771 |
+
epsilon,
|
| 772 |
+
bsz,
|
| 773 |
+
input.size(2),
|
| 774 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 775 |
+
}
|
| 776 |
+
|
| 777 |
+
// Dtype dispatcher for fused residual LayerNorm:
// output = LN(input + residual + bias).
#define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE)                 \
    if (input.options().dtype() == torch::T_TYPE) {                  \
        launch_fused_residual_ln((C_TYPE*)output.data_ptr(),         \
                                 (const C_TYPE*)input.data_ptr(),    \
                                 (const C_TYPE*)residual.data_ptr(), \
                                 (const C_TYPE*)bias.data_ptr(),     \
                                 (const C_TYPE*)gamma.data_ptr(),    \
                                 (const C_TYPE*)beta.data_ptr(),     \
                                 epsilon,                            \
                                 rows,                               \
                                 elems_per_row,                      \
                                 InferenceContext::Instance().GetCurrentStream()); \
    }

/* Currently only used in unit testing */
at::Tensor ds_layer_norm_residual(at::Tensor& input,
                                  at::Tensor& bias,
                                  at::Tensor& residual,
                                  at::Tensor& gamma,
                                  at::Tensor& beta,
                                  float epsilon)
{
    const int rows = input.size(0) * input.size(1);
    const int elems_per_row = input.size(2);
    auto output = at::empty_like(input);

    DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float);
    DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half);
#ifdef BF16_AVAILABLE
    DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16);
#endif

    return output;
}
|
| 811 |
+
|
| 812 |
+
// Dtype dispatcher for residual LayerNorm that additionally stores the
// pre-LN sum (input + residual + bias) into res_output.
#define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE)        \
    if (input.options().dtype() == torch::T_TYPE) {             \
        launch_fused_residual_ln_store_pre_ln_res(              \
            (C_TYPE*)norm_output.data_ptr(),                    \
            (C_TYPE*)res_output.data_ptr(),                     \
            (const C_TYPE*)input.data_ptr(),                    \
            (const C_TYPE*)residual.data_ptr(),                 \
            (const C_TYPE*)bias.data_ptr(),                     \
            (const C_TYPE*)gamma.data_ptr(),                    \
            (const C_TYPE*)beta.data_ptr(),                     \
            epsilon,                                            \
            rows,                                               \
            elems_per_row,                                      \
            InferenceContext::Instance().GetCurrentStream());   \
    }

/* Currently only used in unit testing */
// Returns {normed_output, pre_ln_residual_sum}.
std::vector<at::Tensor> ds_layer_norm_residual_store_pre_ln_res(at::Tensor& input,
                                                                at::Tensor& bias,
                                                                at::Tensor& residual,
                                                                at::Tensor& gamma,
                                                                at::Tensor& beta,
                                                                float epsilon)
{
    const int rows = input.size(0) * input.size(1);
    const int elems_per_row = input.size(2);
    auto norm_output = at::empty_like(input);
    auto res_output = at::empty_like(input);

    DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float);
    DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half);
#ifdef BF16_AVAILABLE
    DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16);
#endif

    return {norm_output, res_output};
}
|
| 849 |
+
|
| 850 |
+
template <typename T>
|
| 851 |
+
void quantized_gemm(void* output,
|
| 852 |
+
T* input,
|
| 853 |
+
at::Tensor& weight,
|
| 854 |
+
at::Tensor& qscale,
|
| 855 |
+
int groups,
|
| 856 |
+
int bsz,
|
| 857 |
+
int hidden_size)
|
| 858 |
+
{
|
| 859 |
+
// T* weight16 = (T*)InferenceContext::Instance().GetWorkSpace() + 12 * hidden_size * bsz;
|
| 860 |
+
|
| 861 |
+
auto options = at::TensorOptions()
|
| 862 |
+
.dtype(at::kHalf)
|
| 863 |
+
.layout(at::kStrided)
|
| 864 |
+
.device(at::kCUDA)
|
| 865 |
+
.requires_grad(false);
|
| 866 |
+
auto tmp = torch::empty(weight.sizes(), options);
|
| 867 |
+
T* weight16 = (T*)tmp.data_ptr();
|
| 868 |
+
launch_dequantize(weight16,
|
| 869 |
+
(int8_t*)weight.data_ptr(),
|
| 870 |
+
(float*)qscale.data_ptr(),
|
| 871 |
+
weight.size(0),
|
| 872 |
+
weight.size(1),
|
| 873 |
+
groups,
|
| 874 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 875 |
+
|
| 876 |
+
float alpha = (T)1.0;
|
| 877 |
+
float gemm_beta = (T)0.0;
|
| 878 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 879 |
+
CUBLAS_OP_T,
|
| 880 |
+
CUBLAS_OP_N,
|
| 881 |
+
weight.size(0),
|
| 882 |
+
bsz,
|
| 883 |
+
weight.size(1),
|
| 884 |
+
&alpha,
|
| 885 |
+
&gemm_beta,
|
| 886 |
+
weight16,
|
| 887 |
+
(T*)input,
|
| 888 |
+
(T*)output,
|
| 889 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 890 |
+
rocblas_gemm_algo_standard);
|
| 891 |
+
#else
|
| 892 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 893 |
+
#endif
|
| 894 |
+
}
|
| 895 |
+
|
| 896 |
+
// LayerNorm + QKV projection: normalizes `input` into a workspace buffer,
// multiplies by `weight` (int8 path via quantized_gemm, fp path via cuBLAS),
// optionally adds bias into `output`, and returns the NORMALIZED input
// (as a workspace-backed tensor) — the projection itself is written into
// the caller-provided `output`.
template <typename T>
at::Tensor qkv_unfused_cublas(at::Tensor& output,
                              at::Tensor& input,
                              at::Tensor& weight,
                              at::Tensor& q_scale,
                              at::Tensor& bias,
                              at::Tensor& gamma,
                              at::Tensor& beta,
                              const float epsilon,
                              bool add_bias,
                              bool q_int8,
                              bool transposed_mode)
{
    int bsz = input.size(0) * input.size(1);
    T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
    // Skip past the region reserved for the QKV projection output.
    workspace += (3 * bsz * input.size(2));
    ds_layer_norm_internal<T>(workspace, input, gamma, beta, epsilon);

    if (q_int8) {
        quantized_gemm<T>(
            output.data_ptr(), workspace, weight, q_scale, q_scale.size(0), bsz, input.size(2));
    } else {
        float alpha = (T)1.0;
        float gemm_beta = (T)0.0;

        cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
                        InferenceContext::Instance().GetCurrentStream());
        cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
                       (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
                       CUBLAS_OP_N,
                       weight.size(transposed_mode ? 0 : 1),
                       bsz,
                       input.size(2),
                       &alpha,
                       &gemm_beta,
                       (T*)weight.data_ptr(),
                       workspace,
                       (T*)output.data_ptr(),
#ifdef __HIP_PLATFORM_AMD__
                       rocblas_gemm_algo_standard);
#else
                       CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
    }
    if (add_bias)
        launch_bias_add((T*)output.data_ptr(),
                        (T*)bias.data_ptr(),
                        // Output width depends on the weight orientation.
                        (transposed_mode || q_int8) ? weight.size(0) : weight.size(1),
                        bsz,
                        InferenceContext::Instance().GetCurrentStream());
    // Workspace-backed view of the layer-normed input (same shape as input).
    return torch::from_blob(workspace, input.sizes(), input.options());
}
|
| 948 |
+
|
| 949 |
+
// RMSNorm + QKV projection. Both the projection output and the RMS-normed
// input live in the shared workspace (output at the base, rms_norm
// 3*bsz*hidden elements past it); returns {output, rms_norm} as
// workspace-backed views.
template <typename T>
std::vector<at::Tensor> ds_rms_qkv(at::Tensor& input,
                                   at::Tensor& weight,
                                   at::Tensor& q_scale,
                                   at::Tensor& gamma,
                                   const float epsilon,
                                   bool q_int8,
                                   bool transposed_mode)
{
    const int bsz = input.size(0) * input.size(1);
    T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
    T* rms_norm_ptr = workspace + (3 * bsz * input.size(2));
    // Output width depends on how the weight matrix is oriented.
    int out_size = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1);

    auto options = at::TensorOptions()
                       .dtype(input.options().dtype())
                       .layout(at::kStrided)
                       .device(at::kCUDA)
                       .requires_grad(false);
    auto rms_norm = at::from_blob(rms_norm_ptr, input.sizes(), options);
    auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options);

    // Plain RMSNorm (no residual): residual pointers are nullptr.
    launch_rms_norm((T*)rms_norm.data_ptr(),
                    (T*)nullptr,
                    (const T*)input.data_ptr(),
                    (const T*)nullptr,
                    (const T*)gamma.data_ptr(),
                    epsilon,
                    bsz,
                    input.size(2),
                    InferenceContext::Instance().GetCurrentStream());

    if (q_int8) {
        quantized_gemm<T>((T*)output.data_ptr(),
                          (T*)rms_norm.data_ptr(),
                          weight,
                          q_scale,
                          q_scale.size(0),
                          bsz,
                          input.size(2));
    } else {
        float alpha = (T)1.0;
        float gemm_beta = (T)0.0;

        cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
                        InferenceContext::Instance().GetCurrentStream());
        cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
                       (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
                       CUBLAS_OP_N,
                       weight.size(transposed_mode ? 0 : 1),
                       bsz,
                       input.size(2),
                       &alpha,
                       &gemm_beta,
                       (T*)weight.data_ptr(),
                       (T*)rms_norm.data_ptr(),
                       (T*)output.data_ptr(),
#ifdef __HIP_PLATFORM_AMD__
                       rocblas_gemm_algo_standard);
#else
                       CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
    }

    return {output, rms_norm};
}
|
| 1015 |
+
|
| 1016 |
+
template <typename T>
|
| 1017 |
+
std::vector<at::Tensor> ds_qkv_gemm(at::Tensor& input,
|
| 1018 |
+
at::Tensor& weight,
|
| 1019 |
+
at::Tensor& q_scale,
|
| 1020 |
+
at::Tensor& bias,
|
| 1021 |
+
at::Tensor& gamma,
|
| 1022 |
+
at::Tensor& beta,
|
| 1023 |
+
const float epsilon,
|
| 1024 |
+
bool add_bias,
|
| 1025 |
+
bool q_int8,
|
| 1026 |
+
bool transposed_mode)
|
| 1027 |
+
{
|
| 1028 |
+
int bsz = input.size(0) * input.size(1);
|
| 1029 |
+
T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
|
| 1030 |
+
int out_size = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1);
|
| 1031 |
+
|
| 1032 |
+
auto options = at::TensorOptions()
|
| 1033 |
+
.dtype(input.options().dtype())
|
| 1034 |
+
.layout(at::kStrided)
|
| 1035 |
+
.device(at::kCUDA)
|
| 1036 |
+
.requires_grad(false);
|
| 1037 |
+
|
| 1038 |
+
auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options);
|
| 1039 |
+
auto inp_norm = qkv_unfused_cublas<T>(output,
|
| 1040 |
+
input,
|
| 1041 |
+
weight,
|
| 1042 |
+
q_scale,
|
| 1043 |
+
bias,
|
| 1044 |
+
gamma,
|
| 1045 |
+
beta,
|
| 1046 |
+
epsilon,
|
| 1047 |
+
add_bias,
|
| 1048 |
+
q_int8,
|
| 1049 |
+
transposed_mode);
|
| 1050 |
+
|
| 1051 |
+
return {output, inp_norm};
|
| 1052 |
+
}
|
| 1053 |
+
|
| 1054 |
+
template <typename T>
|
| 1055 |
+
void quantized_gemm(at::Tensor& output,
|
| 1056 |
+
at::Tensor& input,
|
| 1057 |
+
at::Tensor& weight,
|
| 1058 |
+
at::Tensor& qscale,
|
| 1059 |
+
int groups,
|
| 1060 |
+
int merge_count)
|
| 1061 |
+
{
|
| 1062 |
+
int bsz = input.size(0) * input.size(1);
|
| 1063 |
+
auto options = at::TensorOptions()
|
| 1064 |
+
.dtype(input.options().dtype())
|
| 1065 |
+
.layout(at::kStrided)
|
| 1066 |
+
.device(at::kCUDA)
|
| 1067 |
+
.requires_grad(false);
|
| 1068 |
+
auto weight16 = at::empty({weight.size(0), weight.size(1)}, options);
|
| 1069 |
+
|
| 1070 |
+
launch_dequantize((T*)weight16.data_ptr(),
|
| 1071 |
+
(int8_t*)weight.data_ptr(),
|
| 1072 |
+
(float*)qscale.data_ptr(),
|
| 1073 |
+
weight.size(0),
|
| 1074 |
+
weight.size(1),
|
| 1075 |
+
groups,
|
| 1076 |
+
merge_count,
|
| 1077 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1078 |
+
|
| 1079 |
+
float alpha = (T)1.0;
|
| 1080 |
+
float gemm_beta = (T)0.0;
|
| 1081 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1082 |
+
CUBLAS_OP_T,
|
| 1083 |
+
CUBLAS_OP_N,
|
| 1084 |
+
weight.size(0),
|
| 1085 |
+
bsz,
|
| 1086 |
+
input.size(2),
|
| 1087 |
+
&alpha,
|
| 1088 |
+
&gemm_beta,
|
| 1089 |
+
(T*)weight16.data_ptr(),
|
| 1090 |
+
(T*)input.data_ptr(),
|
| 1091 |
+
(T*)output.data_ptr(),
|
| 1092 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1093 |
+
rocblas_gemm_algo_standard);
|
| 1094 |
+
#else
|
| 1095 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1096 |
+
#endif
|
| 1097 |
+
}
|
| 1098 |
+
|
| 1099 |
+
template <typename T>
|
| 1100 |
+
at::Tensor ds_linear_layer(at::Tensor& input,
|
| 1101 |
+
at::Tensor& weight,
|
| 1102 |
+
at::Tensor& bias,
|
| 1103 |
+
bool add_bias,
|
| 1104 |
+
bool do_flash_attn,
|
| 1105 |
+
int num_heads,
|
| 1106 |
+
bool transposed_mode,
|
| 1107 |
+
float rope_theta)
|
| 1108 |
+
{
|
| 1109 |
+
auto input_cont = input.contiguous();
|
| 1110 |
+
auto options = at::TensorOptions()
|
| 1111 |
+
.dtype(input_cont.options().dtype())
|
| 1112 |
+
.layout(at::kStrided)
|
| 1113 |
+
.device(at::kCUDA)
|
| 1114 |
+
.requires_grad(false);
|
| 1115 |
+
|
| 1116 |
+
int head_size = input_cont.size(2) / num_heads;
|
| 1117 |
+
int bsz = input.size(0) * input.size(1);
|
| 1118 |
+
int out_size = transposed_mode ? weight.size(0) : weight.size(1);
|
| 1119 |
+
T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
|
| 1120 |
+
auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options);
|
| 1121 |
+
|
| 1122 |
+
float alpha = (T)1.0;
|
| 1123 |
+
float gemm_beta = (T)0.0;
|
| 1124 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1125 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1126 |
+
|
| 1127 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1128 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1129 |
+
CUBLAS_OP_N,
|
| 1130 |
+
weight.size(transposed_mode ? 0 : 1),
|
| 1131 |
+
bsz,
|
| 1132 |
+
input_cont.size(2),
|
| 1133 |
+
&alpha,
|
| 1134 |
+
&gemm_beta,
|
| 1135 |
+
(T*)weight.data_ptr(),
|
| 1136 |
+
(T*)input_cont.data_ptr(),
|
| 1137 |
+
(T*)output.data_ptr(),
|
| 1138 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1139 |
+
rocblas_gemm_algo_standard);
|
| 1140 |
+
#else
|
| 1141 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1142 |
+
#endif
|
| 1143 |
+
if (add_bias)
|
| 1144 |
+
launch_bias_add((T*)output.data_ptr(),
|
| 1145 |
+
(T*)bias.data_ptr(),
|
| 1146 |
+
weight.size(transposed_mode ? 0 : 1),
|
| 1147 |
+
bsz,
|
| 1148 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1149 |
+
bool add_padding = (head_size % 32 != 0 && head_size < 64) || (head_size % 64 != 0);
|
| 1150 |
+
if (do_flash_attn) {
|
| 1151 |
+
if (add_padding) {
|
| 1152 |
+
int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 64 : 128);
|
| 1153 |
+
auto padded_output = workspace + output.numel();
|
| 1154 |
+
auto final_output =
|
| 1155 |
+
padded_output + (input.size(0) * input.size(1) * 3 * num_heads * padded_head_size);
|
| 1156 |
+
pad_data(padded_output,
|
| 1157 |
+
workspace,
|
| 1158 |
+
3 * bsz * num_heads,
|
| 1159 |
+
head_size,
|
| 1160 |
+
padded_head_size,
|
| 1161 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1162 |
+
|
| 1163 |
+
launch_bias_add_transform_0213<T>(
|
| 1164 |
+
final_output,
|
| 1165 |
+
final_output + (input.size(0) * input.size(1) * num_heads * padded_head_size),
|
| 1166 |
+
final_output + (input.size(0) * input.size(1) * 2 * num_heads * padded_head_size),
|
| 1167 |
+
padded_output,
|
| 1168 |
+
nullptr,
|
| 1169 |
+
input.size(0),
|
| 1170 |
+
input.size(1),
|
| 1171 |
+
0,
|
| 1172 |
+
input.size(1),
|
| 1173 |
+
(num_heads * padded_head_size),
|
| 1174 |
+
num_heads,
|
| 1175 |
+
-1,
|
| 1176 |
+
-1,
|
| 1177 |
+
false,
|
| 1178 |
+
false,
|
| 1179 |
+
InferenceContext::Instance().GetCurrentStream(),
|
| 1180 |
+
3,
|
| 1181 |
+
input.size(1),
|
| 1182 |
+
rope_theta);
|
| 1183 |
+
return at::from_blob(final_output,
|
| 1184 |
+
{3, input.size(0), num_heads, input.size(1), padded_head_size},
|
| 1185 |
+
options);
|
| 1186 |
+
// return at::from_blob(padded_output, {input.size(0) * input.size(1), 3, num_heads,
|
| 1187 |
+
// padded_head_size}, options);
|
| 1188 |
+
} else {
|
| 1189 |
+
auto final_output = workspace + output.numel();
|
| 1190 |
+
launch_bias_add_transform_0213<T>(
|
| 1191 |
+
final_output,
|
| 1192 |
+
final_output + (input.size(0) * input.size(1) * input_cont.size(2)),
|
| 1193 |
+
final_output + (input.size(0) * input.size(1) * 2 * input_cont.size(2)),
|
| 1194 |
+
workspace,
|
| 1195 |
+
nullptr,
|
| 1196 |
+
input.size(0),
|
| 1197 |
+
input.size(1),
|
| 1198 |
+
0,
|
| 1199 |
+
input.size(1),
|
| 1200 |
+
input_cont.size(2),
|
| 1201 |
+
num_heads,
|
| 1202 |
+
-1,
|
| 1203 |
+
-1,
|
| 1204 |
+
false,
|
| 1205 |
+
false,
|
| 1206 |
+
InferenceContext::Instance().GetCurrentStream(),
|
| 1207 |
+
3,
|
| 1208 |
+
input.size(1),
|
| 1209 |
+
rope_theta);
|
| 1210 |
+
return at::from_blob(
|
| 1211 |
+
final_output, {3, input.size(0), num_heads, input.size(1), head_size}, options);
|
| 1212 |
+
// return at::from_blob(workspace, {input.size(0) * input.size(1), 3, num_heads,
|
| 1213 |
+
// head_size}, options);
|
| 1214 |
+
}
|
| 1215 |
+
|
| 1216 |
+
} else
|
| 1217 |
+
return output;
|
| 1218 |
+
}
|
| 1219 |
+
|
| 1220 |
+
template <typename T>
|
| 1221 |
+
std::vector<at::Tensor> add_padding(at::Tensor& query, at::Tensor& key, at::Tensor& value)
|
| 1222 |
+
{
|
| 1223 |
+
int head_size = query.size(3);
|
| 1224 |
+
int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 64 : 128);
|
| 1225 |
+
T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
|
| 1226 |
+
T* key_pad_ptr = workspace + padded_head_size * query.size(0) * query.size(1) * query.size(2);
|
| 1227 |
+
T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * query.size(1) * 128;
|
| 1228 |
+
pad_head_seq(workspace,
|
| 1229 |
+
(T*)query.data_ptr(),
|
| 1230 |
+
query.size(0) * query.size(1),
|
| 1231 |
+
query.size(2),
|
| 1232 |
+
query.size(2),
|
| 1233 |
+
head_size,
|
| 1234 |
+
padded_head_size,
|
| 1235 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1236 |
+
pad_head_seq(key_pad_ptr,
|
| 1237 |
+
(T*)key.data_ptr(),
|
| 1238 |
+
query.size(0) * query.size(1),
|
| 1239 |
+
key.size(2),
|
| 1240 |
+
128,
|
| 1241 |
+
head_size,
|
| 1242 |
+
padded_head_size,
|
| 1243 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1244 |
+
pad_head_seq(value_pad_ptr,
|
| 1245 |
+
(T*)value.data_ptr(),
|
| 1246 |
+
query.size(0) * query.size(1),
|
| 1247 |
+
key.size(2),
|
| 1248 |
+
128,
|
| 1249 |
+
head_size,
|
| 1250 |
+
padded_head_size,
|
| 1251 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1252 |
+
return {
|
| 1253 |
+
at::from_blob(workspace,
|
| 1254 |
+
{query.size(0), query.size(1), query.size(2), padded_head_size},
|
| 1255 |
+
query.options()),
|
| 1256 |
+
at::from_blob(
|
| 1257 |
+
key_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options()),
|
| 1258 |
+
at::from_blob(
|
| 1259 |
+
value_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options())};
|
| 1260 |
+
}
|
| 1261 |
+
|
| 1262 |
+
template <typename T>
|
| 1263 |
+
std::vector<at::Tensor> padd_add_transform(at::Tensor& query,
|
| 1264 |
+
at::Tensor& key,
|
| 1265 |
+
at::Tensor& value,
|
| 1266 |
+
int heads,
|
| 1267 |
+
bool add_padding)
|
| 1268 |
+
{
|
| 1269 |
+
int head_size = query.size(2) / heads;
|
| 1270 |
+
int key_value_length = add_padding ? 128 : key.size(1);
|
| 1271 |
+
int padded_head_size = add_padding ? (head_size < 32 ? 32 : (head_size < 64 ? 64 : 128))
|
| 1272 |
+
: head_size;
|
| 1273 |
+
T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
|
| 1274 |
+
T* key_pad_ptr = workspace + padded_head_size * query.size(0) * heads * query.size(1);
|
| 1275 |
+
T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * heads * key_value_length;
|
| 1276 |
+
launch_pad_add_transform_0213(workspace,
|
| 1277 |
+
(T*)query.data_ptr(),
|
| 1278 |
+
query.size(0),
|
| 1279 |
+
query.size(2),
|
| 1280 |
+
query.size(1),
|
| 1281 |
+
query.size(1),
|
| 1282 |
+
heads,
|
| 1283 |
+
padded_head_size,
|
| 1284 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1285 |
+
launch_pad_add_transform_0213(key_pad_ptr,
|
| 1286 |
+
(T*)key.data_ptr(),
|
| 1287 |
+
key.size(0),
|
| 1288 |
+
key.size(2),
|
| 1289 |
+
key.size(1),
|
| 1290 |
+
key_value_length,
|
| 1291 |
+
heads,
|
| 1292 |
+
padded_head_size,
|
| 1293 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1294 |
+
launch_pad_add_transform_0213(value_pad_ptr,
|
| 1295 |
+
(T*)value.data_ptr(),
|
| 1296 |
+
value.size(0),
|
| 1297 |
+
value.size(2),
|
| 1298 |
+
value.size(1),
|
| 1299 |
+
key_value_length,
|
| 1300 |
+
heads,
|
| 1301 |
+
padded_head_size,
|
| 1302 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1303 |
+
return {
|
| 1304 |
+
at::from_blob(
|
| 1305 |
+
workspace, {query.size(0), heads, query.size(1), padded_head_size}, query.options()),
|
| 1306 |
+
at::from_blob(key_pad_ptr,
|
| 1307 |
+
{query.size(0), heads, key_value_length, padded_head_size},
|
| 1308 |
+
query.options()),
|
| 1309 |
+
at::from_blob(value_pad_ptr,
|
| 1310 |
+
{query.size(0), heads, key_value_length, padded_head_size},
|
| 1311 |
+
query.options())};
|
| 1312 |
+
}
|
| 1313 |
+
|
| 1314 |
+
template <typename T>
|
| 1315 |
+
at::Tensor ds_vector_matmul(at::Tensor& input,
|
| 1316 |
+
at::Tensor& weight,
|
| 1317 |
+
bool async_op,
|
| 1318 |
+
at::Tensor& q_scale,
|
| 1319 |
+
bool q_int8,
|
| 1320 |
+
bool transposed_mode)
|
| 1321 |
+
{
|
| 1322 |
+
auto options = at::TensorOptions()
|
| 1323 |
+
.dtype(input.options().dtype())
|
| 1324 |
+
.layout(at::kStrided)
|
| 1325 |
+
.device(at::kCUDA)
|
| 1326 |
+
.requires_grad(false);
|
| 1327 |
+
int out_size = (q_int8 || transposed_mode) ? weight.size(0) : weight.size(1);
|
| 1328 |
+
int bsz = input.size(0) * input.size(1);
|
| 1329 |
+
|
| 1330 |
+
T* workspace = (T*)InferenceContext::Instance().GetWorkSpace();
|
| 1331 |
+
auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options);
|
| 1332 |
+
if (q_int8) {
|
| 1333 |
+
quantized_gemm<T>(output.data_ptr(),
|
| 1334 |
+
(T*)input.data_ptr(),
|
| 1335 |
+
weight,
|
| 1336 |
+
q_scale,
|
| 1337 |
+
q_scale.size(0),
|
| 1338 |
+
bsz,
|
| 1339 |
+
input.size(2));
|
| 1340 |
+
} else {
|
| 1341 |
+
float alpha = (T)1.0;
|
| 1342 |
+
float gemm_beta = (T)0.0;
|
| 1343 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1344 |
+
InferenceContext::Instance().GetCurrentStream(async_op));
|
| 1345 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1346 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1347 |
+
CUBLAS_OP_N,
|
| 1348 |
+
weight.size(transposed_mode ? 0 : 1),
|
| 1349 |
+
bsz,
|
| 1350 |
+
input.size(2),
|
| 1351 |
+
&alpha,
|
| 1352 |
+
&gemm_beta,
|
| 1353 |
+
(T*)weight.data_ptr(),
|
| 1354 |
+
(T*)input.data_ptr(),
|
| 1355 |
+
(T*)output.data_ptr(),
|
| 1356 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1357 |
+
rocblas_gemm_algo_standard);
|
| 1358 |
+
#else
|
| 1359 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1360 |
+
#endif
|
| 1361 |
+
}
|
| 1362 |
+
return output;
|
| 1363 |
+
}
|
| 1364 |
+
|
| 1365 |
+
template <typename T>
|
| 1366 |
+
at::Tensor ds_vector_matmul_int8(at::Tensor& input,
|
| 1367 |
+
at::Tensor& weight,
|
| 1368 |
+
at::Tensor& q_scale,
|
| 1369 |
+
int groups,
|
| 1370 |
+
int merge_count)
|
| 1371 |
+
{
|
| 1372 |
+
auto input_cont = input.contiguous();
|
| 1373 |
+
auto options = at::TensorOptions()
|
| 1374 |
+
.dtype(input_cont.options().dtype())
|
| 1375 |
+
.layout(at::kStrided)
|
| 1376 |
+
.device(at::kCUDA)
|
| 1377 |
+
.requires_grad(false);
|
| 1378 |
+
|
| 1379 |
+
auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options);
|
| 1380 |
+
|
| 1381 |
+
quantized_gemm<T>(output, input_cont, weight, q_scale, groups, merge_count);
|
| 1382 |
+
return output;
|
| 1383 |
+
}
|
| 1384 |
+
|
| 1385 |
+
template <typename T>
|
| 1386 |
+
at::Tensor mlp_unfused_cublas(at::Tensor& output,
|
| 1387 |
+
at::Tensor& input,
|
| 1388 |
+
at::Tensor& residual,
|
| 1389 |
+
at::Tensor& input_bias,
|
| 1390 |
+
at::Tensor& weight,
|
| 1391 |
+
at::Tensor& weight1,
|
| 1392 |
+
at::Tensor& bias,
|
| 1393 |
+
at::Tensor& gamma,
|
| 1394 |
+
at::Tensor& beta,
|
| 1395 |
+
const float epsilon,
|
| 1396 |
+
bool preLayerNorm,
|
| 1397 |
+
bool mlp_after_attn,
|
| 1398 |
+
at::Tensor& q_scale,
|
| 1399 |
+
at::Tensor& q_scale1,
|
| 1400 |
+
bool q_int8,
|
| 1401 |
+
ActivationFuncType act_func_type,
|
| 1402 |
+
bool transposed_mode)
|
| 1403 |
+
{
|
| 1404 |
+
int bsz = input.size(0) * input.size(1);
|
| 1405 |
+
T* inp_norm = (T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input) +
|
| 1406 |
+
torch::numel(output);
|
| 1407 |
+
T* intermediate = inp_norm + torch::numel(input);
|
| 1408 |
+
|
| 1409 |
+
if (mlp_after_attn) {
|
| 1410 |
+
launch_fused_residual_ln((T*)inp_norm,
|
| 1411 |
+
(const T*)input.data_ptr(),
|
| 1412 |
+
(const T*)residual.data_ptr(),
|
| 1413 |
+
(const T*)input_bias.data_ptr(),
|
| 1414 |
+
(const T*)gamma.data_ptr(),
|
| 1415 |
+
(const T*)beta.data_ptr(),
|
| 1416 |
+
epsilon,
|
| 1417 |
+
bsz,
|
| 1418 |
+
input.size(2),
|
| 1419 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1420 |
+
} else {
|
| 1421 |
+
ds_layer_norm_internal(inp_norm, input, gamma, beta, epsilon);
|
| 1422 |
+
}
|
| 1423 |
+
if (q_int8) {
|
| 1424 |
+
quantized_gemm<T>(
|
| 1425 |
+
intermediate, inp_norm, weight, q_scale, q_scale.size(0), bsz, input.size(2));
|
| 1426 |
+
} else {
|
| 1427 |
+
float alpha = (T)1.0;
|
| 1428 |
+
float gemm_beta = (T)0.0;
|
| 1429 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1430 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1431 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1432 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1433 |
+
CUBLAS_OP_N,
|
| 1434 |
+
weight.size(transposed_mode ? 0 : 1),
|
| 1435 |
+
bsz,
|
| 1436 |
+
input.size(2),
|
| 1437 |
+
&alpha,
|
| 1438 |
+
&gemm_beta,
|
| 1439 |
+
(T*)weight.data_ptr(),
|
| 1440 |
+
inp_norm,
|
| 1441 |
+
intermediate,
|
| 1442 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1443 |
+
rocblas_gemm_algo_standard);
|
| 1444 |
+
#else
|
| 1445 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1446 |
+
#endif
|
| 1447 |
+
}
|
| 1448 |
+
if (act_func_type == ActivationFuncType::GELU) {
|
| 1449 |
+
launch_bias_gelu(intermediate,
|
| 1450 |
+
(T*)bias.data_ptr(),
|
| 1451 |
+
(transposed_mode || q_int8) ? weight.size(0) : weight.size(1),
|
| 1452 |
+
bsz,
|
| 1453 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1454 |
+
} else if (act_func_type == ActivationFuncType::ReLU) {
|
| 1455 |
+
launch_bias_relu(intermediate,
|
| 1456 |
+
(T*)bias.data_ptr(),
|
| 1457 |
+
(transposed_mode || q_int8) ? weight.size(0) : weight.size(1),
|
| 1458 |
+
bsz,
|
| 1459 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1460 |
+
}
|
| 1461 |
+
|
| 1462 |
+
if (q_int8) {
|
| 1463 |
+
quantized_gemm<T>(output.data_ptr(),
|
| 1464 |
+
intermediate,
|
| 1465 |
+
weight1,
|
| 1466 |
+
q_scale1,
|
| 1467 |
+
q_scale1.size(0),
|
| 1468 |
+
bsz,
|
| 1469 |
+
input.size(2));
|
| 1470 |
+
} else {
|
| 1471 |
+
float alpha = (T)1.0;
|
| 1472 |
+
float gemm_beta = (T)0.0;
|
| 1473 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1474 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1475 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1476 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1477 |
+
CUBLAS_OP_N,
|
| 1478 |
+
weight1.size(transposed_mode ? 0 : 1),
|
| 1479 |
+
bsz,
|
| 1480 |
+
weight1.size(transposed_mode ? 1 : 0),
|
| 1481 |
+
&alpha,
|
| 1482 |
+
&gemm_beta,
|
| 1483 |
+
(T*)weight1.data_ptr(),
|
| 1484 |
+
intermediate,
|
| 1485 |
+
(T*)output.data_ptr(),
|
| 1486 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1487 |
+
rocblas_gemm_algo_standard);
|
| 1488 |
+
#else
|
| 1489 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1490 |
+
#endif
|
| 1491 |
+
}
|
| 1492 |
+
|
| 1493 |
+
return torch::from_blob(inp_norm, input.sizes(), input.options());
|
| 1494 |
+
}
|
| 1495 |
+
|
| 1496 |
+
template <typename T>
|
| 1497 |
+
std::vector<at::Tensor> ds_mlp_gemm(at::Tensor& input,
|
| 1498 |
+
at::Tensor& residual,
|
| 1499 |
+
at::Tensor& input_bias,
|
| 1500 |
+
at::Tensor& weight_interm,
|
| 1501 |
+
at::Tensor& weight_out,
|
| 1502 |
+
at::Tensor& bias,
|
| 1503 |
+
at::Tensor& gamma,
|
| 1504 |
+
at::Tensor& beta,
|
| 1505 |
+
const float epsilon,
|
| 1506 |
+
bool preLayerNorm,
|
| 1507 |
+
bool mlp_after_attn,
|
| 1508 |
+
at::Tensor& q_scale,
|
| 1509 |
+
at::Tensor& q_scale1,
|
| 1510 |
+
bool q_int8,
|
| 1511 |
+
int activation_type,
|
| 1512 |
+
bool transposed_mode)
|
| 1513 |
+
{
|
| 1514 |
+
auto options = at::TensorOptions()
|
| 1515 |
+
.dtype(input.options().dtype())
|
| 1516 |
+
.layout(at::kStrided)
|
| 1517 |
+
.device(at::kCUDA)
|
| 1518 |
+
.requires_grad(false);
|
| 1519 |
+
|
| 1520 |
+
int out_size = (q_int8 || transposed_mode) ? weight_out.size(0) : weight_out.size(1);
|
| 1521 |
+
auto output =
|
| 1522 |
+
at::from_blob((T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input),
|
| 1523 |
+
{input.size(0), input.size(1), out_size},
|
| 1524 |
+
options);
|
| 1525 |
+
int bsz = input.size(0) * input.size(1);
|
| 1526 |
+
|
| 1527 |
+
auto act_func_type = static_cast<ActivationFuncType>(activation_type);
|
| 1528 |
+
auto res_add = mlp_unfused_cublas<T>(output,
|
| 1529 |
+
mlp_after_attn ? input : residual,
|
| 1530 |
+
residual,
|
| 1531 |
+
input_bias,
|
| 1532 |
+
weight_interm,
|
| 1533 |
+
weight_out,
|
| 1534 |
+
bias,
|
| 1535 |
+
gamma,
|
| 1536 |
+
beta,
|
| 1537 |
+
epsilon,
|
| 1538 |
+
preLayerNorm,
|
| 1539 |
+
mlp_after_attn,
|
| 1540 |
+
q_scale,
|
| 1541 |
+
q_scale1,
|
| 1542 |
+
q_int8,
|
| 1543 |
+
act_func_type,
|
| 1544 |
+
transposed_mode);
|
| 1545 |
+
|
| 1546 |
+
return {output, res_add};
|
| 1547 |
+
}
|
| 1548 |
+
|
| 1549 |
+
template <typename T>
|
| 1550 |
+
std::vector<at::Tensor> ds_rms_mlp_gemm(at::Tensor& input,
|
| 1551 |
+
at::Tensor& residual,
|
| 1552 |
+
at::Tensor& weight_interm,
|
| 1553 |
+
at::Tensor& weight_out,
|
| 1554 |
+
at::Tensor& gamma,
|
| 1555 |
+
const float epsilon,
|
| 1556 |
+
at::Tensor& q_scale,
|
| 1557 |
+
at::Tensor& q_scale1,
|
| 1558 |
+
bool q_int8,
|
| 1559 |
+
int activation_type,
|
| 1560 |
+
bool transposed_mode)
|
| 1561 |
+
{
|
| 1562 |
+
const int bsz = input.size(0) * input.size(1);
|
| 1563 |
+
const size_t input_neurons = input.size(2);
|
| 1564 |
+
const size_t mlp_1_out_neurons = transposed_mode ? weight_interm.size(0)
|
| 1565 |
+
: weight_interm.size(1);
|
| 1566 |
+
const size_t mlp_2_in_neurons = transposed_mode ? weight_out.size(1) : weight_out.size(0);
|
| 1567 |
+
|
| 1568 |
+
auto options = at::TensorOptions()
|
| 1569 |
+
.dtype(input.options().dtype())
|
| 1570 |
+
.layout(at::kStrided)
|
| 1571 |
+
.device(at::kCUDA)
|
| 1572 |
+
.requires_grad(false);
|
| 1573 |
+
|
| 1574 |
+
T* output_ptr = (T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input);
|
| 1575 |
+
T* inp_norm_ptr = output_ptr + torch::numel(input);
|
| 1576 |
+
T* intermediate_ptr = inp_norm_ptr + torch::numel(input);
|
| 1577 |
+
|
| 1578 |
+
auto output = at::from_blob(output_ptr, input.sizes(), options);
|
| 1579 |
+
auto inp_norm = at::from_blob(inp_norm_ptr, input.sizes(), options);
|
| 1580 |
+
auto intermediate_gemm =
|
| 1581 |
+
at::from_blob(intermediate_ptr, {input.size(0), input.size(1), mlp_1_out_neurons}, options);
|
| 1582 |
+
|
| 1583 |
+
auto act_func_type = static_cast<ActivationFuncType>(activation_type);
|
| 1584 |
+
|
| 1585 |
+
// RMS Norm, we'll update the residual in-place
|
| 1586 |
+
launch_rms_norm((T*)inp_norm.data_ptr(),
|
| 1587 |
+
(T*)residual.data_ptr(),
|
| 1588 |
+
(const T*)input.data_ptr(),
|
| 1589 |
+
(const T*)residual.data_ptr(),
|
| 1590 |
+
(const T*)gamma.data_ptr(),
|
| 1591 |
+
epsilon,
|
| 1592 |
+
bsz,
|
| 1593 |
+
input_neurons,
|
| 1594 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1595 |
+
|
| 1596 |
+
if (q_int8) {
|
| 1597 |
+
quantized_gemm<T>(intermediate_ptr,
|
| 1598 |
+
(T*)inp_norm.data_ptr(),
|
| 1599 |
+
weight_interm,
|
| 1600 |
+
q_scale,
|
| 1601 |
+
q_scale.size(0),
|
| 1602 |
+
bsz,
|
| 1603 |
+
input_neurons);
|
| 1604 |
+
} else {
|
| 1605 |
+
float alpha = (T)1.0;
|
| 1606 |
+
float gemm_beta = (T)0.0;
|
| 1607 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1608 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1609 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1610 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1611 |
+
CUBLAS_OP_N,
|
| 1612 |
+
mlp_1_out_neurons,
|
| 1613 |
+
bsz,
|
| 1614 |
+
input_neurons,
|
| 1615 |
+
&alpha,
|
| 1616 |
+
&gemm_beta,
|
| 1617 |
+
(T*)weight_interm.data_ptr(),
|
| 1618 |
+
(T*)inp_norm.data_ptr(),
|
| 1619 |
+
intermediate_ptr,
|
| 1620 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1621 |
+
rocblas_gemm_algo_standard);
|
| 1622 |
+
#else
|
| 1623 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1624 |
+
#endif
|
| 1625 |
+
}
|
| 1626 |
+
|
| 1627 |
+
if (act_func_type == ActivationFuncType::GELU) {
|
| 1628 |
+
launch_bias_gelu(intermediate_ptr,
|
| 1629 |
+
(T*)nullptr,
|
| 1630 |
+
mlp_1_out_neurons,
|
| 1631 |
+
bsz,
|
| 1632 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1633 |
+
} else if (act_func_type == ActivationFuncType::ReLU) {
|
| 1634 |
+
launch_bias_relu(intermediate_ptr,
|
| 1635 |
+
(T*)nullptr,
|
| 1636 |
+
mlp_1_out_neurons,
|
| 1637 |
+
bsz,
|
| 1638 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1639 |
+
} else if (act_func_type == ActivationFuncType::GATED_GELU) {
|
| 1640 |
+
launch_gated_activation(intermediate_ptr,
|
| 1641 |
+
(const T*)intermediate_ptr,
|
| 1642 |
+
(const T*)nullptr,
|
| 1643 |
+
bsz,
|
| 1644 |
+
mlp_1_out_neurons,
|
| 1645 |
+
mlp_1_out_neurons,
|
| 1646 |
+
true,
|
| 1647 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1648 |
+
} else if (act_func_type == ActivationFuncType::GATED_SILU) {
|
| 1649 |
+
launch_gated_activation(intermediate_ptr,
|
| 1650 |
+
(const T*)intermediate_ptr,
|
| 1651 |
+
(const T*)nullptr,
|
| 1652 |
+
bsz,
|
| 1653 |
+
mlp_1_out_neurons,
|
| 1654 |
+
mlp_1_out_neurons,
|
| 1655 |
+
false,
|
| 1656 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1657 |
+
}
|
| 1658 |
+
|
| 1659 |
+
if (q_int8) {
|
| 1660 |
+
quantized_gemm<T>(output.data_ptr(),
|
| 1661 |
+
intermediate_ptr,
|
| 1662 |
+
weight_out,
|
| 1663 |
+
q_scale1,
|
| 1664 |
+
q_scale1.size(0),
|
| 1665 |
+
bsz,
|
| 1666 |
+
input.size(2));
|
| 1667 |
+
} else {
|
| 1668 |
+
float alpha = (T)1.0;
|
| 1669 |
+
float gemm_beta = (T)0.0;
|
| 1670 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1671 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1672 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1673 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1674 |
+
CUBLAS_OP_N,
|
| 1675 |
+
input_neurons,
|
| 1676 |
+
bsz,
|
| 1677 |
+
mlp_2_in_neurons,
|
| 1678 |
+
&alpha,
|
| 1679 |
+
&gemm_beta,
|
| 1680 |
+
(T*)weight_out.data_ptr(),
|
| 1681 |
+
intermediate_ptr,
|
| 1682 |
+
(T*)output.data_ptr(),
|
| 1683 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1684 |
+
rocblas_gemm_algo_standard,
|
| 1685 |
+
#else
|
| 1686 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP,
|
| 1687 |
+
#endif
|
| 1688 |
+
mlp_1_out_neurons);
|
| 1689 |
+
}
|
| 1690 |
+
|
| 1691 |
+
return {output, residual};
|
| 1692 |
+
}
|
| 1693 |
+
|
| 1694 |
+
template <typename T>
|
| 1695 |
+
at::Tensor fused_gemm_gelu(at::Tensor& input,
|
| 1696 |
+
at::Tensor& weight,
|
| 1697 |
+
at::Tensor& weight_scale,
|
| 1698 |
+
at::Tensor& bias,
|
| 1699 |
+
at::Tensor& weight_out,
|
| 1700 |
+
at::Tensor& weight_out_scale,
|
| 1701 |
+
bool q_int8,
|
| 1702 |
+
bool transposed_mode)
|
| 1703 |
+
{
|
| 1704 |
+
auto options = at::TensorOptions()
|
| 1705 |
+
.dtype(input.options().dtype())
|
| 1706 |
+
.layout(at::kStrided)
|
| 1707 |
+
.device(at::kCUDA)
|
| 1708 |
+
.requires_grad(false);
|
| 1709 |
+
|
| 1710 |
+
int intm_dim = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1);
|
| 1711 |
+
|
| 1712 |
+
// auto output = at::from_blob((T*)InferenceContext::Instance().GetWorkSpace() +
|
| 1713 |
+
// torch::numel(input),
|
| 1714 |
+
// {input.size(0), input.size(1), out_size},
|
| 1715 |
+
// options);
|
| 1716 |
+
// T* intermediate = (T*)input.data_ptr() + torch::numel(input);
|
| 1717 |
+
auto intermediate = at::empty({input.size(0), input.size(1), intm_dim}, options);
|
| 1718 |
+
|
| 1719 |
+
int bsz = input.size(0) * input.size(1);
|
| 1720 |
+
|
| 1721 |
+
float alpha = (T)1.0;
|
| 1722 |
+
float gemm_beta = (T)0.0;
|
| 1723 |
+
if (q_int8) {
|
| 1724 |
+
quantized_gemm<T>(intermediate.data_ptr(),
|
| 1725 |
+
(T*)input.data_ptr(),
|
| 1726 |
+
weight,
|
| 1727 |
+
weight_scale,
|
| 1728 |
+
weight_scale.size(0),
|
| 1729 |
+
bsz,
|
| 1730 |
+
input.size(2));
|
| 1731 |
+
} else {
|
| 1732 |
+
cublasSetStream(InferenceContext::Instance().GetCublasHandle(),
|
| 1733 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1734 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1735 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1736 |
+
CUBLAS_OP_N,
|
| 1737 |
+
intm_dim,
|
| 1738 |
+
bsz,
|
| 1739 |
+
input.size(2),
|
| 1740 |
+
&alpha,
|
| 1741 |
+
&gemm_beta,
|
| 1742 |
+
(T*)weight.data_ptr(),
|
| 1743 |
+
(T*)input.data_ptr(),
|
| 1744 |
+
(T*)intermediate.data_ptr(),
|
| 1745 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1746 |
+
rocblas_gemm_algo_standard);
|
| 1747 |
+
#else
|
| 1748 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1749 |
+
#endif
|
| 1750 |
+
}
|
| 1751 |
+
launch_bias_gelu((T*)intermediate.data_ptr(),
|
| 1752 |
+
(T*)bias.data_ptr(),
|
| 1753 |
+
intm_dim,
|
| 1754 |
+
bsz,
|
| 1755 |
+
InferenceContext::Instance().GetCurrentStream());
|
| 1756 |
+
|
| 1757 |
+
int out_size = (transposed_mode || q_int8) ? weight_out.size(0) : weight_out.size(1);
|
| 1758 |
+
auto output = at::empty({input.size(0), input.size(1), out_size}, options);
|
| 1759 |
+
if (q_int8) {
|
| 1760 |
+
quantized_gemm<T>(output.data_ptr(),
|
| 1761 |
+
(T*)intermediate.data_ptr(),
|
| 1762 |
+
weight_out,
|
| 1763 |
+
weight_out_scale,
|
| 1764 |
+
weight_out_scale.size(0),
|
| 1765 |
+
bsz,
|
| 1766 |
+
input.size(2));
|
| 1767 |
+
} else {
|
| 1768 |
+
cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(),
|
| 1769 |
+
(transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N),
|
| 1770 |
+
CUBLAS_OP_N,
|
| 1771 |
+
out_size,
|
| 1772 |
+
bsz,
|
| 1773 |
+
intm_dim,
|
| 1774 |
+
&alpha,
|
| 1775 |
+
&gemm_beta,
|
| 1776 |
+
(T*)weight_out.data_ptr(),
|
| 1777 |
+
(T*)intermediate.data_ptr(),
|
| 1778 |
+
(T*)output.data_ptr(),
|
| 1779 |
+
#ifdef __HIP_PLATFORM_AMD__
|
| 1780 |
+
rocblas_gemm_algo_standard);
|
| 1781 |
+
#else
|
| 1782 |
+
CUBLAS_GEMM_DEFAULT_TENSOR_OP);
|
| 1783 |
+
#endif
|
| 1784 |
+
}
|
| 1785 |
+
// cudaEventRecord(InferenceContext::Instance().GetCompEvent(2),
|
| 1786 |
+
// InferenceContext::Instance().GetCurrentStream(true));
|
| 1787 |
+
return output;
|
| 1788 |
+
}
|
| 1789 |
+
|
| 1790 |
+
// Fused residual + bias addition, dispatched on the transformer layout.
//
// Updates `residual` in place and returns it. Two code paths:
//   - mlp_after_attn (sequential attn -> MLP, GPT-2 style ordering):
//     launch_bias_residual combines hidden_state, attention_output and both
//     biases into the residual stream.
//   - otherwise (parallel attn/MLP, GPT-J style): launch_gptj_residual_add,
//     where the attention bias is only applied when `add_bias` is set.
//
// NOTE(review): in the mlp_after_attn path `attention_bias.data_ptr()` is
// passed unconditionally (the `add_bias` flag is not consulted) — presumably
// launch_bias_residual handles that internally; confirm against its kernel.
// `mp_size` is forwarded to the kernels (looks like a tensor-parallel degree
// used to scale the bias contribution — verify in the kernel sources).
template <typename T>
at::Tensor& residual_add_bias(at::Tensor& hidden_state,
                              at::Tensor& residual,
                              const at::Tensor& attention_output,
                              const at::Tensor& attention_bias,
                              const at::Tensor& final_bias,
                              const int mp_size,
                              const bool mlp_after_attn,
                              const bool add_bias,
                              const bool preln)
{
    // Collapse (batch, seq) into one row count; last dim is the hidden size.
    int bsz = residual.size(0) * residual.size(1);
    int hidden_size = residual.size(2);
    if (mlp_after_attn)
        launch_bias_residual(static_cast<T*>(residual.data_ptr()),
                             static_cast<T*>(hidden_state.data_ptr()),
                             static_cast<T*>(attention_output.data_ptr()),
                             static_cast<T*>(final_bias.data_ptr()),
                             static_cast<T*>(attention_bias.data_ptr()),
                             bsz,
                             hidden_size,
                             mp_size,
                             preln,
                             InferenceContext::Instance().GetCurrentStream());
    else
        launch_gptj_residual_add<T>(
            static_cast<T*>(residual.data_ptr()),
            static_cast<T*>(hidden_state.data_ptr()),
            static_cast<T*>(attention_output.data_ptr()),
            static_cast<T*>(final_bias.data_ptr()),
            // Attention bias is optional on this path; nullptr skips it.
            static_cast<T*>((add_bias ? attention_bias.data_ptr() : nullptr)),
            hidden_size,
            bsz,
            mp_size,
            InferenceContext::Instance().GetCurrentStream());
    return residual;
}
|
| 1827 |
+
|
| 1828 |
+
// Dispatch helper: when tensor `a` has scalar type at::k<T_TYPE>, launch the
// vector-add kernel with the matching C++ element type. Note `a.data_ptr()`
// is passed as both destination and first source, so the add is in place on
// `a` (a = a + gamma * b — exact gamma semantics live in launch_vector_add).
#define DISPATCH_VECTOR_ADD(T_TYPE, C_TYPE)                                          \
    if (a.scalar_type() == at::k##T_TYPE) {                                          \
        launch_vector_add<C_TYPE>((C_TYPE*)(a.data_ptr()),                           \
                                  (const C_TYPE*)(a.data_ptr()),                     \
                                  (const C_TYPE*)(b.data_ptr()),                     \
                                  gamma,                                             \
                                  total_elems,                                       \
                                  InferenceContext::Instance().GetCurrentStream());  \
    }

// Element-wise scaled addition of `b` into `a` (in place); returns `a`.
// Supports float32, float16 and (when compiled in) bfloat16.
// NOTE(review): an unmatched dtype silently falls through and returns `a`
// unmodified — no error is raised.
at::Tensor& _vector_add(at::Tensor& a, at::Tensor& b, float gamma)
{
    const int total_elems = a.numel();

    DISPATCH_VECTOR_ADD(Float, float)
    DISPATCH_VECTOR_ADD(Half, __half)
#ifdef BF16_AVAILABLE
    DISPATCH_VECTOR_ADD(BFloat16, __nv_bfloat16)
#endif

    return a;
}
|
| 1850 |
+
|
| 1851 |
+
// Apply rotary positional embeddings (RoPE) to the query and key tensors.
//
// Works on contiguous copies of the inputs and returns {query, key} after the
// kernel has rotated the first `rotary_dim` channels of each head, starting
// at token position `offset`, using base frequency `rope_theta`.
//
// Expected layout (from the size() calls below): (batch, seq, hidden) with
// hidden = num_heads * head_size.
// NOTE(review): `rotate_half` is accepted but never used in this body —
// presumably the kernel has a fixed rotation layout; confirm upstream.
// NOTE(review): dispatch is binary — any non-float dtype (including bf16)
// is routed through the __half instantiation.
std::vector<at::Tensor> apply_rotary_pos_emb(at::Tensor& mixed_query,
                                             at::Tensor& key_layer,
                                             unsigned rotary_dim,
                                             unsigned offset,
                                             unsigned num_heads,
                                             bool rotate_half,
                                             float rope_theta)
{
    // Kernels require dense memory; contiguous() may copy.
    auto query_cont = mixed_query.contiguous();
    auto key_cont = key_layer.contiguous();

    unsigned bsz = mixed_query.size(0);
    unsigned head_size = mixed_query.size(2) / num_heads;
    unsigned seq_len = mixed_query.size(1);

    if (mixed_query.scalar_type() == at::kFloat)
        launch_apply_rotary_pos_emb<float>((float*)query_cont.data_ptr(),
                                           (float*)key_cont.data_ptr(),
                                           head_size,
                                           seq_len,
                                           rotary_dim,
                                           offset,
                                           num_heads,
                                           bsz,
                                           rope_theta,
                                           InferenceContext::Instance().GetCurrentStream(),
                                           InferenceContext::Instance().GetMaxTokenLength());
    else
        launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(),
                                            (__half*)key_cont.data_ptr(),
                                            head_size,
                                            seq_len,
                                            rotary_dim,
                                            offset,
                                            num_heads,
                                            bsz,
                                            rope_theta,
                                            InferenceContext::Instance().GetCurrentStream(),
                                            InferenceContext::Instance().GetMaxTokenLength());
    return {query_cont, key_cont};
}
|
| 1892 |
+
|
| 1893 |
+
// Dispatch helper: launch the MoE residual matmul kernel with the C++ element
// type matching moe_res's scalar type.
#define DISPATCH_MOE_RESIDUAL(T_TYPE, C_TYPE)                                      \
    if (moe_res.scalar_type() == torch::T_TYPE) {                                  \
        launch_moe_res_matmul<C_TYPE>((C_TYPE*)moe_res.data_ptr(),                 \
                                      (C_TYPE*)coef.data_ptr(),                    \
                                      (C_TYPE*)output.data_ptr(),                  \
                                      M,                                           \
                                      N,                                           \
                                      InferenceContext::Instance().GetCurrentStream()); \
    }

// Combine the Mixture-of-Experts residual with the expert output via the
// per-token coefficients in `coef`, writing into (and returning) `output`.
// M = batch * seq rows, N = hidden width. SynchComm() is called first so the
// kernel does not race with an in-flight communication collective.
at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& output)
{
    int M = moe_res.size(0) * moe_res.size(1);
    int N = moe_res.size(2);
    InferenceContext::Instance().SynchComm();

    DISPATCH_MOE_RESIDUAL(kFloat, float)
    DISPATCH_MOE_RESIDUAL(kHalf, __half)
#ifdef BF16_AVAILABLE
    DISPATCH_MOE_RESIDUAL(kBFloat16, __nv_bfloat16)
#endif

    return output;
}
|
| 1917 |
+
|
| 1918 |
+
// Free the persistent inference workspace held by the singleton context.
void ds_release_workspace() { InferenceContext::Instance().release_workspace(); }

// Re-acquire the inference workspace after a release; forwards the context's
// success flag (presumably false when the allocation fails — confirm in
// InferenceContext::retake_workspace).
bool ds_retake_workspace() { return InferenceContext::Instance().retake_workspace(); }
|
| 1921 |
+
|
| 1922 |
+
// Dequantize an int8 weight matrix into a freshly allocated tensor using the
// per-group scales in `qscale`; returns the dequantized (rows, cols) tensor.
//
// NOTE(review): the output dtype is hard-coded to torch::kFloat16 even though
// the kernel writes through T* — for any instantiation other than __half the
// allocated dtype and the element type written by launch_dequantize disagree;
// verify this is intentional before instantiating with float/bf16.
template <typename T>
at::Tensor ds_dequantize(at::Tensor& weight, at::Tensor& qscale, int groups)
{
    auto options = at::TensorOptions()
                       .dtype(torch::kFloat16)
                       .layout(at::kStrided)
                       .device(at::kCUDA)
                       .requires_grad(false);
    auto weight16 = at::empty({weight.size(0), weight.size(1)}, options);

    launch_dequantize((T*)weight16.data_ptr(),
                      (int8_t*)weight.data_ptr(),
                      (float*)qscale.data_ptr(),
                      weight.size(0),
                      weight.size(1),
                      groups,  // number of quantization groups sharing a scale
                      InferenceContext::Instance().GetCurrentStream());

    return weight16;
}
|
| 1942 |
+
|
| 1943 |
+
// Python bindings for the DeepSpeed inference CUDA extension.
// Two groups of exports:
//   1. Ops that dispatch on dtype internally — bound once, untemplated.
//   2. Templated ops — instantiated per dtype via DEF_OPS (fp32/fp16, plus
//      bf16 when the build defines BF16_AVAILABLE), producing names like
//      "softmax_fp16", "mlp_gemm_bf16", etc.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("softmax_context_int8",
          &ds_softmax_context1<__half>,
          "DeepSpeed attention with int8 (CUDA)");

    // The following functions handle type dispatching internally
    m.def("gated_activation", &ds_gated_activation, "DeepSpeed Bias GEGLU (CUDA)");
    m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm (CUDA)");
    m.def(
        "_layer_norm_residual", &ds_layer_norm_residual, "DeepSpeed layer norm + residual (CUDA)");
    m.def("layer_norm_residual_store_pre_ln_res",
          &ds_layer_norm_residual_store_pre_ln_res,
          "DeepSpeed layer norm + store pre Layernorm residual (CUDA)");
    m.def("rms_norm", &ds_rms_norm, "DeepSpeed rms norm (CUDA)");
    m.def("pre_rms_norm", &ds_pre_rms_norm, "DeepSpeed pre rms norm (CUDA)");
    m.def("_vector_add", &_vector_add, "DeepSpeed vector add (CUDA)");
    m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)");
    m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)");
    m.def("reset_cache", &reset_cache, "Reset Cache for generation tasks");
    m.def("release_workspace", &ds_release_workspace, "DeepSpeed Release Workspace");
    m.def("retake_workspace", &ds_retake_workspace, "DeepSpeed Retake Workspace");

    // The following functions are templated and need to be explicitly instantiated and bound
    // to different python methods
#define DEF_OPS(_name, _dtype)                                                                     \
    m.def("softmax_" #_name, &ds_softmax<_dtype>, "DeepSpeed SoftMax with " #_name " (CUDA)");     \
    m.def("softmax_context_" #_name,                                                               \
          &ds_softmax_context<_dtype>,                                                             \
          "DeepSpeed attention with " #_name " (CUDA)");                                           \
    m.def("bias_gelu_" #_name, &ds_bias_gelu<_dtype>, "DeepSpeed Gelu with " #_name " (CUDA)");    \
    m.def("bias_add_" #_name, &ds_bias_add<_dtype>, "DeepSpeed Bias Add with " #_name " (CUDA)");  \
    m.def("bias_relu_" #_name, &ds_bias_relu<_dtype>, "DeepSpeed ReLU with " #_name " (CUDA)");    \
    m.def("bias_residual_" #_name,                                                                 \
          &ds_bias_residual<_dtype>,                                                               \
          "DeepSpeed residual-bias add with " #_name " (CUDA)");                                   \
    m.def("qkv_gemm_" #_name, &ds_qkv_gemm<_dtype>, "DeepSpeed qkv gemm with " #_name " (CUDA)");  \
    m.def("rms_qkv_gemm_" #_name,                                                                  \
          &ds_rms_qkv<_dtype>,                                                                     \
          "DeepSpeed rms qkv gemm with " #_name " (CUDA)");                                        \
    m.def("mlp_gemm_" #_name, &ds_mlp_gemm<_dtype>, "DeepSpeed mlp with " #_name " (CUDA)");       \
    m.def("rms_mlp_gemm_" #_name,                                                                  \
          &ds_rms_mlp_gemm<_dtype>,                                                                \
          "DeepSpeed rms mlp gemm with " #_name " (CUDA)");                                        \
    m.def("vector_matmul_" #_name,                                                                 \
          &ds_vector_matmul<_dtype>,                                                               \
          "DeepSpeed vector-MM with " #_name " (CUDA)");                                           \
    m.def("linear_layer_" #_name,                                                                  \
          &ds_linear_layer<_dtype>,                                                                \
          "DeepSpeed linear_layer with " #_name " (CUDA)");                                        \
    m.def("fused_gemm_gelu_" #_name,                                                               \
          &fused_gemm_gelu<_dtype>,                                                                \
          "DeepSpeed mlp with " #_name " (CUDA)");                                                 \
    m.def("residual_add_bias_" #_name,                                                             \
          &residual_add_bias<_dtype>,                                                              \
          "DeepSpeed residual add with " #_name " (CUDA)");                                        \
    m.def("einsum_sec_sm_ecm_" #_name,                                                             \
          &einsum_sec_sm_ecm<_dtype>,                                                              \
          "DeepSpeed vector-MM with " #_name " (CUDA)");                                           \
    m.def("add_padding_" #_name,                                                                   \
          &add_padding<_dtype>,                                                                    \
          "DeepSpeed residual add with " #_name " (CUDA)");                                        \
    m.def("pad_transform_" #_name,                                                                 \
          &padd_add_transform<_dtype>,                                                             \
          "DeepSpeed residual add with " #_name " (CUDA)");                                        \
    m.def("allocate_workspace_" #_name,                                                            \
          &allocate_workspace<_dtype>,                                                             \
          "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)");                 \
    m.def("dequantize_" #_name,                                                                    \
          &ds_dequantize<_dtype>,                                                                  \
          "DeepSpeed dequantize with " #_name " (CUDA)")

    DEF_OPS(fp32, float);
    DEF_OPS(fp16, __half);
#ifdef BF16_AVAILABLE
    DEF_OPS(bf16, __nv_bfloat16);
#endif
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#include "inference_cuda_layers.h"
|
| 8 |
+
#include "memory_access_utils.h"
|
| 9 |
+
|
| 10 |
+
namespace cg = cooperative_groups;
|
| 11 |
+
#define MAX_CAP 4
|
| 12 |
+
#define MAX_SEQ 2048
|
| 13 |
+
|
| 14 |
+
// Scalar ReLU in float; elements are up-cast to float before activation.
inline __device__ float relu(const float x) { return x < 0 ? 0 : x; }

/*
In-place relu(biasAdd(x)) for channels last
*/
// Each thread handles one 16-byte vector (values_per_access elements).
// The bias is tiled with period `intermediate_size` (channels-last layout),
// and bias == nullptr is tolerated via the predicated load below.
template <typename T>
__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size)
{
    // Input restriction: intermediate_size % vals_per_access == 0
    constexpr int granularity = 16;
    constexpr int values_per_access = granularity / sizeof(T);
    const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;

    if (offset < total_count) {
        T data[values_per_access];
        T data_bias[values_per_access];
        mem_access::load_global<granularity>(data, input + offset);
        // Predicated load: skipped (leaving data_bias unset) when bias is null.
        mem_access::load_global<granularity>(
            data_bias, bias + (offset % intermediate_size), bias != nullptr);

#pragma unroll
        for (int i = 0; i < values_per_access; i++) {
            // Compute in float for accuracy, then convert back to T.
            float data_f = conversion::to<float>(data[i]);
            float bias_f = conversion::to<float>(data_bias[i]);
            data[i] = conversion::to<T>(relu(data_f + bias_f));
        }

        // Write the activated vector back over the input (in place).
        mem_access::store_global<granularity>(input + offset, data);
    }
}
|
| 44 |
+
|
| 45 |
+
// Host-side launcher for fused_bias_relu.
// Grid is sized so every 16-byte vector of the (batch_size x
// intermediate_size) activation buffer is covered by exactly one thread;
// the kernel's bounds check handles the final partial block.
template <typename T>
void launch_bias_relu(T* input,
                      const T* bias,
                      int intermediate_size,
                      int batch_size,
                      cudaStream_t stream)
{
    constexpr int threads = 1024;
    constexpr int granularity = 16;  // bytes per vectorized access

    const int total_count = batch_size * intermediate_size;
    const int elems_per_block = threads * (granularity / sizeof(T));
    dim3 block_dims(threads);
    // Ceiling division: enough blocks to cover all elements.
    dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);

    fused_bias_relu<<<grid_dims, block_dims, 0, stream>>>(
        input, bias, total_count, intermediate_size);
}
|
| 63 |
+
|
| 64 |
+
// Explicit template instantiations so the launcher is emitted for each
// supported element type (bf16 only when the build enables it).
#define INSTANTIATE_LAUNCH_BIAS_RELU(T) \
    template void launch_bias_relu<T>(T*, const T*, int, int, cudaStream_t);

INSTANTIATE_LAUNCH_BIAS_RELU(float)
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_BIAS_RELU(__nv_bfloat16)
#endif
INSTANTIATE_LAUNCH_BIAS_RELU(__half)
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "conversion_utils.h"
|
| 7 |
+
#include "ds_kernel_utils.h"
|
| 8 |
+
#include "inference_cuda_layers.h"
|
| 9 |
+
#include "memory_access_utils.h"
|
| 10 |
+
#include "reduction_utils.h"
|
| 11 |
+
|
| 12 |
+
namespace cg = cooperative_groups;
|
| 13 |
+
using rop = reduce::ROpType;
|
| 14 |
+
|
| 15 |
+
namespace rms {
|
| 16 |
+
constexpr int granularity = 16;
|
| 17 |
+
} // namespace rms
|
| 18 |
+
|
| 19 |
+
// RMSNorm kernel: out[r] = in[r] * gamma / sqrt(mean(in[r]^2) + epsilon),
// one row per cooperative group of threadsPerGroup threads.
//
// Layout: blocks may host several row-groups (y dimension of the block);
// each thread loads UNROLL 16-byte vectors from its row, accumulates the
// sum of squares in float, reduces across the group, then rescales its
// cached values and stores them. Out-of-row lanes are masked by the
// predicated loads/stores rather than by divergent control flow.
template <typename T, int UNROLL, int threadsPerGroup, int maxThreads>
__global__ void rms_norm(T* output, const T* vals, const T* gamma, float epsilon, int elems_per_row)
{
    constexpr int T_per_load = rms::granularity / sizeof(T);

    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);

    // X-dimension of the block
    const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
                             (tb.thread_index().y * elems_per_row);
    const int thread_offset = tb.thread_index().x * T_per_load;
    const int base_offset = block_offset + thread_offset;
    const int stride = blockDim.x * T_per_load;

    float var_sum = reduce::init<rop::Add, float>();

    const T* input_base = vals + base_offset;

    // Row data stays resident in registers between the two passes.
    T local_buffer[UNROLL * T_per_load];

#pragma unroll
    for (int i = 0; i < UNROLL; i++) {
        T* iteration_buffer = local_buffer + (i * T_per_load);

        // Predicated: lanes past the row end load nothing.
        mem_access::load_global<rms::granularity>(iteration_buffer,
                                                  input_base + (i * stride),
                                                  thread_offset + (i * stride) < elems_per_row);

#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            // Accumulate sum of squares in float regardless of T.
            float up_cast = conversion::to<float>(iteration_buffer[j]);
            float sq_val = up_cast * up_cast;
            var_sum = reduce::element<rop::Add, float>(var_sum, sq_val);
        }
    }

    // Group-wide reduction; every lane then holds the full sum.
    reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, var_sum);
    const float var = var_sum / elems_per_row;
    // rsqrt computed once per row; note it is converted to T before the
    // multiplies, so the scale itself is applied at T precision.
    const T denom = conversion::to<T>(__frsqrt_rn(var + epsilon));

    T* block_output = output + block_offset;

#pragma unroll
    for (int i = 0; i < UNROLL; i++) {
        T* iteration_buffer = local_buffer + (i * T_per_load);
        const int iter_idx = i * stride + thread_offset;
        const bool do_loads = (iter_idx < elems_per_row);

        T gamma_local[T_per_load];

        mem_access::load_global<rms::granularity>(gamma_local, gamma + iter_idx, do_loads);

#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            iteration_buffer[j] *= denom;
            iteration_buffer[j] *= gamma_local[j];
        }

        if (do_loads) {
            mem_access::store_global<rms::granularity>(block_output + iter_idx, iteration_buffer);
        }
    }
}
|
| 83 |
+
|
| 84 |
+
// Pre-RMSNorm kernel: first computes res_out = vals + residual (stored to
// global memory), then RMS-normalizes that sum into `output` — the standard
// pre-norm residual pattern. Structure mirrors rms_norm above: two register
// passes over the row with a group-wide sum-of-squares reduction in between.
template <typename T, int UNROLL, int threadsPerGroup, int maxThreads>
__global__ void pre_rms_norm(T* output,
                             T* res_out,
                             const T* vals,
                             const T* residual,
                             const T* gamma,
                             float epsilon,
                             int elems_per_row)
{
    constexpr int T_per_load = rms::granularity / sizeof(T);

    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);

    // X-dimension of the block
    const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
                             (tb.thread_index().y * elems_per_row);
    const int thread_offset = tb.thread_index().x * T_per_load;
    const int base_offset = block_offset + thread_offset;
    const int stride = blockDim.x * T_per_load;

    float var_sum = reduce::init<rop::Add, float>();

    const T* input_base = vals + base_offset;
    const T* residual_base = residual + base_offset;
    T* res_output = res_out + base_offset;

    T local_buffer[UNROLL * T_per_load];

#pragma unroll
    for (int i = 0; i < UNROLL; i++) {
        T* iteration_buffer = local_buffer + (i * T_per_load);
        T residual_buffer[T_per_load];

        const int iter_offset = i * stride + thread_offset;
        const bool do_loads = (iter_offset < elems_per_row);

        mem_access::load_global<rms::granularity>(
            iteration_buffer, input_base + (i * stride), do_loads);
        mem_access::load_global<rms::granularity>(
            residual_buffer, residual_base + (i * stride), do_loads);

#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            // The residual sum (at T precision) is what gets normalized
            // and also what is written out as the new residual stream.
            iteration_buffer[j] += residual_buffer[j];
            float vals_up_cast = conversion::to<float>(iteration_buffer[j]);

            var_sum = reduce::element<rop::Add, float>(var_sum, vals_up_cast * vals_up_cast);
        }

        if (do_loads) {
            mem_access::store_global<rms::granularity>(res_output + i * stride, iteration_buffer);
        }
    }

    reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, var_sum);
    const float var = var_sum / elems_per_row;
    const T denom = conversion::to<T>(__frsqrt_rn(var + epsilon));

    T* block_output = output + block_offset;

#pragma unroll
    for (int i = 0; i < UNROLL; i++) {
        T* iteration_buffer = local_buffer + (i * T_per_load);
        const int iter_idx = i * stride + thread_offset;
        const bool do_loads = (iter_idx < elems_per_row);

        T gamma_local[T_per_load];

        mem_access::load_global<rms::granularity>(gamma_local, gamma + iter_idx, do_loads);

#pragma unroll
        for (int j = 0; j < T_per_load; j++) {
            iteration_buffer[j] *= denom;
            iteration_buffer[j] *= gamma_local[j];
        }

        if (do_loads) {
            mem_access::store_global<rms::granularity>(block_output + iter_idx, iteration_buffer);
        }
    }
}
|
| 166 |
+
|
| 167 |
+
// Launch the plain RMSNorm kernel with the given compile-time schedule.
#define LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)                          \
    rms_norm<T, UNROLL, threadsPerGroup, maxThreads>                                  \
        <<<grid, block, 0, stream>>>(norm_output, vals, gamma, epsilon, elems_per_row);

// Launch the fused residual-add + RMSNorm kernel with the given schedule.
#define LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)                      \
    pre_rms_norm<T, UNROLL, threadsPerGroup, maxThreads><<<grid, block, 0, stream>>>( \
        norm_output, res_output, vals, residual, gamma, epsilon, elems_per_row);

// Select between the two kernels at runtime (pre_norm <=> residual supplied).
#define LAUNCH_ALL_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)  \
    if (pre_norm) {                                               \
        LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)  \
    } else {                                                      \
        LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)      \
    }

// Host-side scheduler for RMSNorm.
//
// Chooses a thread/unroll configuration from the row width:
//   - rows <= 128 elems: "sub-block" schedule — several rows share one block,
//     each handled by a small power-of-two thread group (1..16 threads).
//   - wider rows: one row per block, maxThreads threads, external unroll
//     factor 1..4 covering up to 16384 elements per row.
// Passing residual == nullptr selects the plain kernel; otherwise the fused
// residual-add variant is used and res_output receives vals + residual.
// NOTE(review): rows wider than 4 * internalUnroll * maxThreads * T_per_load
// fall through every branch and launch nothing — presumably an upstream
// precondition; confirm callers never exceed it.
template <typename T>
void launch_rms_norm(T* norm_output,
                     T* res_output,
                     const T* vals,
                     const T* residual,
                     const T* gamma,
                     float epsilon,
                     int rows,
                     int elems_per_row,
                     cudaStream_t stream)
{
    // 8 for __half, 4 for float
    constexpr int T_per_load = rms::granularity / sizeof(T);
    constexpr int maxThreads = 256;
    constexpr int internalUnroll = sizeof(T) == 4 ? 4 : 2;

    const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
    const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internalUnroll;

    // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
    // warp-sized blocks rather than stepping up to 64/96 threads
    const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
    const int threads_per_group = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;

    const int groups_per_block_max =
        is_subblock_schedule ? (maxThreads + threads_per_group - 1) / threads_per_group : 1;
    const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
    const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;

    dim3 block(threads_per_group, groups_per_block);
    dim3 grid(groups_launch);

    const int elems_per_step = threads_per_group * h_per_step;
    const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;

    bool pre_norm = (residual == nullptr) ? false : true;

    if (is_subblock_schedule) {
        // <=128
        if (threads_per_group == 1) {
            LAUNCH_ALL_RMS_NORM(1, 1, maxThreads);
        } else if (threads_per_group == 2) {
            LAUNCH_ALL_RMS_NORM(1, 2, maxThreads);
        } else if (threads_per_group == 4) {
            LAUNCH_ALL_RMS_NORM(1, 4, maxThreads);
        } else if (threads_per_group == 8) {
            LAUNCH_ALL_RMS_NORM(1, 8, maxThreads);
        } else if (threads_per_group == 16) {
            LAUNCH_ALL_RMS_NORM(1, 16, maxThreads);
        }
    } else if (external_unRoll == 1) {
        // 129 - 4096 elems
        // (this can launch with 1-7 warps as well)
        LAUNCH_ALL_RMS_NORM(1 * internalUnroll, maxThreads, maxThreads);
    } else if (external_unRoll == 2) {
        // 4097 - 8192 elems
        LAUNCH_ALL_RMS_NORM(2 * internalUnroll, maxThreads, maxThreads);
    } else if (external_unRoll == 3) {
        // 8193 - 12288 elems
        LAUNCH_ALL_RMS_NORM(3 * internalUnroll, maxThreads, maxThreads);
    } else if (external_unRoll == 4) {
        // 12289 - 16384 elems
        LAUNCH_ALL_RMS_NORM(4 * internalUnroll, maxThreads, maxThreads);
    }
}
|
| 247 |
+
|
| 248 |
+
// Explicit instantiations of the RMSNorm launcher for each supported dtype.
#define INSTANTIATE_LAUNCH_RMS_NORM(T)                      \
    template void launch_rms_norm<T>(T * norm_output,       \
                                     T * res_output,        \
                                     const T* vals,         \
                                     const T* residual,     \
                                     const T* gamma,        \
                                     float epsilon,         \
                                     int rows,              \
                                     int elems_per_row,     \
                                     cudaStream_t stream);

INSTANTIATE_LAUNCH_RMS_NORM(float)
INSTANTIATE_LAUNCH_RMS_NORM(__half)
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_RMS_NORM(__nv_bfloat16)
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu
ADDED
|
@@ -0,0 +1,562 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <limits>
|
| 7 |
+
#include "conversion_utils.h"
|
| 8 |
+
#include "inference_cuda_layers.h"
|
| 9 |
+
|
| 10 |
+
#ifndef __HIP_PLATFORM_AMD__
|
| 11 |
+
#include <cuda_profiler_api.h>
|
| 12 |
+
#endif
|
| 13 |
+
#include <cstdio>
|
| 14 |
+
#include <cstdlib>
|
| 15 |
+
#include <ctime>
|
| 16 |
+
|
| 17 |
+
#define MAX_REG_SIZE 8
|
| 18 |
+
|
| 19 |
+
#define minus_infinity -10000.0
|
| 20 |
+
|
| 21 |
+
// Check for a pending CUDA error (via cudaGetLastError, which also clears
// it); on error, log the message with its call-site location and throw.
// Intended for use through the CUDA_CHECK_ERROR() macro below.
// NOTE(review): relies on std::cerr / std::runtime_error, but <iostream> and
// <stdexcept> are not among this file's visible includes — presumably pulled
// in transitively; confirm before reorganizing headers.
void CheckCudaErrorAux(const char* file, unsigned line)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) return;
    std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line
              << std::endl;
    throw std::runtime_error("CUDA ERROR!!!\n");
}

// Convenience wrapper that stamps in the caller's file and line.
#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__)
|
| 31 |
+
|
| 32 |
+
namespace cg = cooperative_groups;
|
| 33 |
+
|
| 34 |
+
// Fused masked softmax over attention scores for half/bf16 activations.
// Each power-of-2 group of `reduceWidth` threads owns one softmax row of
// length `sequence_length` (one row per (batch, head, query) triple).
// Every thread holds 4 * `iterations` elements in registers and performs the
// classic max-subtract / exp / sum / normalize sequence with warp-shuffle
// reductions (plus a shared-memory hop when the group spans several warps).
template <typename T, int iterations>
__global__ void attn_softmax_v2(T* vals,
                                T* mask,
                                T* alibi,
                                float layer_scale,
                                bool triangular,
                                bool recompute,
                                bool local_attention,
                                int window_size,
                                int total_count,
                                int heads,
                                int sequence_length,
                                int num_seq,
                                int head_offset,
                                int mask_stride,
                                int mp_size,
                                int reduceWidth)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    // Per-thread element storage: 4 elements per iteration split across two
    // float2 halves. NOTE(review): loops index these up to `iterations`,
    // which the launcher can instantiate as large as 64 while the arrays are
    // sized MAX_REG_SIZE (8) -- confirm the intended sequence-length bound.
    float2 low_data[MAX_REG_SIZE];
    float2 high_data[MAX_REG_SIZE];
    const T zero_h = conversion::to<T>(0.f);  // unused

    int wid = threadIdx.x >> 5;     // warp index within the block
    int lane = threadIdx.x & 0x1f;  // lane within the warp
    int warp_num = blockDim.x >> 5;

    int reduce_blocks = reduceWidth >> 5;      // warps per reduction group
    int seq_lane = threadIdx.x % reduceWidth;  // thread position inside the group

    __shared__ float partialSum[MAX_WARP_NUM];

    // Row handled by this reduction group; rows are laid out [batch, head, query].
    int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks);
    int batch_idx = iter_offset / (num_seq * heads);
    int alibi_offset = batch_idx * heads * mp_size + head_offset;
    int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride);

    if (iter_offset < total_count) {
        vals += (iter_offset * sequence_length);

        alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length;
        mask_offset = mask_offset * sequence_length;
        int seq_id = iter_offset % num_seq;

        // During incremental decoding (num_seq != sequence_length) the query
        // position is shifted by the cached-sequence length.
        int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length);
        // Sliding-window (local attention) bounds; 0 / -1 disable them.
        int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2))
                                 ? (real_seq_id >> 2) - (window_size >> 2)
                                 : 0;
        int window_stride =
            (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1;

        float max_val = minus_infinity;
        // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset);
        for (int i = 0; i < iterations; i++) {
            // Four strided elements per iteration: data_id, +rw, +2rw, +3rw.
            int data_id = i * (reduceWidth << 2) + (seq_lane);
            bool check = (data_id >> 2) >= window_stride4;
            // An element is valid only inside the sequence, inside the causal
            // triangle (when `triangular`) and inside the local window.
            bool low_x_check = check && (data_id < sequence_length) &&
                               (!triangular || (data_id <= seq_id)) && (data_id > window_stride);
            bool low_y_check = check && ((data_id + reduceWidth) < sequence_length) &&
                               (!triangular || ((data_id + reduceWidth) <= seq_id)) &&
                               ((data_id + reduceWidth) > window_stride);
            bool high_x_check = check && ((data_id + reduceWidth * 2) < sequence_length) &&
                                (!triangular || ((data_id + reduceWidth * 2) <= seq_id)) &&
                                ((data_id + reduceWidth * 2) > window_stride);
            bool high_y_check = check && ((data_id + reduceWidth * 3) < sequence_length) &&
                                (!triangular || ((data_id + reduceWidth * 3) <= seq_id)) &&
                                ((data_id + reduceWidth * 3) > window_stride);

            // Load in float precision, apply layer_scale, then fold in the
            // additive ALiBi bias and/or attention mask. Invalid slots get
            // minus_infinity so they vanish after exp().
            if (mask && alibi) {
                low_data[i].x = low_x_check
                                    ? conversion::to<float>(vals[data_id]) * layer_scale +
                                          (conversion::to<float>(alibi[data_id + alibi_offset])) +
                                          (conversion::to<float>(mask[data_id + mask_offset]))
                                    : minus_infinity;
                low_data[i].y =
                    low_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
                              (conversion::to<float>(alibi[data_id + alibi_offset + reduceWidth])) +
                              (conversion::to<float>(mask[data_id + mask_offset + reduceWidth]))
                        : minus_infinity;
                high_data[i].x =
                    high_x_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
                              (conversion::to<float>(
                                  alibi[data_id + alibi_offset + reduceWidth * 2])) +
                              (conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 2]))
                        : minus_infinity;
                high_data[i].y =
                    high_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
                              (conversion::to<float>(
                                  alibi[data_id + alibi_offset + reduceWidth * 3])) +
                              (conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 3]))
                        : minus_infinity;
            } else if (mask) {
                low_data[i].x = low_x_check
                                    ? conversion::to<float>(vals[data_id]) * layer_scale +
                                          (conversion::to<float>(mask[data_id + mask_offset]))
                                    : minus_infinity;
                low_data[i].y =
                    low_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
                              (conversion::to<float>(mask[data_id + mask_offset + reduceWidth]))
                        : minus_infinity;
                high_data[i].x =
                    high_x_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
                              (conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 2]))
                        : minus_infinity;
                high_data[i].y =
                    high_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
                              (conversion::to<float>(mask[data_id + mask_offset + reduceWidth * 3]))
                        : minus_infinity;
            } else if (alibi) {
                low_data[i].x = low_x_check
                                    ? conversion::to<float>(vals[data_id]) * layer_scale +
                                          (conversion::to<float>(alibi[data_id + alibi_offset]))
                                    : minus_infinity;
                low_data[i].y =
                    low_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale +
                              (conversion::to<float>(alibi[data_id + alibi_offset + reduceWidth]))
                        : minus_infinity;
                high_data[i].x =
                    high_x_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale +
                              (conversion::to<float>(
                                  alibi[data_id + alibi_offset + reduceWidth * 2]))
                        : minus_infinity;
                high_data[i].y =
                    high_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale +
                              (conversion::to<float>(
                                  alibi[data_id + alibi_offset + reduceWidth * 3]))
                        : minus_infinity;
            } else {
                low_data[i].x = low_x_check ? conversion::to<float>(vals[data_id]) * layer_scale
                                            : minus_infinity;
                low_data[i].y =
                    low_y_check ? conversion::to<float>(vals[data_id + reduceWidth]) * layer_scale
                                : minus_infinity;
                high_data[i].x =
                    high_x_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 2]) * layer_scale
                        : minus_infinity;
                high_data[i].y =
                    high_y_check
                        ? conversion::to<float>(vals[data_id + reduceWidth * 3]) * layer_scale
                        : minus_infinity;
            }

            // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id);
            // Track the running row maximum for a numerically stable softmax.
            max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
            max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
            max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
            max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
        }

        // Butterfly max-reduction within the warp.
        for (int i = 1; i < WARP_SIZE; i *= 2) {
            auto temp = g.shfl_xor(max_val, i);
            max_val = (temp > max_val ? temp : max_val);
        }

        // If the reduction group spans multiple warps, combine the per-warp
        // maxima through shared memory, then broadcast the group result.
        if (reduceWidth > WARP_SIZE) {
            if (lane == 0) partialSum[wid] = max_val;
            b.sync();

            if (lane < warp_num) max_val = partialSum[lane];

            b.sync();

            for (int i = 1; i < reduce_blocks; i *= 2) {
                auto temp = g.shfl_xor(max_val, i);
                max_val = (temp > max_val ? temp : max_val);
            }

            max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE);
        }
        // Exponentiate (max-subtracted) and accumulate the per-thread sum.
        float sum = 0;
        for (int i = 0; i < iterations; i++) {
            low_data[i].x = __expf(low_data[i].x - max_val);
            low_data[i].y = __expf(low_data[i].y - max_val);
            high_data[i].x = __expf(high_data[i].x - max_val);
            high_data[i].y = __expf(high_data[i].y - max_val);

            sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
        }

        // Same two-level reduction for the denominator.
        for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i);

        if (reduceWidth > WARP_SIZE) {
            if (lane == 0) partialSum[wid] = sum;
            b.sync();

            if (lane < warp_num) sum = partialSum[lane];

            b.sync();

            for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); }

            sum = g.shfl(sum, threadIdx.x / WARP_SIZE);
        }
        sum += 1e-6;  // avoid division by zero for fully-masked rows
        // Normalize and write back in-place, converting to T.
        for (int i = 0; i < iterations; i++) {
            int data_id = i * (reduceWidth << 2) + (seq_lane);
            if (data_id < sequence_length) {
                vals[data_id] = conversion::to<T>(low_data[i].x / sum);
                if ((data_id + reduceWidth) < sequence_length)
                    vals[data_id + reduceWidth] = conversion::to<T>(low_data[i].y / sum);
                if ((data_id + reduceWidth * 2) < sequence_length)
                    vals[data_id + reduceWidth * 2] = conversion::to<T>(high_data[i].x / sum);
                if ((data_id + reduceWidth * 3) < sequence_length)
                    vals[data_id + reduceWidth * 3] = conversion::to<T>(high_data[i].y / sum);
            }
        }
    }
}
|
| 254 |
+
|
| 255 |
+
// Float overload of the fused masked softmax. Identical scheduling to the
// half/bf16 kernel above, but elements are kept in float4 registers, no
// layer_scale multiply is applied to the loads, and ALiBi is not supported.
template <int iterations>
__global__ void attn_softmax_v2(float* vals,
                                float* attn_mask,
                                float* alibi,
                                float layer_scale,
                                bool triangular,
                                bool recompute,
                                bool local_attention,
                                int window_size,
                                int total_count,
                                int heads,
                                int sequence_length,
                                int num_seq,
                                int head_offset,
                                int mask_stride,
                                int mp_size,
                                int reduceWidth)
{
    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    // 4 elements per iteration per thread. NOTE(review): indexed up to
    // `iterations` (instantiated as high as 64) while sized MAX_REG_SIZE (8)
    // -- confirm the intended sequence-length bound.
    float4 data[MAX_REG_SIZE];

    int wid = threadIdx.x >> 5;     // warp index within the block
    int lane = threadIdx.x & 0x1f;  // lane within the warp
    int warp_num = blockDim.x >> 5;

    int reduce_blocks = reduceWidth >> 5;      // warps per reduction group
    int seq_lane = threadIdx.x % reduceWidth;  // thread position inside the group

    __shared__ float partialSum[MAX_WARP_NUM];

    // Row handled by this reduction group; rows are laid out [batch, head, query].
    int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks);
    if (iter_offset < total_count) {
        vals += (iter_offset * sequence_length);

        int batch_idx = iter_offset / (num_seq * heads);
        int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride);
        mask_offset = mask_offset * sequence_length;
        int seq_id = iter_offset % num_seq;

        // During incremental decoding the query position is shifted by the
        // cached-sequence length; window_* bound local attention.
        int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length);
        int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2))
                                 ? (real_seq_id >> 2) - (window_size >> 2)
                                 : 0;
        int window_stride =
            (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1;

        float max_val = minus_infinity;

        for (int i = 0; i < iterations; i++) {
            // Four strided elements per iteration: data_id, +rw, +2rw, +3rw;
            // validity = in-sequence, causal triangle, local window.
            int data_id = i * (reduceWidth << 2) + (seq_lane);
            bool check = (data_id >> 2) >= window_stride4;
            bool x_check = check && (data_id < sequence_length) &&
                           (!triangular || (data_id <= seq_id)) && (data_id > window_stride);
            bool y_check = check && ((data_id + reduceWidth) < sequence_length) &&
                           (!triangular || ((data_id + reduceWidth) <= seq_id)) &&
                           ((data_id + reduceWidth) > window_stride);
            bool z_check = check && ((data_id + reduceWidth * 2) < sequence_length) &&
                           (!triangular || ((data_id + reduceWidth * 2) <= seq_id)) &&
                           ((data_id + reduceWidth * 2) > window_stride);
            bool w_check = check && ((data_id + reduceWidth * 3) < sequence_length) &&
                           (!triangular || ((data_id + reduceWidth * 3) <= seq_id)) &&
                           ((data_id + reduceWidth * 3) > window_stride);

            // Fold in the additive attention mask; invalid slots get -inf.
            if (attn_mask) {
                data[i].x = x_check ? vals[data_id] + attn_mask[data_id + mask_offset]
                                    : minus_infinity;
                data[i].y = y_check ? vals[data_id + reduceWidth] +
                                          attn_mask[data_id + mask_offset + reduceWidth]
                                    : minus_infinity;
                data[i].z = z_check ? vals[data_id + reduceWidth * 2] +
                                          attn_mask[data_id + mask_offset + reduceWidth * 2]
                                    : minus_infinity;
                data[i].w = w_check ? vals[data_id + reduceWidth * 3] +
                                          attn_mask[data_id + mask_offset + reduceWidth * 3]
                                    : minus_infinity;
            } else {
                data[i].x = x_check ? vals[data_id] : minus_infinity;
                data[i].y = y_check ? vals[data_id + reduceWidth] : minus_infinity;
                data[i].z = z_check ? vals[data_id + reduceWidth * 2] : minus_infinity;
                data[i].w = w_check ? vals[data_id + reduceWidth * 3] : minus_infinity;
            }

            // Running row maximum for a numerically stable softmax.
            max_val = (data[i].x > max_val ? data[i].x : max_val);
            max_val = (data[i].y > max_val ? data[i].y : max_val);
            max_val = (data[i].z > max_val ? data[i].z : max_val);
            max_val = (data[i].w > max_val ? data[i].w : max_val);
        }

        // Butterfly max-reduction within the warp.
        for (int i = 1; i < WARP_SIZE; i *= 2) {
            auto temp = g.shfl_xor(max_val, i);
            max_val = (temp > max_val ? temp : max_val);
        }

        // Cross-warp max via shared memory when the group spans warps.
        if (reduceWidth > WARP_SIZE) {
            if (lane == 0) partialSum[wid] = max_val;
            b.sync();

            if (lane < warp_num) max_val = partialSum[lane];

            b.sync();

            for (int i = 1; i < reduce_blocks; i *= 2) {
                auto temp = g.shfl_xor(max_val, i);
                max_val = (temp > max_val ? temp : max_val);
            }

            max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE);
        }

        // Exponentiate (max-subtracted) and accumulate the denominator.
        float sum = 0;
        for (int i = 0; i < iterations; i++) {
            data[i].x = __expf(data[i].x - max_val);
            data[i].y = __expf(data[i].y - max_val);
            data[i].z = __expf(data[i].z - max_val);
            data[i].w = __expf(data[i].w - max_val);

            sum += (data[i].x + data[i].y + data[i].z + data[i].w);
        }

        for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i);

        if (reduceWidth > WARP_SIZE) {
            if (lane == 0) partialSum[wid] = sum;
            b.sync();

            if (lane < warp_num) sum = partialSum[lane];

            b.sync();

            for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); }

            sum = g.shfl(sum, threadIdx.x / WARP_SIZE);
        }
        sum += 1e-6;  // avoid division by zero for fully-masked rows

        // Normalize and write back in-place.
        for (int i = 0; i < iterations; i++) {
            int data_id = i * (reduceWidth << 2) + (seq_lane);
            if (data_id < sequence_length) {
                vals[data_id] = data[i].x / sum;
                if ((data_id + reduceWidth) < sequence_length)
                    vals[data_id + reduceWidth] = data[i].y / sum;
                if ((data_id + reduceWidth * 2) < sequence_length)
                    vals[data_id + reduceWidth * 2] = data[i].z / sum;
                if ((data_id + reduceWidth * 3) < sequence_length)
                    vals[data_id + reduceWidth * 3] = data[i].w / sum;
            }
        }
    }
}
|
| 406 |
+
|
| 407 |
+
// Launches attn_softmax_v2<T, iterations> with the captured launcher locals
// (grid/block/stream and all kernel arguments). `iterations` must be a
// compile-time constant, hence the macro rather than a runtime argument.
#define LAUNCH_ATTN_SOFTMAX_V2(iterations)                                 \
    attn_softmax_v2<T, iterations><<<grid, block, 0, stream>>>(vals,       \
                                                               mask,       \
                                                               alibi,      \
                                                               layer_scale, \
                                                               triangular, \
                                                               recompute,  \
                                                               local_attention, \
                                                               window_size, \
                                                               total_count, \
                                                               heads,      \
                                                               sequence_length, \
                                                               num_seq,    \
                                                               head_offset, \
                                                               mask_stride, \
                                                               mp_size,    \
                                                               reduce_width);
|
| 424 |
+
|
| 425 |
+
// Host-side scheduler for the fused attention softmax: picks a power-of-2
// reduction width and per-thread iteration count for the sequence length,
// then dispatches the matching kernel instantiation.
template <typename T>
void launch_attn_softmax_v2(T* vals,
                            T* mask,
                            T* alibi,
                            float layer_scale,
                            bool triangular,
                            bool recompute,
                            bool local_attention,
                            int window_size,
                            int batch_size,
                            int heads,
                            int num_seq,
                            int sequence_length,
                            int head_offset,
                            int mask_stride,
                            int mp_size,
                            cudaStream_t stream)
{
    // One softmax row per (batch, head, query) triple.
    const int total_count = batch_size * heads * num_seq;

    // Scheduling Overview
    // 4 element unroll with power of 2 `reduce_width` threads to a ceiling of `attn_threads`
    // Each block should be partitioned into as many `reduce_width` blocks
    // as can be fit.
    constexpr int attn_threads = 256;
    constexpr int min_reduce_width = hw_warp_size;
    constexpr int internal_unroll = 4;

    // Threads needed after the 4-element unroll, rounded to the next power
    // of two and clamped to at least one warp.
    const int thread_steps_rounded =
        next_pow2((sequence_length + internal_unroll - 1) / internal_unroll);
    const int thread_steps_schedule =
        (thread_steps_rounded > min_reduce_width) ? thread_steps_rounded : min_reduce_width;
    // Cap the reduction group at the block size; the excess becomes
    // per-thread iterations.
    const int reduce_width =
        (thread_steps_schedule < attn_threads) ? thread_steps_schedule : attn_threads;
    const int iterations = thread_steps_schedule / reduce_width;
    // Rows serviced per block; exact because reduce_width <= attn_threads.
    const int partitions = attn_threads / reduce_width;

    dim3 grid((total_count + partitions - 1) / partitions);
    dim3 block(attn_threads);

    if (sequence_length > 32768) throw std::runtime_error("Unsupport Seq_Length!");

    // NOTE(review): `iterations` can reach 64 here while the kernel register
    // arrays are sized MAX_REG_SIZE (8) -- confirm the supported upper bound.
    switch (iterations) {
        case 1: LAUNCH_ATTN_SOFTMAX_V2(1); break;
        case 2: LAUNCH_ATTN_SOFTMAX_V2(2); break;
        case 4: LAUNCH_ATTN_SOFTMAX_V2(4); break;
        case 8: LAUNCH_ATTN_SOFTMAX_V2(8); break;
        case 16: LAUNCH_ATTN_SOFTMAX_V2(16); break;
        case 32: LAUNCH_ATTN_SOFTMAX_V2(32); break;
        case 64: LAUNCH_ATTN_SOFTMAX_V2(64); break;
    }
}
|
| 489 |
+
|
| 490 |
+
// Explicit host-side instantiations of the launcher for each supported dtype.
#define INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(T)           \
    template void launch_attn_softmax_v2(T* vals,       \
                                         T* mask,       \
                                         T* alibi,      \
                                         float layer_scale, \
                                         bool triangular, \
                                         bool recompute, \
                                         bool local_attention, \
                                         int window_size, \
                                         int batch_size, \
                                         int heads,      \
                                         int num_seq,    \
                                         int sequence_length, \
                                         int head_offset, \
                                         int mask_stride, \
                                         int mp_size,    \
                                         cudaStream_t stream);

INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(float);
#ifdef BF16_AVAILABLE
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__nv_bfloat16);
#endif
INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__half);

// Explicit device-kernel instantiations for each iteration count the
// launcher's switch can select (half variant).
#define DEF_ATTN_SOFTMAX_V2_HALF(_iter)                                  \
    template __global__ void attn_softmax_v2<__half, _iter>(__half * vals, \
                                                            __half * mask, \
                                                            __half * alibi, \
                                                            float layer_scale, \
                                                            bool triangular, \
                                                            bool recompute, \
                                                            bool local_attention, \
                                                            int window_size, \
                                                            int total_count, \
                                                            int heads,   \
                                                            int sequence_length, \
                                                            int num_seq, \
                                                            int head_offset, \
                                                            int mask_stride, \
                                                            int mp_size, \
                                                            int reduceWidth)

// Same set of instantiations for the bfloat16 variant.
#define DEF_ATTN_SOFTMAX_V2_BF16(_iter)                                                \
    template __global__ void attn_softmax_v2<__nv_bfloat16, _iter>(__nv_bfloat16 * vals, \
                                                                   __nv_bfloat16 * mask, \
                                                                   __nv_bfloat16 * alibi, \
                                                                   float layer_scale, \
                                                                   bool triangular, \
                                                                   bool recompute, \
                                                                   bool local_attention, \
                                                                   int window_size, \
                                                                   int total_count, \
                                                                   int heads,  \
                                                                   int sequence_length, \
                                                                   int num_seq, \
                                                                   int head_offset, \
                                                                   int mask_stride, \
                                                                   int mp_size, \
                                                                   int reduceWidth)

// Applies an instantiation macro to every iteration count used by the
// launcher (must stay in sync with its switch cases).
#define FOREACH_ITERATIONS(cb) \
    cb(1);                     \
    cb(2);                     \
    cb(4);                     \
    cb(8);                     \
    cb(16);                    \
    cb(32);                    \
    cb(64)

FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_HALF);
#ifdef BF16_AVAILABLE
FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_BF16);
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu
ADDED
|
@@ -0,0 +1,727 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#ifndef __HIP_PLATFORM_AMD__
|
| 7 |
+
#include <cuda_profiler_api.h>
|
| 8 |
+
#endif
|
| 9 |
+
#include "conversion_utils.h"
|
| 10 |
+
#include "inference_cuda_layers.h"
|
| 11 |
+
namespace cg = cooperative_groups;
|
| 12 |
+
|
| 13 |
+
// only used to avoid compilation error due to lack of definition.
|
| 14 |
+
#ifndef BF16_AVAILABLE
|
| 15 |
+
using __nv_bfloat162 = __half2;
|
| 16 |
+
#endif
|
| 17 |
+
|
| 18 |
+
// Bias add
|
| 19 |
+
|
| 20 |
+
// Splits the fused QKV activation into the Q output plus K/V caches (float
// variant), applying rotary position embedding (RoPE) to Q and K when
// `rotate_every_two` is set. All pointer arithmetic is in float4 units;
// hidden_dim and rotary_dim arrive pre-divided by 4 from the launcher.
// `bias` and `rotate_half` are accepted for interface parity but unused here.
__global__ void bias_add_transform_0213(float* output,
                                        float* k_cache,
                                        float* v_cache,
                                        const float* vals,
                                        const float* bias,
                                        int hidden_dim,
                                        int seq_length,
                                        unsigned seq_offset,
                                        int heads,
                                        int head_stride,
                                        int num_kv,
                                        int rotary_dim,
                                        bool rotate_half,
                                        bool rotate_every_two,
                                        int head_ext,
                                        int max_out_tokens,
                                        float rope_theta)
{
    int d1_stride = hidden_dim;
    int d2_stride = hidden_dim / heads;  // float4 elements per head

    int d0 = blockIdx.x;              // batch
    int d1 = blockIdx.y;              // sequence position
    int cnt = blockIdx.z / head_ext;  // 0 = Q, 1 = K, 2 = V
    int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext);  // head
    int d3 = threadIdx.x;             // float4 index within the head

    // Q is written at seq_length stride; the K/V caches are sized for
    // max_out_tokens tokens.
    int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens);
    int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens);

    const float4* vals_vec = reinterpret_cast<const float4*>(vals);
    float4* output_vec =
        reinterpret_cast<float4*>(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache));

    // Walk to this (batch, token, projection, head) slice of the fused QKV
    // buffer; with grouped KV (head_stride > 1) several query heads share
    // one K/V head.
    vals_vec += (d0 * (d1_stride + num_kv * 2 * d2_stride) * seq_length);
    vals_vec += d1 * (d1_stride + num_kv * 2 * d2_stride);
    vals_vec += (cnt == 0 ? 0 : d1_stride) + (cnt == 0 ? 0 : (cnt - 1) * num_kv * d2_stride);
    vals_vec += ((cnt == 0 ? d2 : (d2 / head_stride)) * d2_stride);

    output_vec += (d1 * d2_stride);
    output_vec += (d0 * d0_out_stride);
    output_vec += (d2 * d2_out_stride);

    unsigned seq_id = d1 + seq_offset;  // absolute token position for RoPE
    float4 inputs = vals_vec[d3];
    if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) {
        // Rotate Q/K: each float4 carries two adjacent (x, y) rotation pairs.
        float2* q_f = reinterpret_cast<float2*>(&inputs);
        if (rotate_every_two) {
#pragma unroll
            for (int o = 0; o < 2; o++) {
                float inv_freq = (float)(((d3 << 1) + o) * 2) / (float)(rotary_dim << 2);
                inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_id;
                // Bug fix: snapshot the pair before rotating. The previous
                // code overwrote q_f[o].x and then read the *rotated* value
                // when computing q_f[o].y, corrupting the rotation. The
                // half/bf16 kernel below already saves the pair first.
                const float x0 = q_f[o].x;
                const float y0 = q_f[o].y;
                q_f[o].x = (-1.0 * y0 * sinf(inv_freq) + x0 * cosf(inv_freq));
                q_f[o].y = (x0 * sinf(inv_freq) + y0 * cosf(inv_freq));
            }
        }
    }
    output_vec[d3] = inputs;
}
|
| 83 |
+
|
| 84 |
+
#define ATTN_H 3
|
| 85 |
+
#define MAX_SEQ_LINE 10
|
| 86 |
+
|
| 87 |
+
// Splits the fused QKV activation into the Q output plus K/V caches for
// half/bf16 activations, applying rotary position embedding (RoPE) to Q and
// K when `rotate_every_two` is set. Pointers are addressed in float4 units
// (8 half/bf16 elements = 4 T2 pairs per thread element); hidden_dim and
// rotary_dim arrive pre-divided by the launcher. `bias`, `all_tokens` and
// `rotate_half` are accepted but unused in this path.
template <typename T>
__global__ void bias_add_transform_0213(T* output,  // q
                                        T* k_cache,
                                        T* v_cache,
                                        const T* vals,  // qkv
                                        const T* bias,
                                        int hidden_dim,
                                        int seq_length,
                                        unsigned seq_offset,
                                        int all_tokens,
                                        int heads,
                                        int head_stride,
                                        int num_kv,
                                        int rotary_dim,
                                        bool rotate_half,
                                        bool rotate_every_two,
                                        int head_ext,
                                        int max_out_tokens,
                                        float rope_theta)
{
    // T2 is the paired 16-bit type matching T (half2 or bfloat162).
    using T2 =
        typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
    unsigned half_dim = (rotary_dim << 3) >> 1;  // unused
    int d0_stride = hidden_dim * seq_length;     // unused
    int d1_stride = hidden_dim;
    int d2_stride = hidden_dim / heads;  // float4 elements per head

    int d0 = blockIdx.x;              // Batch
    int d1 = blockIdx.y;              // Sequence ID (0-127)
    int cnt = blockIdx.z / head_ext;  // Hidden count: 0 = Q, 1 = K, 2 = V
    int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext);  // Head (0-11)
    int d3 = threadIdx.x;             // Values (groups of 4)

    // Q is written at seq_length stride; K/V caches hold max_out_tokens.
    int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens);
    int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens);

    float4 vals_arr;    // unused
    float4 output_arr;  // unused

    T2* vals_half = reinterpret_cast<T2*>(&vals_arr);      // unused
    T2* output_half = reinterpret_cast<T2*>(&output_arr);  // unused

    const float4* vals_vec = reinterpret_cast<const float4*>(vals);
    float4* output_vec =
        reinterpret_cast<float4*>(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache));

    // Walk to this (batch, token, projection, head) slice of the fused QKV
    // buffer; with grouped KV (head_stride > 1) several query heads share
    // one K/V head.
    vals_vec += (d0 * (d1_stride + num_kv * 2 * d2_stride) * seq_length);
    vals_vec += (d1 * (d1_stride + num_kv * 2 * d2_stride));
    vals_vec += (cnt == 0 ? 0 : d1_stride) + (cnt == 0 ? 0 : (cnt - 1) * num_kv * d2_stride);
    vals_vec += ((cnt == 0 ? d2 : (d2 / head_stride)) * d2_stride);

    output_vec += (d1 * d2_stride);
    output_vec += (d0 * d0_out_stride);
    output_vec += (d2 * d2_out_stride);

    unsigned seq_id = d1 + seq_offset;  // absolute token position for RoPE

    int lane = d3 & 0x1f;  // unused
    if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) {
        // Rotate Q/K: each float4 carries four adjacent T2 rotation pairs.
        float4 q = vals_vec[d3];
        T2* q_h = reinterpret_cast<T2*>(&q);
        if (rotate_every_two) {
#pragma unroll
            for (int o = 0; o < 4; o++) {
                float inv_freq = (float)(((d3 << 2) + o) * 2) / (float)(rotary_dim << 3);
                inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_id;
                // Snapshot the pair in float before rotating so the second
                // component is computed from the original values.
                float q_data[2];
                q_data[0] = conversion::to<float>(q_h[o].x);
                q_data[1] = conversion::to<float>(q_h[o].y);
                q_h[o].x = conversion::to<T>(-1.0 * q_data[1] * sinf(inv_freq) +
                                             q_data[0] * cosf(inv_freq));
                q_h[o].y =
                    conversion::to<T>(q_data[0] * sinf(inv_freq) + q_data[1] * cosf(inv_freq));
            }
        }
        output_vec[d3] = q;
    } else
        output_vec[d3] = vals_vec[d3];
}
|
| 166 |
+
|
| 167 |
+
// [B S C*H] - > C * [B A S N]
|
| 168 |
+
template <>
|
| 169 |
+
void launch_bias_add_transform_0213<float>(float* output,
|
| 170 |
+
float* k_cache,
|
| 171 |
+
float* v_cache,
|
| 172 |
+
const float* vals,
|
| 173 |
+
const float* bias,
|
| 174 |
+
int batch_size,
|
| 175 |
+
int seq_length,
|
| 176 |
+
unsigned seq_offset,
|
| 177 |
+
int all_tokens,
|
| 178 |
+
int hidden_dim,
|
| 179 |
+
int heads,
|
| 180 |
+
int num_kv,
|
| 181 |
+
int rotary_dim,
|
| 182 |
+
bool rotate_half,
|
| 183 |
+
bool rotate_every_two,
|
| 184 |
+
cudaStream_t stream,
|
| 185 |
+
int trans_count,
|
| 186 |
+
int max_out_tokens,
|
| 187 |
+
float rope_theta)
|
| 188 |
+
{
|
| 189 |
+
hidden_dim >>= 2;
|
| 190 |
+
int head_ext = (hidden_dim - 1) / MAX_THREADS + 1;
|
| 191 |
+
|
| 192 |
+
dim3 block_dim(hidden_dim / heads, (heads / head_ext));
|
| 193 |
+
dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext));
|
| 194 |
+
|
| 195 |
+
bias_add_transform_0213<<<grid_dim, block_dim, 0, stream>>>(output,
|
| 196 |
+
k_cache,
|
| 197 |
+
v_cache,
|
| 198 |
+
vals,
|
| 199 |
+
bias,
|
| 200 |
+
hidden_dim,
|
| 201 |
+
seq_length,
|
| 202 |
+
seq_offset,
|
| 203 |
+
heads,
|
| 204 |
+
num_kv > 0 ? (heads / num_kv) : 1,
|
| 205 |
+
num_kv > 0 ? num_kv : heads,
|
| 206 |
+
rotary_dim >> 2,
|
| 207 |
+
rotate_half,
|
| 208 |
+
rotate_every_two,
|
| 209 |
+
head_ext,
|
| 210 |
+
max_out_tokens,
|
| 211 |
+
rope_theta);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
template <typename T>
|
| 215 |
+
void launch_bias_add_transform_0213(T* output,
|
| 216 |
+
T* k_cache,
|
| 217 |
+
T* v_cache,
|
| 218 |
+
const T* vals,
|
| 219 |
+
const T* bias,
|
| 220 |
+
int batch_size,
|
| 221 |
+
int seq_length,
|
| 222 |
+
unsigned seq_offset,
|
| 223 |
+
int all_tokens,
|
| 224 |
+
int hidden_dim,
|
| 225 |
+
int heads,
|
| 226 |
+
int num_kv,
|
| 227 |
+
int rotary_dim,
|
| 228 |
+
bool rotate_half,
|
| 229 |
+
bool rotate_every_two,
|
| 230 |
+
cudaStream_t stream,
|
| 231 |
+
int trans_count,
|
| 232 |
+
int max_out_tokens,
|
| 233 |
+
float rope_theta)
|
| 234 |
+
{
|
| 235 |
+
hidden_dim >>= 3;
|
| 236 |
+
int head_ext = 1; // (hidden_dim - 1) / MAX_THREADS + 1;
|
| 237 |
+
dim3 block_dim(hidden_dim / heads, (heads / head_ext));
|
| 238 |
+
dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext));
|
| 239 |
+
bias_add_transform_0213<<<grid_dim, block_dim, 0, stream>>>(output,
|
| 240 |
+
k_cache,
|
| 241 |
+
v_cache,
|
| 242 |
+
vals,
|
| 243 |
+
bias,
|
| 244 |
+
hidden_dim,
|
| 245 |
+
seq_length,
|
| 246 |
+
seq_offset,
|
| 247 |
+
all_tokens,
|
| 248 |
+
heads,
|
| 249 |
+
num_kv > 0 ? (heads / num_kv) : 1,
|
| 250 |
+
num_kv > 0 ? num_kv : heads,
|
| 251 |
+
rotary_dim >> 3,
|
| 252 |
+
rotate_half,
|
| 253 |
+
rotate_every_two,
|
| 254 |
+
head_ext,
|
| 255 |
+
max_out_tokens,
|
| 256 |
+
rope_theta);
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
#define INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(T) \
|
| 260 |
+
template void launch_bias_add_transform_0213<T>(T*, \
|
| 261 |
+
T*, \
|
| 262 |
+
T*, \
|
| 263 |
+
const T*, \
|
| 264 |
+
const T*, \
|
| 265 |
+
int, \
|
| 266 |
+
int, \
|
| 267 |
+
unsigned, \
|
| 268 |
+
int, \
|
| 269 |
+
int, \
|
| 270 |
+
int, \
|
| 271 |
+
int, \
|
| 272 |
+
int, \
|
| 273 |
+
bool, \
|
| 274 |
+
bool, \
|
| 275 |
+
cudaStream_t, \
|
| 276 |
+
int, \
|
| 277 |
+
int, \
|
| 278 |
+
float)
|
| 279 |
+
|
| 280 |
+
#ifdef BF16_AVAILABLE
|
| 281 |
+
INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(__nv_bfloat16);
|
| 282 |
+
#endif
|
| 283 |
+
INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(__half);
|
| 284 |
+
|
| 285 |
+
// Bias add
|
| 286 |
+
|
| 287 |
+
__global__ void pad_add_transform_0213(float* output,
|
| 288 |
+
const float* vals,
|
| 289 |
+
int hidden_dim,
|
| 290 |
+
int seq_length,
|
| 291 |
+
int padded_seq_len,
|
| 292 |
+
int heads,
|
| 293 |
+
int padded_head_size)
|
| 294 |
+
{
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
template <typename T>
|
| 298 |
+
__global__ void pad_add_transform_0213(T* output,
|
| 299 |
+
const T* vals,
|
| 300 |
+
int hidden_dim,
|
| 301 |
+
int seq_length,
|
| 302 |
+
int padded_seq_len,
|
| 303 |
+
int heads,
|
| 304 |
+
int padded_head_size)
|
| 305 |
+
{
|
| 306 |
+
using T2 =
|
| 307 |
+
typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
|
| 308 |
+
float4 ZERO;
|
| 309 |
+
const T2 zero_h = conversion::to<T2>(0.f);
|
| 310 |
+
T2* ZERO_h = reinterpret_cast<T2*>(&ZERO);
|
| 311 |
+
#pragma unroll
|
| 312 |
+
for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
|
| 313 |
+
|
| 314 |
+
int d0_stride = hidden_dim * seq_length;
|
| 315 |
+
int d1_stride = hidden_dim;
|
| 316 |
+
int d2_stride = hidden_dim / heads;
|
| 317 |
+
|
| 318 |
+
int d0 = blockIdx.x; // Batch
|
| 319 |
+
int d1 = blockIdx.y * blockDim.z + threadIdx.z; // Sequence ID (0-127)
|
| 320 |
+
int d2 = threadIdx.y; // Head (0-11)
|
| 321 |
+
int d3 = threadIdx.x; // Values (groups of 4)
|
| 322 |
+
|
| 323 |
+
int d2_out_stride = padded_head_size * padded_seq_len;
|
| 324 |
+
int d0_out_stride = heads * d2_out_stride;
|
| 325 |
+
|
| 326 |
+
const float4* vals_vec = reinterpret_cast<const float4*>(vals);
|
| 327 |
+
float4* output_vec = reinterpret_cast<float4*>(output);
|
| 328 |
+
|
| 329 |
+
vals_vec += (d0 * d0_stride);
|
| 330 |
+
vals_vec += (d1 * d1_stride);
|
| 331 |
+
vals_vec += (d2 * d2_stride);
|
| 332 |
+
|
| 333 |
+
output_vec += (d1 * padded_head_size);
|
| 334 |
+
output_vec += (d0 * d0_out_stride);
|
| 335 |
+
output_vec += (d2 * d2_out_stride);
|
| 336 |
+
|
| 337 |
+
if (d3 < d2_stride && d1 < seq_length)
|
| 338 |
+
output_vec[d3] = vals_vec[d3];
|
| 339 |
+
else
|
| 340 |
+
output_vec[d3] = ZERO;
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
// [B S C*H] - > C * [B A S N]
|
| 344 |
+
template <>
|
| 345 |
+
void launch_pad_add_transform_0213<float>(float* output,
|
| 346 |
+
const float* vals,
|
| 347 |
+
int batch_size,
|
| 348 |
+
int hidden_dim,
|
| 349 |
+
int seq_length,
|
| 350 |
+
int padded_seq_len,
|
| 351 |
+
int heads,
|
| 352 |
+
int padded_head_size,
|
| 353 |
+
cudaStream_t stream)
|
| 354 |
+
{
|
| 355 |
+
}
|
| 356 |
+
|
| 357 |
+
template <typename T>
|
| 358 |
+
void launch_pad_add_transform_0213(T* output,
|
| 359 |
+
const T* vals,
|
| 360 |
+
int batch_size,
|
| 361 |
+
int hidden_dim,
|
| 362 |
+
int seq_length,
|
| 363 |
+
int padded_seq_len,
|
| 364 |
+
int heads,
|
| 365 |
+
int padded_head_size,
|
| 366 |
+
cudaStream_t stream)
|
| 367 |
+
{
|
| 368 |
+
hidden_dim >>= 3;
|
| 369 |
+
dim3 block_dim((padded_head_size >> 3), heads, 2);
|
| 370 |
+
dim3 grid_dim(batch_size, padded_seq_len / 2);
|
| 371 |
+
pad_add_transform_0213<<<grid_dim, block_dim, 0, stream>>>(
|
| 372 |
+
output, vals, hidden_dim, seq_length, padded_seq_len, heads, padded_head_size >> 3);
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
#define INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(T) \
|
| 376 |
+
template void launch_pad_add_transform_0213<T>( \
|
| 377 |
+
T*, const T*, int, int, int, int, int, int, cudaStream_t);
|
| 378 |
+
|
| 379 |
+
INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(__half);
|
| 380 |
+
#ifdef BF16_AVAILABLE
|
| 381 |
+
INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(__nv_bfloat16);
|
| 382 |
+
#endif
|
| 383 |
+
|
| 384 |
+
// Bias add
|
| 385 |
+
template <typename T>
|
| 386 |
+
__global__ void bias_add_transform_0213(T* output,
|
| 387 |
+
const T* vals,
|
| 388 |
+
const T* bias,
|
| 389 |
+
int hidden_dim,
|
| 390 |
+
int seq_length,
|
| 391 |
+
int heads,
|
| 392 |
+
int head_ext);
|
| 393 |
+
|
| 394 |
+
template <>
|
| 395 |
+
__global__ void bias_add_transform_0213<float>(float* output,
|
| 396 |
+
const float* vals,
|
| 397 |
+
const float* bias,
|
| 398 |
+
int hidden_dim,
|
| 399 |
+
int seq_length,
|
| 400 |
+
int heads,
|
| 401 |
+
int head_ext)
|
| 402 |
+
{
|
| 403 |
+
int d0_stride = hidden_dim * seq_length;
|
| 404 |
+
int d1_stride = hidden_dim;
|
| 405 |
+
int d2_stride = hidden_dim / heads;
|
| 406 |
+
|
| 407 |
+
int d0_out_stride = d0_stride;
|
| 408 |
+
int d1_out_stride = d2_stride;
|
| 409 |
+
int d2_out_stride = d2_stride * seq_length;
|
| 410 |
+
|
| 411 |
+
int d0 = blockIdx.x; // Batch
|
| 412 |
+
int d1 = blockIdx.y; // Sequence ID (0-127)
|
| 413 |
+
int cnt = blockIdx.z / head_ext; // Hidden count
|
| 414 |
+
int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11)
|
| 415 |
+
int d3 = threadIdx.x; // Values (groups of 4)
|
| 416 |
+
|
| 417 |
+
const float4* vals_vec = reinterpret_cast<const float4*>(vals);
|
| 418 |
+
const float4* bias_vec = reinterpret_cast<const float4*>(bias);
|
| 419 |
+
float4* output_vec = reinterpret_cast<float4*>(output);
|
| 420 |
+
|
| 421 |
+
float4 inputs = vals_vec[d0 * d0_stride * (gridDim.z / head_ext) + cnt * d1_stride +
|
| 422 |
+
d1 * d1_stride * (gridDim.z / head_ext) + d2 * d2_stride + d3];
|
| 423 |
+
float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3];
|
| 424 |
+
|
| 425 |
+
float4 outputs;
|
| 426 |
+
outputs.x = inputs.x + biases.x;
|
| 427 |
+
outputs.y = inputs.y + biases.y;
|
| 428 |
+
outputs.z = inputs.z + biases.z;
|
| 429 |
+
outputs.w = inputs.w + biases.w;
|
| 430 |
+
|
| 431 |
+
output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride +
|
| 432 |
+
d2 * d2_out_stride + d3] = outputs;
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
template <typename T>
|
| 436 |
+
__global__ void bias_add_transform_0213(T* output,
|
| 437 |
+
const T* vals,
|
| 438 |
+
const T* bias,
|
| 439 |
+
int hidden_dim,
|
| 440 |
+
int seq_length,
|
| 441 |
+
int heads,
|
| 442 |
+
int head_ext)
|
| 443 |
+
{
|
| 444 |
+
using T2 =
|
| 445 |
+
typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
|
| 446 |
+
int d0_stride = hidden_dim * seq_length;
|
| 447 |
+
int d1_stride = hidden_dim;
|
| 448 |
+
int d2_stride = hidden_dim / heads;
|
| 449 |
+
|
| 450 |
+
int d2_out_stride = d2_stride * seq_length;
|
| 451 |
+
|
| 452 |
+
int d0 = blockIdx.x; // Batch
|
| 453 |
+
int d1 = blockIdx.y; // Sequence ID (0-127)
|
| 454 |
+
int cnt = blockIdx.z / head_ext; // Hidden count
|
| 455 |
+
int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11)
|
| 456 |
+
int d3 = threadIdx.x; // Values (groups of 4)
|
| 457 |
+
|
| 458 |
+
float4 vals_arr;
|
| 459 |
+
float4 bias_arr;
|
| 460 |
+
float4 output_arr;
|
| 461 |
+
T2* vals_half = reinterpret_cast<T2*>(&vals_arr);
|
| 462 |
+
T2* bias_half = reinterpret_cast<T2*>(&bias_arr);
|
| 463 |
+
T2* output_half = reinterpret_cast<T2*>(&output_arr);
|
| 464 |
+
|
| 465 |
+
const float4* vals_vec = reinterpret_cast<const float4*>(vals);
|
| 466 |
+
const float4* bias_vec = reinterpret_cast<const float4*>(bias);
|
| 467 |
+
float4* output_vec = reinterpret_cast<float4*>(output);
|
| 468 |
+
|
| 469 |
+
vals_vec += (d0 * d0_stride * (gridDim.z / head_ext));
|
| 470 |
+
vals_vec += (d1 * d1_stride * (gridDim.z / head_ext));
|
| 471 |
+
vals_vec += (cnt * d1_stride);
|
| 472 |
+
vals_vec += (d2 * d2_stride);
|
| 473 |
+
|
| 474 |
+
bias_vec += (cnt * d1_stride);
|
| 475 |
+
bias_vec += (d2 * d2_stride);
|
| 476 |
+
|
| 477 |
+
output_vec += (cnt * d0_stride * gridDim.x);
|
| 478 |
+
output_vec += (d1 * d2_stride);
|
| 479 |
+
output_vec += (d0 * d0_stride);
|
| 480 |
+
output_vec += (d2 * d2_out_stride);
|
| 481 |
+
|
| 482 |
+
bias_arr = bias_vec[d3];
|
| 483 |
+
vals_arr = vals_vec[d3];
|
| 484 |
+
|
| 485 |
+
output_half[0] = vals_half[0] + bias_half[0];
|
| 486 |
+
output_half[1] = vals_half[1] + bias_half[1];
|
| 487 |
+
output_half[2] = vals_half[2] + bias_half[2];
|
| 488 |
+
output_half[3] = vals_half[3] + bias_half[3];
|
| 489 |
+
output_vec[d3] = output_arr;
|
| 490 |
+
}
|
| 491 |
+
|
| 492 |
+
template <typename T>
|
| 493 |
+
__global__ void bias_add_transform_0213_v2(T* output,
|
| 494 |
+
const T* vals,
|
| 495 |
+
const T* bias,
|
| 496 |
+
int hidden_dim,
|
| 497 |
+
int seq_length,
|
| 498 |
+
int heads)
|
| 499 |
+
{
|
| 500 |
+
using T2 =
|
| 501 |
+
typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
|
| 502 |
+
__shared__ float4 in_data[3072];
|
| 503 |
+
|
| 504 |
+
int d0_stride = hidden_dim * seq_length;
|
| 505 |
+
int d1_stride = hidden_dim;
|
| 506 |
+
int d2_stride = hidden_dim / heads;
|
| 507 |
+
int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8
|
| 508 |
+
int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8
|
| 509 |
+
|
| 510 |
+
int d0_out_stride = d0_stride;
|
| 511 |
+
int d1_out_stride = d2_stride;
|
| 512 |
+
int d2_out_stride = d2_stride * seq_length;
|
| 513 |
+
|
| 514 |
+
int d0 = blockIdx.x; // Batch
|
| 515 |
+
int d1 = blockIdx.y; // Sequence ID (0-127)
|
| 516 |
+
int cnt = threadIdx.z; // blockIdx.z; // Hidden count
|
| 517 |
+
int d2 = threadIdx.y; // Head (0-11)
|
| 518 |
+
int d3 = threadIdx.x; // Values (groups of 4)
|
| 519 |
+
|
| 520 |
+
float4 vals_arr[1];
|
| 521 |
+
float4 bias_arr[1];
|
| 522 |
+
float4 output_arr[1];
|
| 523 |
+
T2* vals_half = reinterpret_cast<T2*>(vals_arr);
|
| 524 |
+
T2* bias_half = reinterpret_cast<T2*>(bias_arr);
|
| 525 |
+
T2* output_half = reinterpret_cast<T2*>(output_arr);
|
| 526 |
+
|
| 527 |
+
const float4* vals_vec = reinterpret_cast<const float4*>(vals);
|
| 528 |
+
const float4* bias_vec = reinterpret_cast<const float4*>(bias);
|
| 529 |
+
float4* output_vec = reinterpret_cast<float4*>(output);
|
| 530 |
+
|
| 531 |
+
int iter_index = cnt * d1_stride + d2 * d2_stride + d3;
|
| 532 |
+
int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1);
|
| 533 |
+
bias_arr[0] = bias_vec[iter_index];
|
| 534 |
+
|
| 535 |
+
#pragma unroll
|
| 536 |
+
for (int iter = 0; iter < 2; iter++) {
|
| 537 |
+
int iter_id = iter * iteration_stride + iter_index;
|
| 538 |
+
vals_arr[0] = vals_vec[input_offset + iter_id];
|
| 539 |
+
|
| 540 |
+
output_half[0] = vals_half[0] + bias_half[0];
|
| 541 |
+
output_half[1] = vals_half[1] + bias_half[1];
|
| 542 |
+
output_half[2] = vals_half[2] + bias_half[2];
|
| 543 |
+
output_half[3] = vals_half[3] + bias_half[3];
|
| 544 |
+
|
| 545 |
+
in_data[iter_id] = output_arr[0];
|
| 546 |
+
}
|
| 547 |
+
__syncthreads();
|
| 548 |
+
|
| 549 |
+
iteration_stride = blockDim.z * (blockDim.y >> 1);
|
| 550 |
+
int matrix_stride = (d0_out_stride * gridDim.x);
|
| 551 |
+
int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1);
|
| 552 |
+
|
| 553 |
+
int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride;
|
| 554 |
+
|
| 555 |
+
#pragma unroll
|
| 556 |
+
for (int iter = 0; iter < 2; iter++) {
|
| 557 |
+
int iter_row = (iter * iteration_stride) + head_count;
|
| 558 |
+
int iter_offset =
|
| 559 |
+
(iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride;
|
| 560 |
+
output_vec[out_index + iter_offset] =
|
| 561 |
+
in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)];
|
| 562 |
+
}
|
| 563 |
+
}
|
| 564 |
+
|
| 565 |
+
template <typename T>
|
| 566 |
+
__global__ void transform4d_0213(T* out,
|
| 567 |
+
const T* in,
|
| 568 |
+
int heads,
|
| 569 |
+
int seq_length,
|
| 570 |
+
int hidden_dim,
|
| 571 |
+
int head_ext);
|
| 572 |
+
|
| 573 |
+
template <>
|
| 574 |
+
__global__ void transform4d_0213<float>(float* out,
|
| 575 |
+
const float* in,
|
| 576 |
+
int heads,
|
| 577 |
+
int seq_length,
|
| 578 |
+
int hidden_dim,
|
| 579 |
+
int head_ext)
|
| 580 |
+
{
|
| 581 |
+
int d0_stride = hidden_dim * seq_length;
|
| 582 |
+
int d1_stride = d0_stride / heads;
|
| 583 |
+
int d2_stride = hidden_dim / heads;
|
| 584 |
+
|
| 585 |
+
int d0_out_stride = d0_stride;
|
| 586 |
+
int d1_out_stride = d2_stride;
|
| 587 |
+
int d2_out_stride = hidden_dim;
|
| 588 |
+
|
| 589 |
+
int d0 = blockIdx.x; // Batch
|
| 590 |
+
int d1 = blockIdx.y / ((seq_length - 1) / blockDim.y + 1); // Head
|
| 591 |
+
int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length;
|
| 592 |
+
int cnt = blockIdx.z;
|
| 593 |
+
int d3 = threadIdx.x; // Values (groups of 8)
|
| 594 |
+
|
| 595 |
+
if (d2 < seq_length) {
|
| 596 |
+
const float4* in_vec = reinterpret_cast<const float4*>(in);
|
| 597 |
+
float4* out_vec = reinterpret_cast<float4*>(out);
|
| 598 |
+
|
| 599 |
+
float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride +
|
| 600 |
+
d2 * d2_stride + d3];
|
| 601 |
+
out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride +
|
| 602 |
+
d2 * d2_out_stride * gridDim.z + d3] = vals_vec;
|
| 603 |
+
}
|
| 604 |
+
}
|
| 605 |
+
|
| 606 |
+
template <typename T>
|
| 607 |
+
__global__ void transform4d_0213(T* out,
|
| 608 |
+
const T* in,
|
| 609 |
+
int heads,
|
| 610 |
+
int seq_length,
|
| 611 |
+
int hidden_dim,
|
| 612 |
+
int head_ext)
|
| 613 |
+
{
|
| 614 |
+
int d0_stride = hidden_dim * (seq_length / head_ext);
|
| 615 |
+
int d1_stride = hidden_dim;
|
| 616 |
+
int d2_stride = hidden_dim / heads;
|
| 617 |
+
|
| 618 |
+
int d0 = blockIdx.x; // Batch
|
| 619 |
+
int d1 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head
|
| 620 |
+
int d2 = blockIdx.z / head_ext; // Sequence
|
| 621 |
+
int cnt = blockIdx.y; // Hidden count
|
| 622 |
+
int d3 = threadIdx.x; // Values (groups of 8)
|
| 623 |
+
|
| 624 |
+
const float4* in_vec = reinterpret_cast<const float4*>(in);
|
| 625 |
+
float4* out_vec = reinterpret_cast<float4*>(out);
|
| 626 |
+
|
| 627 |
+
in_vec += (cnt * d0_stride * gridDim.x);
|
| 628 |
+
in_vec += (d0 * d0_stride);
|
| 629 |
+
in_vec += (d2 * d2_stride);
|
| 630 |
+
in_vec += (d1 * d2_stride * seq_length);
|
| 631 |
+
|
| 632 |
+
out_vec += (cnt * d1_stride);
|
| 633 |
+
out_vec += (d1 * d2_stride);
|
| 634 |
+
out_vec += (d0 * d0_stride * gridDim.y);
|
| 635 |
+
out_vec += (d2 * d1_stride * gridDim.y);
|
| 636 |
+
|
| 637 |
+
out_vec[d3] = in_vec[d3];
|
| 638 |
+
}
|
| 639 |
+
|
| 640 |
+
template <typename T>
|
| 641 |
+
__global__ void transform4d_0213_v2(T* out, const T* in, int heads, int seq_length, int hidden_dim)
|
| 642 |
+
{
|
| 643 |
+
__shared__ float4 in_data[3072];
|
| 644 |
+
|
| 645 |
+
int d0_stride = hidden_dim * seq_length;
|
| 646 |
+
int d1_stride = hidden_dim;
|
| 647 |
+
int d2_stride = hidden_dim / heads;
|
| 648 |
+
|
| 649 |
+
int d0 = blockIdx.x; // Batch
|
| 650 |
+
int d1 = threadIdx.y; // Head
|
| 651 |
+
int d2 = blockIdx.y; // Sequence
|
| 652 |
+
int cnt = threadIdx.z; // Hidden count
|
| 653 |
+
int d3 = threadIdx.x; // Values (groups of 8)
|
| 654 |
+
|
| 655 |
+
const float4* in_vec = reinterpret_cast<const float4*>(in);
|
| 656 |
+
float4* out_vec = reinterpret_cast<float4*>(out);
|
| 657 |
+
|
| 658 |
+
int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + (d1 % 2) * d2_stride;
|
| 659 |
+
int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1);
|
| 660 |
+
int iteration_stride = blockDim.z * (blockDim.y >> 1);
|
| 661 |
+
int matrix_stride = (d0_stride * gridDim.x);
|
| 662 |
+
|
| 663 |
+
#pragma unroll
|
| 664 |
+
for (int iter = 0; iter < 2; iter++) {
|
| 665 |
+
int iter_row = iter * iteration_stride + head_count;
|
| 666 |
+
int iter_offset = (iter_row % blockDim.y) * d2_stride;
|
| 667 |
+
|
| 668 |
+
in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] =
|
| 669 |
+
in_vec[input_offset + iter_offset * seq_length +
|
| 670 |
+
(iter_row / blockDim.y) * matrix_stride];
|
| 671 |
+
}
|
| 672 |
+
__syncthreads();
|
| 673 |
+
|
| 674 |
+
iteration_stride = d1_stride * blockDim.z;
|
| 675 |
+
int iter_index = cnt * d1_stride + d1 * d2_stride + d3;
|
| 676 |
+
int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1);
|
| 677 |
+
|
| 678 |
+
#pragma unroll
|
| 679 |
+
for (int iter = 0; iter < 2; iter++) {
|
| 680 |
+
int iter_id = iter * iteration_stride + iter_index;
|
| 681 |
+
out_vec[output_offset + iter_id] = in_data[iter_id];
|
| 682 |
+
}
|
| 683 |
+
}
|
| 684 |
+
|
| 685 |
+
// 3 * [B A S N] - > [B S C*H]
|
| 686 |
+
template <>
|
| 687 |
+
void launch_transform4d_0213<float>(float* out,
|
| 688 |
+
const float* in,
|
| 689 |
+
int batch_size,
|
| 690 |
+
int heads,
|
| 691 |
+
int seq_length,
|
| 692 |
+
int hidden_dim,
|
| 693 |
+
cudaStream_t stream,
|
| 694 |
+
int trans_count)
|
| 695 |
+
{
|
| 696 |
+
hidden_dim >>= 2;
|
| 697 |
+
dim3 grid_dims(batch_size, heads * ((seq_length - 1) / 8 + 1), trans_count);
|
| 698 |
+
dim3 block_dims(hidden_dim / heads, 8);
|
| 699 |
+
transform4d_0213<float>
|
| 700 |
+
<<<grid_dims, block_dims, 0, stream>>>(out, in, heads, seq_length, hidden_dim, 1);
|
| 701 |
+
}
|
| 702 |
+
|
| 703 |
+
template <typename T>
|
| 704 |
+
void launch_transform4d_0213(T* out,
|
| 705 |
+
const T* in,
|
| 706 |
+
int batch_size,
|
| 707 |
+
int heads,
|
| 708 |
+
int seq_length,
|
| 709 |
+
int hidden_dim,
|
| 710 |
+
cudaStream_t stream,
|
| 711 |
+
int trans_count)
|
| 712 |
+
{
|
| 713 |
+
hidden_dim >>= 3;
|
| 714 |
+
int head_ext = (hidden_dim - 1) / MAX_THREADS + 1;
|
| 715 |
+
dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext));
|
| 716 |
+
dim3 block_dims(hidden_dim / heads, (heads / head_ext));
|
| 717 |
+
transform4d_0213<<<grid_dims, block_dims, 0, stream>>>(
|
| 718 |
+
out, in, heads, seq_length, hidden_dim, head_ext);
|
| 719 |
+
}
|
| 720 |
+
|
| 721 |
+
#define INSTANTIATE_2B_LAUNCH_TRANSFORM4D(T) \
|
| 722 |
+
template void launch_transform4d_0213<T>(T*, const T*, int, int, int, int, cudaStream_t, int);
|
| 723 |
+
|
| 724 |
+
INSTANTIATE_2B_LAUNCH_TRANSFORM4D(__half)
|
| 725 |
+
#ifdef BF16_AVAILABLE
|
| 726 |
+
INSTANTIATE_2B_LAUNCH_TRANSFORM4D(__nv_bfloat16)
|
| 727 |
+
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/cuda/CUDAStream.h>
|
| 9 |
+
#include <cuda_runtime_api.h>
|
| 10 |
+
#include <cassert>
|
| 11 |
+
#include <iostream>
|
| 12 |
+
#include <vector>
|
| 13 |
+
#include "cublas_v2.h"
|
| 14 |
+
#include "cuda.h"
|
| 15 |
+
|
| 16 |
+
#define MEGABYTE (1024 * 1024)
|
| 17 |
+
#define GIGABYTE (1024 * 1024 * 1024)
|
| 18 |
+
|
| 19 |
+
// TODO: refactor out
|
| 20 |
+
#define WARP_SIZE 32
|
| 21 |
+
|
| 22 |
+
#define CUDA_CHECK(callstr) \
|
| 23 |
+
{ \
|
| 24 |
+
cudaError_t error_code = callstr; \
|
| 25 |
+
if (error_code != cudaSuccess) { \
|
| 26 |
+
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
|
| 27 |
+
assert(0); \
|
| 28 |
+
} \
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
#define CUDA_1D_KERNEL_LOOP(i, n) \
|
| 32 |
+
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
|
| 33 |
+
|
| 34 |
+
#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \
|
| 35 |
+
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \
|
| 36 |
+
for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y)
|
| 37 |
+
|
| 38 |
+
#define DS_CUDA_NUM_THREADS 512
|
| 39 |
+
#define DS_MAXIMUM_NUM_BLOCKS 262144
|
| 40 |
+
|
| 41 |
+
inline int DS_GET_BLOCKS(const int N)
|
| 42 |
+
{
|
| 43 |
+
return std::max(
|
| 44 |
+
std::min((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS),
|
| 45 |
+
// Use at least 1 block, since CUDA does not allow empty block
|
| 46 |
+
1);
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
class InferenceContext {
|
| 50 |
+
public:
|
| 51 |
+
InferenceContext()
|
| 52 |
+
: _workspace(nullptr),
|
| 53 |
+
_seed(42),
|
| 54 |
+
_curr_offset(0),
|
| 55 |
+
_stream(0),
|
| 56 |
+
_free_memory_size(0),
|
| 57 |
+
_num_tokens(1),
|
| 58 |
+
_attention_unfused_workspace_offset(0),
|
| 59 |
+
_workSpaceSize(0)
|
| 60 |
+
{
|
| 61 |
+
_workSpaceSize = 0;
|
| 62 |
+
_workspace = 0;
|
| 63 |
+
|
| 64 |
+
cublasStatus_t stat = cublasCreate(&_cublasHandle);
|
| 65 |
+
if (stat != CUBLAS_STATUS_SUCCESS) {
|
| 66 |
+
// It would be nice to use cublasGetStatusName and
|
| 67 |
+
// cublasGetStatusString, but they were only added in CUDA 11.4.2.
|
| 68 |
+
auto message = std::string("Failed to create cublas handle: cublasStatus_t was ") +
|
| 69 |
+
std::to_string(stat);
|
| 70 |
+
std::cerr << message << std::endl;
|
| 71 |
+
throw std::runtime_error(message);
|
| 72 |
+
}
|
| 73 |
+
#ifndef __HIP_PLATFORM_AMD__
|
| 74 |
+
cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH);
|
| 75 |
+
#endif
|
| 76 |
+
cudaEventCreate(&_comp1_event);
|
| 77 |
+
cudaEventCreate(&_comp2_event);
|
| 78 |
+
cudaEventCreate(&_comp_event);
|
| 79 |
+
cudaEventCreate(&_comm_event);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
virtual ~InferenceContext()
|
| 83 |
+
{
|
| 84 |
+
cublasDestroy(_cublasHandle);
|
| 85 |
+
cudaFree(_workspace);
|
| 86 |
+
cudaEventDestroy(_comp1_event);
|
| 87 |
+
cudaEventDestroy(_comp2_event);
|
| 88 |
+
cudaEventDestroy(_comp_event);
|
| 89 |
+
cudaEventDestroy(_comm_event);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
static InferenceContext& Instance()
|
| 93 |
+
{
|
| 94 |
+
static InferenceContext _ctx;
|
| 95 |
+
return _ctx;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
void GenWorkSpace(const unsigned& num_layers,
|
| 99 |
+
const unsigned& num_heads,
|
| 100 |
+
const size_t& batch_size,
|
| 101 |
+
const size_t& prompt_len,
|
| 102 |
+
const size_t& hidden_dim,
|
| 103 |
+
const unsigned& mp_size,
|
| 104 |
+
const bool& external_cache,
|
| 105 |
+
const size_t& elem_size,
|
| 106 |
+
const unsigned& rank,
|
| 107 |
+
unsigned max_out_tokens,
|
| 108 |
+
unsigned min_out_tokens)
|
| 109 |
+
{
|
| 110 |
+
size_t total_size;
|
| 111 |
+
if (!_free_memory_size) { cudaMemGetInfo(&_free_memory_size, &total_size); }
|
| 112 |
+
|
| 113 |
+
// Flash attention requires padded heads and we'll conservatively allocate
|
| 114 |
+
// for that here. Flash attention is only enabled for head size <= 128 right now
|
| 115 |
+
const int head_size = hidden_dim / num_heads;
|
| 116 |
+
const int padded_head_size = head_size <= 32 ? 32 : (head_size <= 64 ? 64 : 128);
|
| 117 |
+
const int effective_head_size = (head_size > 128) ? head_size : padded_head_size;
|
| 118 |
+
|
| 119 |
+
size_t activation_size = 10 * (num_heads * effective_head_size) * batch_size;
|
| 120 |
+
// Other sequence length dimension is added when the final workSpaceSize is calculated
|
| 121 |
+
size_t temp_size = batch_size * (num_heads / mp_size) * max_out_tokens;
|
| 122 |
+
size_t cache_size =
|
| 123 |
+
num_layers * batch_size * ((num_heads * effective_head_size) / mp_size) * 2;
|
| 124 |
+
size_t minimal_requirements =
|
| 125 |
+
temp_size + (_free_memory_size > GIGABYTE ? 500 : 100) * MEGABYTE;
|
| 126 |
+
if (_free_memory_size < minimal_requirements) {
|
| 127 |
+
printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n",
|
| 128 |
+
minimal_requirements,
|
| 129 |
+
_free_memory_size,
|
| 130 |
+
total_size);
|
| 131 |
+
throw std::runtime_error("Workspace can't be allocated, no enough memory.");
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
_max_seq_len = ((_free_memory_size - minimal_requirements) / elem_size) /
|
| 135 |
+
(activation_size + temp_size + cache_size);
|
| 136 |
+
_max_seq_len = std::min((size_t)max_out_tokens, _max_seq_len);
|
| 137 |
+
size_t workSpaceSize = ((external_cache ? (activation_size + temp_size)
|
| 138 |
+
: (activation_size + temp_size + cache_size))) *
|
| 139 |
+
_max_seq_len * elem_size;
|
| 140 |
+
temp_size *= _max_seq_len * elem_size;
|
| 141 |
+
|
| 142 |
+
if (_max_seq_len < min_out_tokens) {
|
| 143 |
+
printf(
|
| 144 |
+
"Allocatable workspace available (%ld tokens) is less than minimum requested "
|
| 145 |
+
"workspace (%d tokens)\n",
|
| 146 |
+
_max_seq_len,
|
| 147 |
+
min_out_tokens);
|
| 148 |
+
throw std::runtime_error("Workspace can't be allocated, not enough memory");
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
if (!_workspace) {
|
| 152 |
+
assert(_workspace == nullptr);
|
| 153 |
+
cudaMalloc(&_workspace, workSpaceSize);
|
| 154 |
+
} else if (_workSpaceSize < workSpaceSize) {
|
| 155 |
+
cudaFree(_workspace);
|
| 156 |
+
cudaMalloc(&_workspace, workSpaceSize);
|
| 157 |
+
}
|
| 158 |
+
if (rank == 0 && (!_workspace || _workSpaceSize < workSpaceSize))
|
| 159 |
+
printf(
|
| 160 |
+
"------------------------------------------------------\n"
|
| 161 |
+
"Free memory : %f (GigaBytes) \n"
|
| 162 |
+
"Total memory: %f (GigaBytes) \n"
|
| 163 |
+
"Requested memory: %f (GigaBytes) \n"
|
| 164 |
+
"Setting maximum total tokens (input + output) to %lu \n"
|
| 165 |
+
"WorkSpace: %p \n"
|
| 166 |
+
"------------------------------------------------------\n",
|
| 167 |
+
(float)_free_memory_size / GIGABYTE,
|
| 168 |
+
(float)total_size / GIGABYTE,
|
| 169 |
+
(float)workSpaceSize / GIGABYTE,
|
| 170 |
+
_max_seq_len,
|
| 171 |
+
_workspace);
|
| 172 |
+
|
| 173 |
+
if (!_workspace) {
|
| 174 |
+
printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n",
|
| 175 |
+
workSpaceSize,
|
| 176 |
+
_free_memory_size,
|
| 177 |
+
total_size);
|
| 178 |
+
throw std::runtime_error("Workspace is null.");
|
| 179 |
+
}
|
| 180 |
+
_workSpaceSize = workSpaceSize;
|
| 181 |
+
_attention_unfused_workspace_offset = workSpaceSize - temp_size;
|
| 182 |
+
}
|
| 183 |
+
inline size_t GetMaxTokenLength() const { return _max_seq_len; }
|
| 184 |
+
|
| 185 |
+
cudaEvent_t GetCompEvent(int id) { return id == 1 ? _comp1_event : _comp2_event; }
|
| 186 |
+
|
| 187 |
+
size_t get_workspace_size() const { return _workSpaceSize; }
|
| 188 |
+
void* GetWorkSpace() { return _workspace; }
|
| 189 |
+
void* GetAttentionUnfusedWorkspace()
|
| 190 |
+
{
|
| 191 |
+
return (char*)_workspace + _attention_unfused_workspace_offset;
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
inline unsigned new_token(unsigned layer_id)
|
| 195 |
+
{
|
| 196 |
+
if (layer_id == 0) _token_length++;
|
| 197 |
+
return _token_length;
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
inline void reset_tokens(unsigned initial_tokens = 1)
|
| 201 |
+
{
|
| 202 |
+
_num_tokens = initial_tokens;
|
| 203 |
+
} //_token_length = 0; }
|
| 204 |
+
|
| 205 |
+
inline unsigned current_tokens() const { return _num_tokens; }
|
| 206 |
+
|
| 207 |
+
inline void advance_tokens() { _num_tokens++; }
|
| 208 |
+
|
| 209 |
+
cudaStream_t GetCommStream(bool async_op = false)
|
| 210 |
+
{
|
| 211 |
+
if (!_comm_stream)
|
| 212 |
+
_comm_stream = async_op ? at::cuda::getStreamFromPool(true)
|
| 213 |
+
: at::cuda::getCurrentCUDAStream();
|
| 214 |
+
return _comm_stream;
|
| 215 |
+
}
|
| 216 |
+
cudaStream_t GetCurrentStream(bool other_stream = false)
|
| 217 |
+
{
|
| 218 |
+
// get current pytorch stream.
|
| 219 |
+
if (other_stream) {
|
| 220 |
+
if (!_stream) _stream = at::cuda::getStreamFromPool(true);
|
| 221 |
+
return _stream;
|
| 222 |
+
}
|
| 223 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 224 |
+
return stream;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
    // Free the device workspace and null the pointer so a later
    // retake_workspace() can re-create it.
    void release_workspace()
    {
        cudaFree(_workspace);
        _workspace = nullptr;
    }
|
| 232 |
+
bool retake_workspace()
|
| 233 |
+
{
|
| 234 |
+
if (_workspace != nullptr || _workSpaceSize == 0) return true;
|
| 235 |
+
cudaMalloc(&_workspace, _workSpaceSize);
|
| 236 |
+
return _workspace != nullptr;
|
| 237 |
+
}
|
| 238 |
+
    // The cuBLAS handle owned by this context.
    cublasHandle_t GetCublasHandle() { return _cublasHandle; }
|
| 239 |
+
|
| 240 |
+
std::pair<uint64_t, uint64_t> IncrementOffset(uint64_t offset_inc)
|
| 241 |
+
{
|
| 242 |
+
uint64_t offset = _curr_offset;
|
| 243 |
+
_curr_offset += offset_inc;
|
| 244 |
+
return std::pair<uint64_t, uint64_t>(_seed, offset);
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
    // Replace the RNG seed paired with offsets handed out by IncrementOffset().
    void SetSeed(uint64_t new_seed) { _seed = new_seed; }
|
| 248 |
+
|
| 249 |
+
    // GEMM algorithm selections recorded for this context (read-only view).
    const std::vector<std::array<int, 3>>& GetGemmAlgos() const { return _gemm_algos; }
|
| 250 |
+
|
| 251 |
+
    // Make the comm stream wait for all work queued so far on the comp stream
    // (device-side event record + stream wait; no host synchronization).
    inline void SynchComp()
    {
        cudaEventRecord(_comp_event, _comp_stream);
        cudaStreamWaitEvent(_comm_stream, _comp_event, 0);
    }
|
| 256 |
+
    // Mirror of SynchComp(): make the comp stream wait for everything queued
    // so far on the comm stream.
    inline void SynchComm()
    {
        cudaEventRecord(_comm_event, _comm_stream);
        cudaStreamWaitEvent(_comp_stream, _comm_event, 0);
    }
|
| 261 |
+
|
| 262 |
+
private:
|
| 263 |
+
cublasHandle_t _cublasHandle;
|
| 264 |
+
|
| 265 |
+
cudaEvent_t _comp_event;
|
| 266 |
+
cudaEvent_t _comm_event;
|
| 267 |
+
|
| 268 |
+
void* _workspace;
|
| 269 |
+
// offset from _workspace for attention unfused memory
|
| 270 |
+
size_t _attention_unfused_workspace_offset;
|
| 271 |
+
uint64_t _seed;
|
| 272 |
+
uint64_t _curr_offset;
|
| 273 |
+
|
| 274 |
+
size_t _workSpaceSize;
|
| 275 |
+
size_t _free_memory_size;
|
| 276 |
+
|
| 277 |
+
size_t _max_seq_len;
|
| 278 |
+
|
| 279 |
+
cudaEvent_t _comp1_event;
|
| 280 |
+
cudaEvent_t _comp2_event;
|
| 281 |
+
|
| 282 |
+
cudaStream_t _stream;
|
| 283 |
+
|
| 284 |
+
unsigned _token_length;
|
| 285 |
+
unsigned _num_tokens;
|
| 286 |
+
std::vector<std::array<int, 3>> _gemm_algos;
|
| 287 |
+
|
| 288 |
+
cudaStream_t _comp_stream;
|
| 289 |
+
cudaStream_t _comm_stream;
|
| 290 |
+
|
| 291 |
+
std::unordered_map<int, int> _world_sizes;
|
| 292 |
+
};
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <assert.h>
|
| 9 |
+
#include <cublas_v2.h>
|
| 10 |
+
#include <cuda.h>
|
| 11 |
+
#ifdef BF16_AVAILABLE
|
| 12 |
+
#include <cuda_bf16.h>
|
| 13 |
+
#endif
|
| 14 |
+
#include <cuda_fp16.h>
|
| 15 |
+
#include <cuda_runtime.h>
|
| 16 |
+
#ifndef __HIP_PLATFORM_AMD__
|
| 17 |
+
#include <mma.h>
|
| 18 |
+
#endif
|
| 19 |
+
#include <stdio.h>
|
| 20 |
+
|
| 21 |
+
// Single-precision GEMM wrapper: C = alpha * op(A) * op(B) + beta * C via
// cublasGemmEx (CUDA) or rocblas_gemm_ex (ROCm). Leading dimensions follow
// the column-major convention: lda is m or k depending on transa, ldc is m,
// and ldb defaults from transb (k or n) unless b_stride >= 0 overrides it.
// Returns 0 on success; on failure prints the problem sizes and status to
// stderr and returns EXIT_FAILURE.
#ifdef __HIP_PLATFORM_AMD__
int cublas_gemm_ex(rocblas_handle handle,
                   rocblas_operation transa,
                   rocblas_operation transb,
                   int m,
                   int n,
                   int k,
                   const float* alpha,
                   const float* beta,
                   const float* A,
                   const float* B,
                   float* C,
                   rocblas_gemm_algo algo,
                   int b_stride = -1)
#else
int cublas_gemm_ex(cublasHandle_t handle,
                   cublasOperation_t transa,
                   cublasOperation_t transb,
                   int m,
                   int n,
                   int k,
                   const float* alpha,
                   const float* beta,
                   const float* A,
                   const float* B,
                   float* C,
                   cublasGemmAlgo_t algo,
                   int b_stride = -1)
#endif
{
    // NOTE(review): CUBLAS_OP_N is referenced here even on the HIP path —
    // presumably hipify maps it to the rocBLAS equivalent at build time;
    // confirm against the DeepSpeed ROCm build.
    const int ldb = (b_stride == -1) ? ((transb == CUBLAS_OP_N) ? k : n) : b_stride;
#ifdef __HIP_PLATFORM_AMD__
    // C appears twice (matrix C and matrix D) so the op writes in place.
    rocblas_status status = rocblas_gemm_ex(handle,
                                            transa,
                                            transb,
                                            m,
                                            n,
                                            k,
                                            (const void*)alpha,
                                            (const void*)A,
                                            rocblas_datatype_f32_r,
                                            (transa == rocblas_operation_none) ? m : k,
                                            (const void*)B,
                                            rocblas_datatype_f32_r,
                                            ldb,
                                            (const void*)beta,
                                            C,
                                            rocblas_datatype_f32_r,
                                            m,
                                            C,
                                            rocblas_datatype_f32_r,
                                            m,
                                            rocblas_datatype_f32_r,
                                            algo,
                                            0,
                                            0);
#else
    // Compute type CUDA_R_32F matches the fp32 operands.
    cublasStatus_t status = cublasGemmEx(handle,
                                         transa,
                                         transb,
                                         m,
                                         n,
                                         k,
                                         (const void*)alpha,
                                         (const void*)A,
                                         CUDA_R_32F,
                                         (transa == CUBLAS_OP_N) ? m : k,
                                         (const void*)B,
                                         CUDA_R_32F,
                                         ldb,
                                         (const void*)beta,
                                         C,
                                         CUDA_R_32F,
                                         m,
                                         CUDA_R_32F,
                                         algo);
#endif

#ifdef __HIP_PLATFORM_AMD__
    if (status != rocblas_status_success) {
#else
    if (status != CUBLAS_STATUS_SUCCESS) {
#endif
        fprintf(stderr,
                "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
                m,
                n,
                k,
                (int)status);
        return EXIT_FAILURE;
    }
    return 0;
}
|
| 114 |
+
|
| 115 |
+
// Half-precision GEMM wrapper for T in {__half, __nv_bfloat16}: same contract
// as the fp32 overload (C = alpha * op(A) * op(B) + beta * C, column-major,
// optional b_stride override of ldb), but the A/B/C datatype is chosen from T
// while accumulation stays in fp32 (compute type CUDA_R_32F /
// rocblas_datatype_f32_r). Returns 0 on success, EXIT_FAILURE on error.
template <typename T>
#ifdef __HIP_PLATFORM_AMD__
int cublas_gemm_ex(rocblas_handle handle,
                   rocblas_operation transa,
                   rocblas_operation transb,
                   int m,
                   int n,
                   int k,
                   const float* alpha,
                   const float* beta,
                   const T* A,
                   const T* B,
                   T* C,
                   rocblas_gemm_algo algo,
                   int b_stride = -1)
#else
int cublas_gemm_ex(cublasHandle_t handle,
                   cublasOperation_t transa,
                   cublasOperation_t transb,
                   int m,
                   int n,
                   int k,
                   const float* alpha,
                   const float* beta,
                   const T* A,
                   const T* B,
                   T* C,
                   cublasGemmAlgo_t algo,
                   int b_stride = -1)
#endif
{
    const int ldb = (b_stride == -1) ? ((transb == CUBLAS_OP_N) ? k : n) : b_stride;
#ifdef __HIP_PLATFORM_AMD__
    // Map T to the rocBLAS element type: fp16 for __half, bf16 otherwise.
    constexpr auto rocblas_dtype_16 = std::is_same<T, __half>::value ? rocblas_datatype_f16_r
                                                                     : rocblas_datatype_bf16_r;
    rocblas_status status = rocblas_gemm_ex(handle,
                                            transa,
                                            transb,
                                            m,
                                            n,
                                            k,
                                            (const void*)alpha,
                                            (const void*)A,
                                            rocblas_dtype_16,
                                            (transa == rocblas_operation_none) ? m : k,
                                            (const void*)B,
                                            rocblas_dtype_16,
                                            ldb,
                                            (const void*)beta,
                                            (void*)C,
                                            rocblas_dtype_16,
                                            m,
                                            (void*)C,
                                            rocblas_dtype_16,
                                            m,
                                            rocblas_datatype_f32_r,
                                            algo,
                                            0,
                                            0);
#else
    // CUDA_R_16BF requires a toolkit with bf16 support (BF16_AVAILABLE is
    // checked at the include site above).
    constexpr auto cublas_dtype_16 = std::is_same<T, __half>::value ? CUDA_R_16F : CUDA_R_16BF;
    cublasStatus_t status = cublasGemmEx(handle,
                                         transa,
                                         transb,
                                         m,
                                         n,
                                         k,
                                         (const void*)alpha,
                                         (const void*)A,
                                         cublas_dtype_16,
                                         (transa == CUBLAS_OP_N) ? m : k,
                                         (const void*)B,
                                         cublas_dtype_16,
                                         ldb,
                                         (const void*)beta,
                                         (void*)C,
                                         cublas_dtype_16,
                                         m,
                                         CUDA_R_32F,
                                         algo);
#endif

#ifdef __HIP_PLATFORM_AMD__
    if (status != rocblas_status_success) {
#else
    if (status != CUBLAS_STATUS_SUCCESS) {
#endif
        fprintf(stderr,
                "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
                m,
                n,
                k,
                (int)status);
        return EXIT_FAILURE;
    }
    return 0;
}
|
| 212 |
+
|
| 213 |
+
// Single-precision strided-batched GEMM: performs `batch` independent
// C_i = alpha * op(A_i) * op(B_i) + beta * C_i operations where consecutive
// matrices are stride_A / stride_B / stride_C elements apart. Leading
// dimensions are derived from op_A / op_B (column-major); ldc is m.
// Returns 0 on success; prints sizes and status to stderr and returns
// EXIT_FAILURE otherwise.
#ifdef __HIP_PLATFORM_AMD__
int cublas_strided_batched_gemm(rocblas_handle handle,
                                int m,
                                int n,
                                int k,
                                const float* alpha,
                                const float* beta,
                                const float* A,
                                const float* B,
                                float* C,
                                rocblas_operation op_A,
                                rocblas_operation op_B,
                                int stride_A,
                                int stride_B,
                                int stride_C,
                                int batch,
                                rocblas_gemm_algo algo)
#else
int cublas_strided_batched_gemm(cublasHandle_t handle,
                                int m,
                                int n,
                                int k,
                                const float* alpha,
                                const float* beta,
                                const float* A,
                                const float* B,
                                float* C,
                                cublasOperation_t op_A,
                                cublasOperation_t op_B,
                                int stride_A,
                                int stride_B,
                                int stride_C,
                                int batch,
                                cublasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_AMD__
    // C is passed as both the C and D matrix so the update is in place.
    rocblas_status status =
        rocblas_gemm_strided_batched_ex(handle,
                                        op_A,
                                        op_B,
                                        m,
                                        n,
                                        k,
                                        alpha,
                                        A,
                                        rocblas_datatype_f32_r,
                                        (op_A == rocblas_operation_none) ? m : k,
                                        stride_A,
                                        B,
                                        rocblas_datatype_f32_r,
                                        (op_B == rocblas_operation_none) ? k : n,
                                        stride_B,
                                        beta,
                                        C,
                                        rocblas_datatype_f32_r,
                                        m,
                                        stride_C,
                                        C,
                                        rocblas_datatype_f32_r,
                                        m,
                                        stride_C,
                                        batch,
                                        rocblas_datatype_f32_r,
                                        algo,
                                        0,
                                        0);
#else
    cublasStatus_t status = cublasGemmStridedBatchedEx(handle,
                                                       op_A,
                                                       op_B,
                                                       m,
                                                       n,
                                                       k,
                                                       alpha,
                                                       A,
                                                       CUDA_R_32F,
                                                       (op_A == CUBLAS_OP_N) ? m : k,
                                                       stride_A,
                                                       B,
                                                       CUDA_R_32F,
                                                       (op_B == CUBLAS_OP_N) ? k : n,
                                                       stride_B,
                                                       beta,
                                                       C,
                                                       CUDA_R_32F,
                                                       m,
                                                       stride_C,
                                                       batch,
                                                       CUDA_R_32F,
                                                       algo);
#endif

#ifdef __HIP_PLATFORM_AMD__
    if (status != rocblas_status_success) {
#else
    if (status != CUBLAS_STATUS_SUCCESS) {
#endif
        fprintf(stderr,
                "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n",
                batch,
                m,
                n,
                k,
                (int)status);
        return EXIT_FAILURE;
    }
    return 0;
}
|
| 322 |
+
|
| 323 |
+
// Half-precision strided-batched GEMM for T in {__half, __nv_bfloat16}: same
// contract as the fp32 overload, with A/B/C stored as T and accumulation in
// fp32 (compute type CUDA_R_32F / rocblas_datatype_f32_r). Returns 0 on
// success, EXIT_FAILURE on error.
template <typename T>
#ifdef __HIP_PLATFORM_AMD__
int cublas_strided_batched_gemm(rocblas_handle handle,
                                int m,
                                int n,
                                int k,
                                const float* alpha,
                                const float* beta,
                                const T* A,
                                const T* B,
                                T* C,
                                rocblas_operation op_A,
                                rocblas_operation op_B,
                                int stride_A,
                                int stride_B,
                                int stride_C,
                                int batch,
                                rocblas_gemm_algo algo)
#else
int cublas_strided_batched_gemm(cublasHandle_t handle,
                                int m,
                                int n,
                                int k,
                                const float* alpha,
                                const float* beta,
                                const T* A,
                                const T* B,
                                T* C,
                                cublasOperation_t op_A,
                                cublasOperation_t op_B,
                                int stride_A,
                                int stride_B,
                                int stride_C,
                                int batch,
                                cublasGemmAlgo_t algo)
#endif
{
#ifdef __HIP_PLATFORM_AMD__
    // Map T to the rocBLAS element type: fp16 for __half, bf16 otherwise.
    constexpr auto rocblas_dtype_16 = std::is_same<T, __half>::value ? rocblas_datatype_f16_r
                                                                     : rocblas_datatype_bf16_r;
    rocblas_status status =
        rocblas_gemm_strided_batched_ex(handle,
                                        op_A,
                                        op_B,
                                        m,
                                        n,
                                        k,
                                        alpha,
                                        A,
                                        rocblas_dtype_16,
                                        (op_A == rocblas_operation_none) ? m : k,
                                        stride_A,
                                        B,
                                        rocblas_dtype_16,
                                        (op_B == rocblas_operation_none) ? k : n,
                                        stride_B,
                                        beta,
                                        C,
                                        rocblas_dtype_16,
                                        m,
                                        stride_C,
                                        C,
                                        rocblas_dtype_16,
                                        m,
                                        stride_C,
                                        batch,
                                        rocblas_datatype_f32_r,
                                        algo,
                                        0,
                                        0);
#else
    constexpr auto cublas_dtype_16 = std::is_same<T, __half>::value ? CUDA_R_16F : CUDA_R_16BF;
    cublasStatus_t status = cublasGemmStridedBatchedEx(handle,
                                                       op_A,
                                                       op_B,
                                                       m,
                                                       n,
                                                       k,
                                                       alpha,
                                                       A,
                                                       cublas_dtype_16,
                                                       (op_A == CUBLAS_OP_N) ? m : k,
                                                       stride_A,
                                                       B,
                                                       cublas_dtype_16,
                                                       (op_B == CUBLAS_OP_N) ? k : n,
                                                       stride_B,
                                                       beta,
                                                       C,
                                                       cublas_dtype_16,
                                                       m,
                                                       stride_C,
                                                       batch,
                                                       CUDA_R_32F,
                                                       algo);
#endif

#ifdef __HIP_PLATFORM_AMD__
    if (status != rocblas_status_success) {
#else
    if (status != CUBLAS_STATUS_SUCCESS) {
#endif
        // NOTE(review): unlike the fp32 overload, this message omits the
        // batch count — kept as-is to preserve behavior.
        fprintf(stderr,
                "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
                m,
                n,
                k,
                (int)status);
        return EXIT_FAILURE;
    }

    return 0;
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include "ds_kernel_utils.h"
|
| 9 |
+
|
| 10 |
+
#include <cuda.h>
|
| 11 |
+
#ifdef BF16_AVAILABLE
|
| 12 |
+
#include <cuda_bf16.h>
|
| 13 |
+
#endif
|
| 14 |
+
#include <cuda_fp16.h>
|
| 15 |
+
#include <stdio.h>
|
| 16 |
+
#include <stdlib.h>
|
| 17 |
+
#include <cassert>
|
| 18 |
+
#include <iostream>
|
| 19 |
+
|
| 20 |
+
#define MAX_WARP_NUM 32
|
| 21 |
+
#define WARP_SIZE 32
|
| 22 |
+
|
| 23 |
+
#define MAX_THREADS 1024
|
| 24 |
+
#define SMs 80
|
| 25 |
+
|
| 26 |
+
#define MAX_REGISTERS 256
|
| 27 |
+
|
| 28 |
+
template <typename T>
|
| 29 |
+
void launch_attn_softmax_v2(T* vals,
|
| 30 |
+
T* mask,
|
| 31 |
+
T* alibi,
|
| 32 |
+
float layer_scale,
|
| 33 |
+
bool triangular,
|
| 34 |
+
bool recompute,
|
| 35 |
+
bool local_attention,
|
| 36 |
+
int window_size,
|
| 37 |
+
int batch_size,
|
| 38 |
+
int heads,
|
| 39 |
+
int num_seq,
|
| 40 |
+
int sequence_length,
|
| 41 |
+
int offset,
|
| 42 |
+
int mask_stride,
|
| 43 |
+
int mp_size,
|
| 44 |
+
cudaStream_t stream);
|
| 45 |
+
|
| 46 |
+
// Fused bias add with gelu activation
|
| 47 |
+
template <typename T>
|
| 48 |
+
void launch_bias_gelu(T* input,
|
| 49 |
+
const T* bias,
|
| 50 |
+
int intermediate_size,
|
| 51 |
+
int batch_size,
|
| 52 |
+
cudaStream_t stream);
|
| 53 |
+
|
| 54 |
+
template <typename T>
|
| 55 |
+
void launch_gated_activation(T* output,
|
| 56 |
+
const T* activation,
|
| 57 |
+
const T* bias,
|
| 58 |
+
int rows,
|
| 59 |
+
int output_stride,
|
| 60 |
+
int elems_per_row,
|
| 61 |
+
bool use_gelu,
|
| 62 |
+
cudaStream_t stream);
|
| 63 |
+
|
| 64 |
+
// Fused bias add with relu activation
|
| 65 |
+
template <typename T>
|
| 66 |
+
void launch_bias_relu(T* input,
|
| 67 |
+
const T* bias,
|
| 68 |
+
int intermediate_size,
|
| 69 |
+
int batch_size,
|
| 70 |
+
cudaStream_t stream);
|
| 71 |
+
|
| 72 |
+
template <typename T>
|
| 73 |
+
void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream);
|
| 74 |
+
|
| 75 |
+
template <typename T>
|
| 76 |
+
void launch_bias_residual(T* input,
|
| 77 |
+
T* output,
|
| 78 |
+
T* attn,
|
| 79 |
+
T* bias,
|
| 80 |
+
T* attn_bias,
|
| 81 |
+
int batch,
|
| 82 |
+
int hidden_dim,
|
| 83 |
+
int mp_size,
|
| 84 |
+
bool preln,
|
| 85 |
+
cudaStream_t stream);
|
| 86 |
+
|
| 87 |
+
template <typename T>
|
| 88 |
+
void launch_fused_ln(T* output,
|
| 89 |
+
const T* vals,
|
| 90 |
+
const T* gamma,
|
| 91 |
+
const T* beta,
|
| 92 |
+
float epsilon,
|
| 93 |
+
int rows,
|
| 94 |
+
int elems_per_row,
|
| 95 |
+
cudaStream_t stream);
|
| 96 |
+
|
| 97 |
+
template <typename T>
|
| 98 |
+
void launch_fused_residual_ln(T* output,
|
| 99 |
+
const T* vals,
|
| 100 |
+
const T* residual,
|
| 101 |
+
const T* bias,
|
| 102 |
+
const T* gamma,
|
| 103 |
+
const T* beta,
|
| 104 |
+
float epsilon,
|
| 105 |
+
int rows,
|
| 106 |
+
int elems_per_row,
|
| 107 |
+
cudaStream_t stream);
|
| 108 |
+
|
| 109 |
+
template <typename T>
|
| 110 |
+
void launch_fused_residual_ln_store_pre_ln_res(T* norm_output,
|
| 111 |
+
T* res_output,
|
| 112 |
+
const T* vals,
|
| 113 |
+
const T* residual,
|
| 114 |
+
const T* bias,
|
| 115 |
+
const T* gamma,
|
| 116 |
+
const T* beta,
|
| 117 |
+
float epsilon,
|
| 118 |
+
int rows,
|
| 119 |
+
int elems_per_row,
|
| 120 |
+
cudaStream_t stream);
|
| 121 |
+
|
| 122 |
+
template <typename T>
|
| 123 |
+
void launch_rms_norm(T* norm_output,
|
| 124 |
+
T* res_output,
|
| 125 |
+
const T* vals,
|
| 126 |
+
const T* residual,
|
| 127 |
+
const T* gamma,
|
| 128 |
+
float epsilon,
|
| 129 |
+
int rows,
|
| 130 |
+
int elems_per_row,
|
| 131 |
+
cudaStream_t stream);
|
| 132 |
+
|
| 133 |
+
template <typename T>
|
| 134 |
+
void launch_dequantize(T* output,
|
| 135 |
+
const int8_t* input,
|
| 136 |
+
const float* qscale,
|
| 137 |
+
unsigned output_size,
|
| 138 |
+
unsigned hidden_dim,
|
| 139 |
+
unsigned groups,
|
| 140 |
+
unsigned merge_count,
|
| 141 |
+
cudaStream_t stream);
|
| 142 |
+
|
| 143 |
+
template <typename T>
|
| 144 |
+
void launch_dequantize(T* output,
|
| 145 |
+
const int8_t* input,
|
| 146 |
+
const float* qscale,
|
| 147 |
+
unsigned output_size,
|
| 148 |
+
unsigned hidden_dim,
|
| 149 |
+
unsigned groups,
|
| 150 |
+
cudaStream_t stream);
|
| 151 |
+
template <typename T>
|
| 152 |
+
void launch_gptj_residual_add(T* input,
|
| 153 |
+
T* output,
|
| 154 |
+
T* attn,
|
| 155 |
+
T* bias,
|
| 156 |
+
T* attn_bias,
|
| 157 |
+
int batch,
|
| 158 |
+
int head_size,
|
| 159 |
+
int mp_size,
|
| 160 |
+
cudaStream_t stream);
|
| 161 |
+
|
| 162 |
+
template <typename T>
|
| 163 |
+
void launch_apply_rotary_pos_emb(T* mixed_query,
|
| 164 |
+
T* key_layer,
|
| 165 |
+
unsigned head_size,
|
| 166 |
+
unsigned seq_len,
|
| 167 |
+
unsigned rotary_dim,
|
| 168 |
+
unsigned offset,
|
| 169 |
+
unsigned num_heads,
|
| 170 |
+
unsigned batch,
|
| 171 |
+
float rope_theta,
|
| 172 |
+
cudaStream_t stream,
|
| 173 |
+
int max_out_tokens);
|
| 174 |
+
|
| 175 |
+
template <typename T>
|
| 176 |
+
void launch_moe_res_matmul(T* residual,
|
| 177 |
+
T* coef,
|
| 178 |
+
T* mlp_out,
|
| 179 |
+
int seq_len,
|
| 180 |
+
int hidden_dim,
|
| 181 |
+
cudaStream_t stream);
|
| 182 |
+
|
| 183 |
+
// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3]
|
| 184 |
+
template <typename T>
|
| 185 |
+
void launch_transform4d_0213(T* out,
|
| 186 |
+
const T* in,
|
| 187 |
+
int batch_size,
|
| 188 |
+
int heads,
|
| 189 |
+
int seq_length,
|
| 190 |
+
int hidden_dim,
|
| 191 |
+
cudaStream_t stream,
|
| 192 |
+
int trans_count);
|
| 193 |
+
template <typename T>
|
| 194 |
+
void launch_bias_add_transform_0213(T* outputs,
|
| 195 |
+
T* vals,
|
| 196 |
+
T* vals1,
|
| 197 |
+
const T* vals2,
|
| 198 |
+
const T* bias,
|
| 199 |
+
int batch_size,
|
| 200 |
+
int seq_length,
|
| 201 |
+
unsigned seq_offset,
|
| 202 |
+
int seq_length1,
|
| 203 |
+
int hidden_dim,
|
| 204 |
+
int heads,
|
| 205 |
+
int num_kv,
|
| 206 |
+
int rotary_dim,
|
| 207 |
+
bool rotate_half,
|
| 208 |
+
bool rotate_every_two,
|
| 209 |
+
cudaStream_t stream,
|
| 210 |
+
int trans_count,
|
| 211 |
+
int max_out_tokens,
|
| 212 |
+
float rope_theta);
|
| 213 |
+
template <typename T>
|
| 214 |
+
void pad_data(T* padded_output,
|
| 215 |
+
T* output,
|
| 216 |
+
int bsz,
|
| 217 |
+
int head_size,
|
| 218 |
+
int padded_head_size,
|
| 219 |
+
cudaStream_t stream);
|
| 220 |
+
|
| 221 |
+
template <typename T>
|
| 222 |
+
void pad_head_seq(T* padded_output,
|
| 223 |
+
T* output,
|
| 224 |
+
int bsz,
|
| 225 |
+
int seq_len,
|
| 226 |
+
int padded_seq_len,
|
| 227 |
+
int head_size,
|
| 228 |
+
int padded_head_size,
|
| 229 |
+
cudaStream_t stream);
|
| 230 |
+
|
| 231 |
+
template <typename T>
|
| 232 |
+
void launch_pad_add_transform_0213(T* output,
|
| 233 |
+
const T* vals,
|
| 234 |
+
int batch_size,
|
| 235 |
+
int hidden_dim,
|
| 236 |
+
int seq_length,
|
| 237 |
+
int padded_seq_len,
|
| 238 |
+
int heads,
|
| 239 |
+
int padded_head_size,
|
| 240 |
+
cudaStream_t stream);
|
| 241 |
+
|
| 242 |
+
template <typename T>
|
| 243 |
+
void launch_vector_add(T* out,
|
| 244 |
+
const T* a,
|
| 245 |
+
const T* b,
|
| 246 |
+
float gamma,
|
| 247 |
+
int num_elems,
|
| 248 |
+
cudaStream_t stream);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu
ADDED
|
@@ -0,0 +1,2134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "custom_cuda_layers.h"
|
| 7 |
+
|
| 8 |
+
namespace cg = cooperative_groups;
|
| 9 |
+
|
| 10 |
+
/*
|
| 11 |
+
Fused bias add, residual (elementwise) add, and normalization layer.
|
| 12 |
+
|
| 13 |
+
For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for
|
| 14 |
+
__half2 instructions, and avoid the conversion overhead (1/8 of __half2 arithmetic).
|
| 15 |
+
|
| 16 |
+
For specific launch constraints, see the launch functions.
|
| 17 |
+
*/
|
| 18 |
+
|
| 19 |
+
#define NORM_REG (MAX_REGISTERS / 4)
|
| 20 |
+
|
| 21 |
+
/* FP32 fused layer-norm over one row per block (variant that also records the
 * per-row mean for the backward pass).
 *
 * vals        : output buffer, normalized in place per row
 * residual    : input rows to normalize (read-only)
 * gamma, beta : per-element scale and shift
 * epsilon     : added to the variance before rsqrtf for numerical safety
 * preLayerNorm: accepted but not referenced in this kernel body
 * training    : when true, thread 0 stores mean/variance for row `row`
 * vars, means : per-row variance (+epsilon) and mean, written only if training
 * row_stride  : number of floats per row
 *
 * Each thread accumulates `iterations` strided elements into registers
 * (vals_arr), plus at most one tail element when row_stride is not a multiple
 * of blockDim.x.  Reductions are done warp-locally with shfl_down, then across
 * warps through the shared array `shr`.
 */
__global__ void fused_bias_residual_layer_norm(float* vals,
                                               const float* residual,
                                               const float* gamma,
                                               const float* beta,
                                               float epsilon,
                                               bool preLayerNorm,
                                               bool training,
                                               float* vars,
                                               float* means,
                                               int row_stride)
{
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int gid = id / WARP_SIZE;  // warp index within the block

    float vals_arr[NORM_REG];             // per-thread element cache
    __shared__ float shr[MAX_WARP_NUM];   // one partial per warp

    // Advance both pointers to this block's row.
    residual += (row * row_stride);
    vals += (row * row_stride);

    // Pass 1: load elements and accumulate the row sum.
    float sum = 0.f;
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] = residual[i * iteration_stride + id];
        sum += vals_arr[i];
    }
    // Tail element for threads that own one extra value.
    if (high_index < row_stride) {
        vals_arr[iterations] = residual[high_index];
        sum += vals_arr[iterations];
        iterations++;
    }

    // Intra-warp reduction of the sum.
    for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) shr[gid] = sum;

    b.sync();

    // First warp picks up the per-warp partials and reduces them.
    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];

#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        sum += g.shfl_down(sum, i);
    }

    // Broadcast the block-wide sum from lane 0 to the whole warp.
    sum = g.shfl(sum, 0);
    float mean = sum / row_stride;
    if (training)
        if (threadIdx.x == 0) means[row] = mean;
    // Pass 2: center the cached elements and accumulate squared deviations.
    float variance = 0.f;
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] -= mean;
        variance += vals_arr[i] * vals_arr[i];
    }

    // Same two-level reduction for the variance.
    for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }

    if (g.thread_rank() == 0) shr[gid] = variance;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        variance += g.shfl_down(variance, i);
    }
    variance = g.shfl(variance, 0);
    variance /= row_stride;
    variance += epsilon;  // note: vars[row] stores variance + epsilon
    if (training)
        if (threadIdx.x == 0) vars[row] = variance;

    // Pass 3: normalize, apply affine transform, and write out.
    // `iterations` is reset so the tail element is handled separately below.
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] = vals_arr[i] * rsqrtf(variance);
        vals_arr[i] =
            vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id];
        vals[i * iteration_stride + id] = vals_arr[i];
    }
    if ((high_index) < row_stride) {
        vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance);
        vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index];
        vals[high_index] = vals_arr[iterations];
    }
}
|
| 121 |
+
|
| 122 |
+
/* FP16 fused layer-norm (variant that also records per-row mean/variance).
 *
 * Inputs are processed as __half2 pairs: `row_stride` here is the number of
 * __half2 elements per row (half the hidden dimension), so sums and variance
 * are divided by row_stride * 2.  Statistics are accumulated in FP32 for
 * accuracy and converted back to __half only for the final write.
 *
 * preLayerNorm is accepted but not referenced in this kernel body.
 * The whole body is compiled out when HALF_PRECISION_AVAILABLE is undefined.
 */
__global__ void fused_bias_residual_layer_norm(__half* vals,
                                               const __half* residual,
                                               const __half* gamma,
                                               const __half* beta,
                                               float epsilon,
                                               bool preLayerNorm,
                                               bool training,
                                               __half* vars,
                                               __half* means,
                                               int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int gid = id >> WARP_SIZE_BITS;  // warp index within the block

    float2 vals_f[NORM_REG];             // per-thread FP32 cache of __half2 pairs
    __shared__ float shr[MAX_WARP_NUM];  // one partial per warp

    // Reinterpret as vectorized __half2 to use the 2x-throughput paths.
    __half2* vals_cast = reinterpret_cast<__half2*>(vals);
    const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);

    residual_cast += (row * row_stride);
    vals_cast += (row * row_stride);

    // Pass 1: load pairs (promoted to float2) and accumulate the row sum.
    float sum = 0.f;
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]);
        sum += vals_f[i].x;
        sum += vals_f[i].y;
    }
    // Tail pair for threads that own one extra element.
    if ((high_index) < row_stride) {
        vals_f[iterations] = __half22float2(residual_cast[high_index]);
        sum += vals_f[iterations].x;
        sum += vals_f[iterations].y;
        iterations++;
    }

    // Intra-warp then inter-warp (via shared memory) reduction of the sum.
    for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) shr[gid] = sum;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        sum += g.shfl_down(sum, i);
    }
    sum = g.shfl(sum, 0);
    // Each __half2 holds two values, hence the * 2.
    float mean = sum / (row_stride * 2);

    // Pass 2: center both halves of each pair, accumulate squared deviations.
    float variance = 0.f;
    for (int i = 0; i < iterations; i++) {
        vals_f[i].x -= mean;
        vals_f[i].y -= mean;
        variance += vals_f[i].x * vals_f[i].x;
        variance += vals_f[i].y * vals_f[i].y;
    }

    for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }

    if (g.thread_rank() == 0) shr[gid] = variance;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        variance += g.shfl_down(variance, i);
    }
    variance = g.shfl(variance, 0);
    variance /= (row_stride * 2);
    variance += epsilon;  // note: vars[row] stores variance + epsilon

    __half2 variance_h = __float2half2_rn(variance);
    const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
    const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);

    if (training && threadIdx.x == 0) {
        vars[row] = __float2half(variance);
        means[row] = __float2half(mean);
    }
    // Pass 3: normalize in half2 arithmetic, apply affine, write out.
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) {
        __half2 vals_arr = __float22half2_rn(vals_f[i]);
        vals_arr = vals_arr * h2rsqrt(variance_h);
        vals_arr =
            vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id];
        vals_cast[i * iteration_stride + id] = vals_arr;
    }
    if ((high_index) < row_stride) {
        __half2 vals_arr = __float22half2_rn(vals_f[iterations]);
        vals_arr = vals_arr * h2rsqrt(variance_h);
        vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index];
        vals_cast[high_index] = vals_arr;
    }
#endif
}
|
| 237 |
+
|
| 238 |
+
// Launcher for the fused layer-norm kernels that also emit per-row means;
// specialized below for float and __half.
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
                                     const T* residual,
                                     const T* gamma,
                                     const T* beta,
                                     float epsilon,
                                     int batch_size,
                                     int hidden_dim,
                                     cudaStream_t stream,
                                     bool preLayerNorm,
                                     bool training,
                                     T* vars,
                                     T* means);

/* FP32 launch (means variant): one block per row, block width scaled so that
 * threads * per-thread iterations covers hidden_dim.
 *
 * Throws std::runtime_error for hidden_dim > 65536 (register cache would
 * overflow at larger sizes).
 */
template <>
void launch_bias_residual_layer_norm<float>(float* vals,
                                            const float* residual,
                                            const float* gamma,
                                            const float* beta,
                                            float epsilon,
                                            int batch_size,
                                            int hidden_dim,
                                            cudaStream_t stream,
                                            bool preLayerNorm,
                                            bool training,
                                            float* vars,
                                            float* means)
{
    int threads = THREADS;

    dim3 grid_dim(batch_size);

    // Reject unsupported sizes first; then widen the block for large rows.
    // (Fixes the "Unsupport" typo in the original error message and removes
    // the redundant upper-bound checks from the overlapping range chain.)
    if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
    else if (hidden_dim > 32768)
        threads <<= 2;
    else if (hidden_dim > 16384)
        threads <<= 1;

    dim3 block_dim(threads);

    fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
        vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim);
}
|
| 282 |
+
|
| 283 |
+
/* FP16 launch (means variant): one block per row.  The kernel operates on
 * __half2 pairs, so it is passed hidden_dim / 2 as the row stride.
 *
 * Throws std::runtime_error for hidden_dim > 65536.
 */
template <>
void launch_bias_residual_layer_norm<__half>(__half* vals,
                                             const __half* residual,
                                             const __half* gamma,
                                             const __half* beta,
                                             float epsilon,
                                             int batch_size,
                                             int hidden_dim,
                                             cudaStream_t stream,
                                             bool preLayerNorm,
                                             bool training,
                                             __half* vars,
                                             __half* means)
{
    int threads = 128;

    dim3 grid_dim(batch_size);

    // Reject unsupported sizes first; then widen the block for large rows.
    // (Fixes the "Unsupport" typo in the original error message and removes
    // the redundant upper-bound checks from the overlapping range chain.)
    if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
    else if (hidden_dim > 32768)
        threads <<= 3;
    else if (hidden_dim > 16384)
        threads <<= 2;
    else if (hidden_dim > 8192)
        threads <<= 1;

    dim3 block_dim(threads);

    // Row stride is in __half2 units, i.e. half the hidden dimension.
    fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
        vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2);
}
|
| 315 |
+
|
| 316 |
+
/* FP32 fused layer-norm over one row per block (variant WITHOUT the per-row
 * means output; only the variance is saved when training).
 *
 * Identical structure to the means-producing FP32 kernel above: per-thread
 * register cache, two-level (warp shuffle + shared memory) reductions for sum
 * and variance, then normalize + affine + store.  preLayerNorm is accepted
 * but not referenced in this kernel body.
 */
__global__ void fused_bias_residual_layer_norm(float* vals,
                                               const float* residual,
                                               const float* gamma,
                                               const float* beta,
                                               float epsilon,
                                               bool preLayerNorm,
                                               bool training,
                                               float* vars,
                                               int row_stride)
{
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int gid = id / 32;  // warp index within the block

    float vals_arr[NORM_REG];            // per-thread element cache
    __shared__ float shr[MAX_WARP_NUM];  // one partial per warp

    residual += (row * row_stride);
    vals += (row * row_stride);

    // Pass 1: load elements and accumulate the row sum.
    float sum = 0.f;
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] = residual[i * iteration_stride + id];
        sum += vals_arr[i];
    }
    // Tail element for threads that own one extra value.
    if ((high_index) < row_stride) {
        vals_arr[iterations] = residual[high_index];
        sum += vals_arr[iterations];
        iterations++;
    }

    // Intra-warp then inter-warp reduction of the sum.
    for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) shr[gid] = sum;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];

#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        sum += g.shfl_down(sum, i);
    }

    sum = g.shfl(sum, 0);
    float mean = sum / row_stride;
    // Pass 2: center the cached elements, accumulate squared deviations.
    float variance = 0.f;
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] -= mean;
        variance += vals_arr[i] * vals_arr[i];
    }

    for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }

    if (g.thread_rank() == 0) shr[gid] = variance;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        variance += g.shfl_down(variance, i);
    }
    variance = g.shfl(variance, 0);
    variance /= row_stride;
    variance += epsilon;  // note: vars[row] stores variance + epsilon
    if (training)
        if (threadIdx.x == 0) vars[row] = variance;

    // Pass 3: normalize, apply affine transform, and write out.
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] = vals_arr[i] * rsqrtf(variance);
        vals_arr[i] =
            vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id];
        vals[i * iteration_stride + id] = vals_arr[i];
    }
    if ((high_index) < row_stride) {
        vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance);
        vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index];
        vals[high_index] = vals_arr[iterations];
    }
}
|
| 413 |
+
|
| 414 |
+
/* FP16 fused layer-norm (variant WITHOUT the per-row means output; only the
 * variance is saved when training).
 *
 * Same vectorized __half2 scheme as the means-producing FP16 kernel above:
 * row_stride counts __half2 pairs, statistics are accumulated in FP32 and
 * divided by row_stride * 2.  preLayerNorm is accepted but not referenced.
 * The whole body is compiled out when HALF_PRECISION_AVAILABLE is undefined.
 */
__global__ void fused_bias_residual_layer_norm(__half* vals,
                                               const __half* residual,
                                               const __half* gamma,
                                               const __half* beta,
                                               float epsilon,
                                               bool preLayerNorm,
                                               bool training,
                                               __half* vars,
                                               int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE

    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int gid = id >> WARP_SIZE_BITS;  // warp index within the block

    float2 vals_f[NORM_REG];             // per-thread FP32 cache of __half2 pairs
    __shared__ float shr[MAX_WARP_NUM];  // one partial per warp

    __half2* vals_cast = reinterpret_cast<__half2*>(vals);
    const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);

    residual_cast += (row * row_stride);
    vals_cast += (row * row_stride);

    // Pass 1: load pairs (promoted to float2) and accumulate the row sum.
    float sum = 0.f;
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]);
        sum += vals_f[i].x;
        sum += vals_f[i].y;
    }
    // Tail pair for threads that own one extra element.
    if ((high_index) < row_stride) {
        vals_f[iterations] = __half22float2(residual_cast[high_index]);
        sum += vals_f[iterations].x;
        sum += vals_f[iterations].y;
        iterations++;
    }

    // Intra-warp then inter-warp reduction of the sum.
    for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) shr[gid] = sum;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        sum += g.shfl_down(sum, i);
    }
    sum = g.shfl(sum, 0);
    // Each __half2 holds two values, hence the * 2.
    float mean = sum / (row_stride * 2);

    // Pass 2: center both halves of each pair, accumulate squared deviations.
    float variance = 0.f;
    for (int i = 0; i < iterations; i++) {
        vals_f[i].x -= mean;
        vals_f[i].y -= mean;
        variance += vals_f[i].x * vals_f[i].x;
        variance += vals_f[i].y * vals_f[i].y;
    }

    for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }

    if (g.thread_rank() == 0) shr[gid] = variance;

    b.sync();

    if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    b.sync();
#endif

    for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
        variance += g.shfl_down(variance, i);
    }
    variance = g.shfl(variance, 0);
    variance /= (row_stride * 2);
    variance += epsilon;  // note: vars[row] stores variance + epsilon

    __half2 variance_h = __float2half2_rn(variance);
    const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
    const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);

    if (training && threadIdx.x == 0) vars[row] = __float2half(variance);

    // Pass 3: normalize in half2 arithmetic, apply affine, write out.
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) {
        __half2 vals_arr = __float22half2_rn(vals_f[i]);
        vals_arr = vals_arr * h2rsqrt(variance_h);
        vals_arr =
            vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id];
        vals_cast[i * iteration_stride + id] = vals_arr;
    }
    if ((high_index) < row_stride) {
        __half2 vals_arr = __float22half2_rn(vals_f[iterations]);
        vals_arr = vals_arr * h2rsqrt(variance_h);
        vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index];
        vals_cast[high_index] = vals_arr;
    }
#endif
}
|
| 527 |
+
|
| 528 |
+
// Launcher for the fused layer-norm kernels that do NOT emit per-row means;
// specialized below for float and __half.
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
                                     const T* residual,
                                     const T* gamma,
                                     const T* beta,
                                     float epsilon,
                                     int batch_size,
                                     int hidden_dim,
                                     cudaStream_t stream,
                                     bool preLayerNorm,
                                     bool training,
                                     T* vars);

/*
To tune this launch the following restrictions must be met:

For float:
row_stride == hidden_size
threads * iterations == row_stride
threads is in [32, 64, 128, 256, 512, 1024]

For half:
row_stride == hidden_size / 2
threads * iterations == row_stride
threads is in [32, 64, 128, 256, 512, 1024]

*/

/* FP32 launch (no-means variant): one block per row, block width scaled so
 * that threads * per-thread iterations covers hidden_dim.
 *
 * Throws std::runtime_error for hidden_dim > 65536.
 */
template <>
void launch_bias_residual_layer_norm<float>(float* vals,
                                            const float* residual,
                                            const float* gamma,
                                            const float* beta,
                                            float epsilon,
                                            int batch_size,
                                            int hidden_dim,
                                            cudaStream_t stream,
                                            bool preLayerNorm,
                                            bool training,
                                            float* vars)
{
    int threads = THREADS;

    dim3 grid_dim(batch_size);

    // There are some limitations to call below functions, now just enumerate
    // the situations.  Reject unsupported sizes first; then widen the block
    // for large rows.  (Fixes the "Unsupport" typo in the error message and
    // removes the redundant upper-bound checks from the range chain.)
    if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
    else if (hidden_dim > 32768)
        threads <<= 2;
    else if (hidden_dim > 16384)
        threads <<= 1;

    dim3 block_dim(threads);

    fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
        vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim);
}
|
| 587 |
+
|
| 588 |
+
/* FP16 launch (no-means variant): one block per row.  The kernel operates on
 * __half2 pairs, so it is passed hidden_dim / 2 as the row stride.
 *
 * Throws std::runtime_error for hidden_dim > 65536.
 */
template <>
void launch_bias_residual_layer_norm<__half>(__half* vals,
                                             const __half* residual,
                                             const __half* gamma,
                                             const __half* beta,
                                             float epsilon,
                                             int batch_size,
                                             int hidden_dim,
                                             cudaStream_t stream,
                                             bool preLayerNorm,
                                             bool training,
                                             __half* vars)
{
    int threads = 128;

    dim3 grid_dim(batch_size);

    // There are some limitations to call below functions, now just enumerate
    // the situations.  Reject unsupported sizes first; then widen the block
    // for large rows.  (Fixes the "Unsupport" typo in the error message and
    // removes the redundant upper-bound checks from the range chain.)
    if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
    else if (hidden_dim > 32768)
        threads <<= 3;
    else if (hidden_dim > 16384)
        threads <<= 2;
    else if (hidden_dim > 8192)
        threads <<= 1;

    dim3 block_dim(threads);
    // Row stride is in __half2 units, i.e. half the hidden dimension.
    fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
        vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2);
}
|
| 620 |
+
|
| 621 |
+
/* Gamma & beta gradient reduction for layer-norm backward.
 * Computes the gradients from either X_hat (normalized values, recovered when
 * `invertible` is true) or the already-normalized input directly.
 * Combines the transpose with the gradient computation: each block reduces a
 * TILE_DIM-wide column strip over all rows, staging partials in shared memory
 * with a +1 pad to avoid bank conflicts.
 */

template <typename T>
__global__ void LayerNormBackward1(const T* __restrict__ out_grad,
                                   const T* __restrict__ vals_hat,
                                   const T* __restrict__ gamma,
                                   const T* __restrict__ betta,
                                   T* __restrict__ gamma_grad,
                                   T* __restrict__ betta_grad,
                                   int rows,
                                   int width,
                                   bool invertible)
{
    // Padded tiles (+1 column) so transposed reads below don't conflict.
    __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
    __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);

    int idx = blockDim.x * blockIdx.x + threadIdx.x;  // column handled by this thread
    int offset = threadIdx.y * width + idx;
    int y_stride = width * TILE_DIM;

    // When invertible, recover x_hat from the stored output via
    // (vals_hat - betta) / gamma; otherwise vals_hat already is x_hat.
    float betta_reg = (invertible ? (float)betta[idx] : 0.0f);
    float gamma_reg = (float)gamma[idx];

    // Loop across matrix height, each thread striding TILE_DIM rows.
    float betta_tmp = 0;
    float gamma_tmp = 0;
    for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
        float grad = (float)out_grad[offset];
        float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg
                                : (float)vals_hat[offset]);
        betta_tmp += grad;           // d/d_beta  = sum of grads
        gamma_tmp += (val * grad);   // d/d_gamma = sum of grad * x_hat

        offset += y_stride;
    }

    // Stage transposed so each warp can reduce one column with shuffles.
    betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
    gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;

    __syncthreads();

    // Sum the shared buffer (read back transposed).
    float s1 = betta_buffer[threadIdx.y][threadIdx.x];
    float s2 = gamma_buffer[threadIdx.y][threadIdx.x];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    // Tile-wide shuffle reduction; lane 0 ends up with the column total.
    for (int i = 1; i < TILE_DIM; i <<= 1) {
        s1 += g.shfl_down(s1, i);
        s2 += g.shfl_down(s2, i);
    }

    if (threadIdx.x == 0) {
        int pos = blockIdx.x * TILE_DIM + threadIdx.y;
        betta_grad[pos] = s1;
        gamma_grad[pos] = s2;
    }
}
|
| 688 |
+
|
| 689 |
+
/* Normalize Gamma & Betta gradients
|
| 690 |
+
* Compute gradients using the input to
|
| 691 |
+
* the normalize.
|
| 692 |
+
* Combine transpose with gradients computation.
|
| 693 |
+
*/
|
| 694 |
+
|
| 695 |
+
/* Gamma/Betta gradient reduction for LayerNorm backward, non-invertible
 * variant: recomputes x_hat from the raw input X and the saved per-row
 * mean/variance instead of reading a stored x_hat.  Same tiled-transpose
 * reduction scheme as the invertible overload above. */
template <typename T>
__global__ void LayerNormBackward1(const T* __restrict__ out_grad,
                                   const T* __restrict__ X_data,
                                   const T* __restrict__ vars,
                                   const T* __restrict__ means,
                                   T* __restrict__ gamma_grad,
                                   T* __restrict__ betta_grad,
                                   int rows,
                                   int width)
{
    __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
    __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);

    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int offset = threadIdx.y * width + idx;
    int y_stride = width * TILE_DIM;

    int pos = blockIdx.x * TILE_DIM + threadIdx.y;
    // Loop across matrix height

    float betta_tmp = 0;
    float gamma_tmp = 0;
    for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
        float grad = (float)out_grad[offset];
        float val = (float)X_data[offset];
        // Recompute x_hat = (x - mean) / sqrt(var) for row r.
        val = (val - (float)means[r]) * rsqrtf((float)vars[r]);
        betta_tmp += grad;
        gamma_tmp += (val * grad);

        offset += y_stride;
    }

    betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
    gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;

    __syncthreads();

    // Sum the shared buffer.
    float s1 = betta_buffer[threadIdx.y][threadIdx.x];
    float s2 = gamma_buffer[threadIdx.y][threadIdx.x];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < TILE_DIM; i <<= 1) {
        s1 += g.shfl_down(s1, i);
        s2 += g.shfl_down(s2, i);
    }

    if (threadIdx.x == 0) {
        betta_grad[pos] = s1;
        gamma_grad[pos] = s2;
    }
}
|
| 753 |
+
/* Backward Normalize (Input-Gradient)
 * Using the means and variances from the input
 * This type of backward is invertible!
 * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization.
 */
|
| 760 |
+
|
| 761 |
+
/* LayerNorm input-gradient kernel (float, invertible path).
 * One block per row (blockIdx.x); each thread owns up to NORM_REG strided
 * elements of the row.  The backward formula's two row-wide reduction terms
 * are computed with warp shuffles plus a shared-memory stage across warps.
 * When `invertible`, x_hat is reconstructed from the stored output as
 * (y - betta) / gamma rather than read from a saved buffer. */
__global__ void LayerNormBackward2(const float* out_grad,
                                   const float* vals_hat,
                                   const float* gamma,
                                   const float* betta,
                                   const float* vars,
                                   float* inp_grad,
                                   bool invertible,
                                   int row_stride)
{
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id / WARP_SIZE;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;
    __shared__ float partialSum[MAX_WARP_NUM];

    // Advance the row pointers to this block's row.
    out_grad += (row * row_stride);
    vals_hat += (row * row_stride);
    inp_grad += (row * row_stride);

    float vals_arr[NORM_REG];
    float vals_hat_arr[NORM_REG];
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        float gamma_reg = gamma[i * iteration_stride + id];
        vals_arr[i] = out_grad[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;
        vals_hat_arr[i] =
            (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) /
                              gamma_reg
                        : vals_hat[i * iteration_stride + id]);
    }
    // Tail element when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        float gamma_reg = gamma[high_index];
        vals_arr[iterations] = out_grad[high_index];
        vals_arr[iterations] *= gamma_reg;
        vals_hat_arr[iterations] =
            (invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg
                        : vals_hat[high_index]);
        iterations++;
    }

    float var_reg = vars[row];

    // First reduction term: sum over the row of x_hat * gamma * out_grad.
    float sum = 0;
    for (int i = 0; i < iterations; i++) {
        sum += vals_hat_arr[i] * vals_arr[i] *
               sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad
        vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var)
    }

    // Intra-warp shuffle reduction, then inter-warp via shared memory.
    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    // Broadcast the row total and turn it into a mean.
    sum = g.shfl(sum, 0);
    sum /= row_stride;

    for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); }

    // Second reduction term: mean of the partially-corrected gradients.
    sum = 0;
    for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
    sum = g.shfl(sum, 0);
    sum /= row_stride;

    // Write the final input gradient (including the tail element, if any).
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum);
    if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum);
}
|
| 860 |
+
|
| 861 |
+
/* LayerNorm input-gradient kernel (__half, invertible path).
 * Vectorized over __half2: buffers are reinterpreted as __half2 and offset
 * by row * row_stride, so `row_stride` here counts __half2 pairs
 * (presumably hidden_dim / 2 — confirm against the launcher).  Reductions
 * accumulate in float for accuracy, and per-pair sums contribute two lanes,
 * hence the division by (2 * row_stride).  Entire body compiles away when
 * HALF_PRECISION_AVAILABLE is not defined. */
__global__ void LayerNormBackward2(const __half* out_grad,
                                   const __half* vals_hat,
                                   const __half* gamma,
                                   const __half* betta,
                                   const __half* vars,
                                   __half* inp_grad,
                                   bool invertible,
                                   int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id / WARP_SIZE;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;
    __shared__ float partialSum[MAX_WARP_NUM];

    __half2 vals_arr[NORM_REG];
    float2 vals_arr_f[NORM_REG];
    __half2 vals_hat_arr[NORM_REG];

    // View all row buffers as packed __half2.
    __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
    const __half2* out_grad_h = reinterpret_cast<const __half2*>(out_grad);
    const __half2* vals_hat_h = reinterpret_cast<const __half2*>(vals_hat);

    inp_grad_h += (row * row_stride);
    out_grad_h += (row * row_stride);
    vals_hat_h += (row * row_stride);

    const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
    const __half2* betta_h = (invertible ? reinterpret_cast<const __half2*>(betta) : nullptr);
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        __half2 gamma_reg = gamma_h[i * iteration_stride + id];
        vals_arr[i] = out_grad_h[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;
        // Rebuild x_hat = (y - betta) / gamma when the op is invertible.
        vals_hat_arr[i] =
            (invertible
                 ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) /
                       gamma_reg
                 : vals_hat_h[i * iteration_stride + id]);
    }
    // Tail pair when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        __half2 gamma_reg = gamma_h[high_index];
        vals_arr[iterations] = out_grad_h[high_index];
        vals_arr[iterations] *= gamma_reg;
        vals_hat_arr[iterations] =
            (invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg
                        : vals_hat_h[high_index]);
        iterations++;
    }
    __half var_h = vars[row];
    __half2 var_reg = __halves2half2(var_h, var_h);

    // First reduction term, accumulated in float across both half lanes.
    float sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg));
        float2 result_f = __half22float2(result_h);
        sum += result_f.x;
        sum += result_f.y;
        vals_arr[i] *= h2rsqrt(var_reg);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    // Two half elements per __half2: mean over 2 * row_stride scalars.
    sum /= (2 * row_stride);
    __half2 sum_h = __float2half2_rn(sum);

    for (int i = 0; i < iterations; i++) {
        __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg));
        vals_arr_f[i] = __half22float2(vals_arr[i]);
        float2 temp_f = __half22float2(temp);
        vals_arr_f[i].x += temp_f.x;
        vals_arr_f[i].y += temp_f.y;
    }
    sum = 0.f;

    // Second reduction term: mean of the corrected gradients.
    for (int i = 0; i < iterations; i++) {
        sum += (vals_arr_f[i].x);
        sum += (vals_arr_f[i].y);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);

    // Write the final input gradient, converting back to __half2.
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) {
        vals_arr_f[i].x -= sum;
        vals_arr_f[i].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[i]);

        inp_grad_h[i * iteration_stride + id] = temp;
    }
    if ((high_index) < row_stride) {
        vals_arr_f[iterations].x -= sum;
        vals_arr_f[iterations].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[iterations]);

        inp_grad_h[high_index] = temp;
    }
#endif
}
|
| 996 |
+
|
| 997 |
+
/* Host launcher for the invertible float LayerNorm backward.
 * stream[0]: LayerNormBackward1 reduces gamma/betta gradients over the batch.
 * stream[1]: LayerNormBackward2 computes the input gradient, one block per
 * row, scaling the thread count with hidden_dim in power-of-two steps. */
template <>
void launch_layerNorm_backward<float>(const float* out_grad,
                                      const float* vals_hat,
                                      const float* vars,
                                      const float* gamma,
                                      float* gamma_grad,
                                      float* betta_grad,
                                      float* inp_grad,
                                      int batch,
                                      int hidden_dim,
                                      cudaStream_t stream[2],
                                      bool invertible,
                                      const float* betta)
{
    dim3 grad_grid(hidden_dim / TILE_DIM);
    dim3 grad_block(TILE_DIM, TILE_DIM);

    LayerNormBackward1<float><<<grad_grid, grad_block, 0, stream[0]>>>(
        out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);

    dim3 inp_grid(batch);

    // Pick the block size tier for the input-gradient kernel.
    int thread_shift = 0;
    if (hidden_dim > 65536) {
        throw std::runtime_error("Unsupport hidden_dim.");
    } else if (hidden_dim > 32768) {
        thread_shift = 2;
    } else if (hidden_dim > 16384) {
        thread_shift = 1;
    }
    dim3 inp_block(THREADS << thread_shift);

    LayerNormBackward2<<<inp_grid, inp_block, 0, stream[1]>>>(
        out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim);
}
|
| 1033 |
+
|
| 1034 |
+
/* Host launcher for the invertible __half LayerNorm backward.
 * The input-gradient kernel works on __half2 pairs, so it gets half the
 * threads and half the row stride. */
template <>
void launch_layerNorm_backward<__half>(const __half* out_grad,
                                       const __half* vals_hat,
                                       const __half* vars,
                                       const __half* gamma,
                                       __half* gamma_grad,
                                       __half* betta_grad,
                                       __half* inp_grad,
                                       int batch,
                                       int hidden_dim,
                                       cudaStream_t stream[2],
                                       bool invertible,
                                       const __half* betta)
{
    dim3 grid_dim(hidden_dim / TILE_DIM);
    dim3 block_dim(TILE_DIM, TILE_DIM);

    // NOTE(review): the gamma/betta gradient kernel is disabled in the
    // original code; it is kept disabled here to preserve behavior exactly.
    // LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
    // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);

    dim3 inp_grid(batch);

    // Pick the block size tier for the input-gradient kernel.
    int thread_shift = 0;
    if (hidden_dim > 65536) {
        throw std::runtime_error("Unsupport hidden_dim.");
    } else if (hidden_dim > 32768) {
        thread_shift = 3;
    } else if (hidden_dim > 16384) {
        thread_shift = 2;
    } else if (hidden_dim > 8192) {
        thread_shift = 1;
    }
    // Halve threads and stride: each thread covers one __half2 pair.
    dim3 inp_block((THREADS << thread_shift) / 2);

    LayerNormBackward2<<<inp_grid, inp_block, 0, stream[1]>>>(
        out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2);
}
|
| 1072 |
+
|
| 1073 |
+
/* Backward Normalize (Input-Gradient)
|
| 1074 |
+
* Using the means and variances from the input
|
| 1075 |
+
* This type of backward is not invertible!
|
| 1076 |
+
* We do the backward using the input (X)
|
| 1077 |
+
*/
|
| 1078 |
+
|
| 1079 |
+
/* LayerNorm input-gradient kernel (float, non-invertible path).
 * Recomputes x - mean from the raw input X and the saved per-row mean,
 * instead of using a stored x_hat.  One block per row; two block-wide
 * reductions via warp shuffles plus a shared-memory stage, as above. */
__global__ void LayerNormBackward2(const float* out_grad,
                                   const float* X_vals,
                                   const float* gamma,
                                   const float* vars,
                                   const float* means,
                                   float* inp_grad,
                                   int row_stride)
{
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id >> WARP_SIZE_BITS;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;
    __shared__ float partialSum[MAX_WARP_NUM];

    // Advance the row pointers to this block's row.
    out_grad += (row * row_stride);
    X_vals += (row * row_stride);
    inp_grad += (row * row_stride);

    float vals_arr[NORM_REG];
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        float gamma_reg = gamma[i * iteration_stride + id];
        vals_arr[i] = out_grad[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;
    }
    // Tail element when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        float gamma_reg = gamma[high_index];
        vals_arr[iterations] = out_grad[high_index];
        vals_arr[iterations] *= gamma_reg;
        iterations++;
    }

    float var_reg = vars[row];
    float mean_reg = means[row];

    // First reduction term over (x - mean) * gamma * out_grad.
    float sum = 0;
    float xu[NORM_REG];
    for (int i = 0; i < iterations; i++) {
        xu[i] = (X_vals[i * iteration_stride + id] - mean_reg);
        sum += vals_arr[i] * xu[i];
        vals_arr[i] *= rsqrtf(var_reg);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= row_stride;

    for (int i = 0; i < iterations; i++) {
        vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg));
    }

    // Second reduction term: mean of the corrected gradients.
    sum = 0;
    for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
    sum = g.shfl(sum, 0);
    sum /= row_stride;

    // Write the final input gradient (including the tail element, if any).
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum);
    if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum);
}
|
| 1173 |
+
|
| 1174 |
+
/* LayerNorm input-gradient kernel (__half, non-invertible path).
 * Recomputes x - mean from raw X plus saved mean/variance; vectorized over
 * __half2, with `row_stride` counting __half2 pairs (presumably
 * hidden_dim / 2 — confirm against the launcher).  Reductions accumulate
 * in float; two lanes per pair, hence division by (2 * row_stride).
 * Entire body compiles away when HALF_PRECISION_AVAILABLE is undefined. */
__global__ void LayerNormBackward2(const __half* out_grad,
                                   const __half* X_vals,
                                   const __half* gamma,
                                   const __half* vars,
                                   const __half* means,
                                   __half* inp_grad,
                                   int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id >> WARP_SIZE_BITS;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;

    __shared__ float partialSum[MAX_WARP_NUM];

    __half2 vals_arr[NORM_REG];
    float2 vals_arr_f[NORM_REG];
    __half2 xu[NORM_REG];

    // View the row buffers as packed __half2.
    __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
    const __half2* out_grad_h = reinterpret_cast<const __half2*>(out_grad);
    const __half2* vals_hat_h = reinterpret_cast<const __half2*>(X_vals);

    inp_grad_h += (row * row_stride);
    out_grad_h += (row * row_stride);
    vals_hat_h += (row * row_stride);

    const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
    int high_index = iterations * iteration_stride + id;

    __half mean_h = means[row];
    __half2 mean_reg = __halves2half2(mean_h, mean_h);
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        __half2 gamma_reg = gamma_h[i * iteration_stride + id];
        vals_arr[i] = out_grad_h[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg; // out_grad * gamma
        xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg);
    }
    // Tail pair when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        __half2 gamma_reg = gamma_h[high_index];
        vals_arr[iterations] = out_grad_h[high_index];
        vals_arr[iterations] *= gamma_reg; // out_grad * gamma
        xu[iterations] = (vals_hat_h[high_index] - mean_reg);
        iterations++;
    }
    __half var_h = vars[row];
    __half2 var_reg = __halves2half2(var_h, var_h);

    // First reduction term, accumulated in float across both half lanes.
    float sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        __half2 result_h = (xu[i] * vals_arr[i]);
        float2 result_f = __half22float2(result_h);
        sum += result_f.x;
        sum += result_f.y;
        vals_arr[i] *= h2rsqrt(var_reg);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);
    __half2 sum_h = __float2half2_rn(sum);

    for (int i = 0; i < iterations; i++) {
        __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg));
        vals_arr_f[i] = __half22float2(vals_arr[i]);
        float2 xu_grad_f = __half22float2(xu_grad);
        vals_arr_f[i].x += xu_grad_f.x;
        vals_arr_f[i].y += xu_grad_f.y;
    }

    // Second reduction term: mean of the corrected gradients.
    sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        sum += (vals_arr_f[i].x);
        sum += (vals_arr_f[i].y);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);

    // Write the final input gradient, converting back to __half2.
    iterations = row_stride / iteration_stride;
    for (int i = 0; i < iterations; i++) {
        vals_arr_f[i].x -= sum;
        vals_arr_f[i].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[i]);
        inp_grad_h[i * iteration_stride + id] = temp;
    }
    if ((high_index) < row_stride) {
        vals_arr_f[iterations].x -= sum;
        vals_arr_f[iterations].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[iterations]);
        inp_grad_h[high_index] = temp;
    }
#endif
}
|
| 1303 |
+
|
| 1304 |
+
/* Host launcher for the non-invertible float LayerNorm backward
 * (x_hat recomputed from X, means, and vars).
 * stream[0]: gamma/betta gradient reduction; stream[1]: input gradient. */
template <>
void launch_layerNorm_backward<float>(const float* out_grad,
                                      const float* X_data,
                                      const float* vars,
                                      const float* means,
                                      const float* gamma,
                                      float* gamma_grad,
                                      float* betta_grad,
                                      float* inp_grad,
                                      int batch,
                                      int hidden_dim,
                                      cudaStream_t stream[2])
{
    dim3 grad_grid(hidden_dim / TILE_DIM);
    dim3 grad_block(TILE_DIM, TILE_DIM);

    LayerNormBackward1<float><<<grad_grid, grad_block, 0, stream[0]>>>(
        out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);

    dim3 inp_grid(batch);

    // Pick the block size tier for the input-gradient kernel.
    int thread_shift = 0;
    if (hidden_dim > 65536) {
        throw std::runtime_error("Unsupport hidden_dim.");
    } else if (hidden_dim > 32768) {
        thread_shift = 2;
    } else if (hidden_dim > 16384) {
        thread_shift = 1;
    }
    dim3 inp_block(THREADS << thread_shift);

    LayerNormBackward2<<<inp_grid, inp_block, 0, stream[1]>>>(
        out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim);
}
|
| 1338 |
+
|
| 1339 |
+
/* Host launcher for the non-invertible __half LayerNorm backward.
 * The input-gradient kernel works on __half2 pairs, so it gets half the
 * threads and half the row stride. */
template <>
void launch_layerNorm_backward<__half>(const __half* out_grad,
                                       const __half* X_data,
                                       const __half* vars,
                                       const __half* means,
                                       const __half* gamma,
                                       __half* gamma_grad,
                                       __half* betta_grad,
                                       __half* inp_grad,
                                       int batch,
                                       int hidden_dim,
                                       cudaStream_t stream[2])
{
    dim3 grad_grid(hidden_dim / TILE_DIM);
    dim3 grad_block(TILE_DIM, TILE_DIM);

    LayerNormBackward1<__half><<<grad_grid, grad_block, 0, stream[0]>>>(
        out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);

    dim3 inp_grid(batch);

    // Pick the block size tier for the input-gradient kernel.
    int thread_shift = 0;
    if (hidden_dim > 65536) {
        throw std::runtime_error("Unsupport hidden_dim.");
    } else if (hidden_dim > 32768) {
        thread_shift = 3;
    } else if (hidden_dim > 16384) {
        thread_shift = 2;
    } else if (hidden_dim > 8192) {
        thread_shift = 1;
    }
    // Halve threads and stride: each thread covers one __half2 pair.
    dim3 inp_block((THREADS << thread_shift) / 2);

    LayerNormBackward2<<<inp_grid, inp_block, 0, stream[1]>>>(
        out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2);
}
|
| 1375 |
+
|
| 1376 |
+
/* Gamma/Betta gradient reduction fused with a residual add: identical to
 * LayerNormBackward1 (invertible overload) except the incoming gradient is
 * the sum of two gradient streams (out_grad1 + out_grad2). */
template <typename T>
__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1,
                                             const T* __restrict__ out_grad2,
                                             const T* __restrict__ vals_hat,
                                             const T* __restrict__ gamma,
                                             const T* __restrict__ betta,
                                             T* __restrict__ gamma_grad,
                                             T* __restrict__ betta_grad,
                                             int rows,
                                             int width,
                                             bool invertible)
{
    __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
    __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);

    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int offset = threadIdx.y * width + idx;
    int y_stride = width * TILE_DIM;

    float betta_reg = (invertible ? (float)betta[idx] : 0.0f);
    float gamma_reg = (float)gamma[idx];

    // Loop across matrix height
    float betta_tmp = 0;
    float gamma_tmp = 0;
    for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
        // Fused add: combine both incoming gradient streams.
        float grad = (float)out_grad1[offset] + (float)out_grad2[offset];
        float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg
                                : (float)vals_hat[offset]);
        betta_tmp += grad;
        gamma_tmp += (val * grad);

        offset += y_stride;
    }

    betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
    gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;

    __syncthreads();

    // Sum the shared buffer.
    float s1 = betta_buffer[threadIdx.y][threadIdx.x];
    float s2 = gamma_buffer[threadIdx.y][threadIdx.x];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < TILE_DIM; i <<= 1) {
        s1 += g.shfl_down(s1, i);
        s2 += g.shfl_down(s2, i);
    }

    if (threadIdx.x == 0) {
        int pos = blockIdx.x * TILE_DIM + threadIdx.y;
        betta_grad[pos] = s1;
        gamma_grad[pos] = s2;
    }
}
|
| 1438 |
+
|
| 1439 |
+
/* Gamma/Betta gradient reduction fused with a residual add, non-invertible
 * variant: x_hat is recomputed from the raw input X and the saved per-row
 * mean/variance, and the incoming gradient is out_grad1 + out_grad2. */
template <typename T>
__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1,
                                             const T* __restrict__ out_grad2,
                                             const T* __restrict__ X_data,
                                             const T* __restrict__ vars,
                                             const T* __restrict__ means,
                                             T* __restrict__ gamma_grad,
                                             T* __restrict__ betta_grad,
                                             int rows,
                                             int width)
{
    __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
    __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);

    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int offset = threadIdx.y * width + idx;
    int y_stride = width * TILE_DIM;

    int pos = blockIdx.x * TILE_DIM + threadIdx.y;
    // Loop across matrix height

    float betta_tmp = 0;
    float gamma_tmp = 0;
    for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
        // Fused add: combine both incoming gradient streams.
        float grad = (float)out_grad1[offset] + (float)out_grad2[offset];
        float val = (float)X_data[offset];
        // Recompute x_hat = (x - mean) / sqrt(var) for row r.
        val = (val - (float)means[r]) * rsqrtf((float)vars[r]);
        betta_tmp += grad;
        gamma_tmp += (val * grad);

        offset += y_stride;
    }

    betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
    gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;

    __syncthreads();

    // Sum the shared buffer.
    float s1 = betta_buffer[threadIdx.y][threadIdx.x];
    float s2 = gamma_buffer[threadIdx.y][threadIdx.x];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < TILE_DIM; i <<= 1) {
        s1 += g.shfl_down(s1, i);
        s2 += g.shfl_down(s2, i);
    }

    if (threadIdx.x == 0) {
        betta_grad[pos] = s1;
        gamma_grad[pos] = s2;
    }
}
|
| 1498 |
+
|
| 1499 |
+
/* Backward LayerNorm, pass 2 (fp32, fused residual-add, "invertible" path):
 * computes the input gradient for one row per block from the normalized
 * output vals_hat. When `invertible` is true, x_hat is reconstructed as
 * (vals_hat - betta) / gamma; otherwise vals_hat is assumed to already be
 * x_hat. out_grad2 is the residual gradient added to the result at the end.
 * One block per row (blockIdx.x == row); row_stride is the row width.
 */
__global__ void LayerNormBackward2_fused_add(const float* out_grad1,
                                             const float* out_grad2,
                                             const float* vals_hat,
                                             const float* gamma,
                                             const float* betta,
                                             const float* vars,
                                             float* inp_grad,
                                             bool invertible,
                                             int row_stride)
{
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;  // full passes over the row

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id / WARP_SIZE;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;
    __shared__ float partialSum[MAX_WARP_NUM];  // one slot per warp for the block reduction

    // Advance all row pointers to this block's row.
    out_grad1 += (row * row_stride);
    out_grad2 += (row * row_stride);
    vals_hat += (row * row_stride);
    inp_grad += (row * row_stride);

    float vals_arr[NORM_REG];      // per-thread grad * gamma values
    float vals_hat_arr[NORM_REG];  // per-thread x_hat values
    int high_index = iterations * iteration_stride + id;  // tail element, if any
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        float gamma_reg = gamma[i * iteration_stride + id];
        vals_arr[i] = out_grad1[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;
        // Recover x_hat from the forward output when invertible.
        vals_hat_arr[i] =
            (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) /
                              gamma_reg
                        : vals_hat[i * iteration_stride + id]);
    }
    // Handle the row tail when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        float gamma_reg = gamma[high_index];
        vals_arr[iterations] = out_grad1[high_index];
        vals_arr[iterations] *= gamma_reg;
        vals_hat_arr[iterations] =
            (invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg
                        : vals_hat[high_index]);
        iterations++;
    }

    float var_reg = vars[row];

    // First reduction term: sum(x_hat * grad) (scaled by sqrt(var) here,
    // normalized again below).
    float sum = 0;
    for (int i = 0; i < iterations; i++) {
        sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg);
        vals_arr[i] *= rsqrtf(var_reg);
    }

    // Warp-level reduction, then cross-warp reduction through shared memory.
    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);  // broadcast the block total
    sum /= row_stride;

    // Variance-gradient contribution.
    for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); }

    // Second reduction: mean-gradient contribution.
    sum = 0;
    for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
    sum = g.shfl(sum, 0);
    sum /= row_stride;

    // Write the input gradient, fusing in the residual gradient (out_grad2).
    iterations = row_stride / iteration_stride;  // reset: last slot is the tail
    for (int i = 0; i < iterations; i++)
        inp_grad[i * iteration_stride + id] =
            (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id];
    if ((high_index) < row_stride)
        inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index];
}
|
| 1602 |
+
|
| 1603 |
+
/* Backward LayerNorm, pass 2 (__half, fused residual-add, "invertible" path).
 * Same math as the fp32 overload above, but processes the row as __half2
 * pairs (row_stride here counts __half2 elements, i.e. hidden_dim / 2 —
 * see the launcher), accumulating the reductions in fp32 for accuracy.
 * Compiled to a no-op body unless HALF_PRECISION_AVAILABLE is defined.
 */
__global__ void LayerNormBackward2_fused_add(const __half* out_grad1,
                                             const __half* out_grad2,
                                             const __half* vals_hat,
                                             const __half* gamma,
                                             const __half* betta,
                                             const __half* vars,
                                             __half* inp_grad,
                                             bool invertible,
                                             int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id / WARP_SIZE;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;
    __shared__ float partialSum[MAX_WARP_NUM];

    __half2 vals_arr[NORM_REG];      // grad * gamma, packed pairs
    float2 vals_arr_f[NORM_REG];     // fp32 staging for the final combine
    __half2 vals_hat_arr[NORM_REG];  // x_hat, packed pairs

    // float2 result[iterations];

    // Reinterpret the row buffers as packed half2 (assumes 4-byte alignment
    // and an even hidden size — guaranteed by the launcher's hidden_dim / 2).
    __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
    const __half2* out_grad_h1 = reinterpret_cast<const __half2*>(out_grad1);
    const __half2* out_grad_h2 = reinterpret_cast<const __half2*>(out_grad2);
    const __half2* vals_hat_h = reinterpret_cast<const __half2*>(vals_hat);

    inp_grad_h += (row * row_stride);
    out_grad_h1 += (row * row_stride);
    out_grad_h2 += (row * row_stride);
    vals_hat_h += (row * row_stride);

    const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
    const __half2* betta_h = (invertible ? reinterpret_cast<const __half2*>(betta) : nullptr);
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        __half2 gamma_reg = gamma_h[i * iteration_stride + id];
        vals_arr[i] = out_grad_h1[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;  // out_grad * gamma
        // Recover x_hat from the forward output when invertible.
        vals_hat_arr[i] =
            (invertible
                 ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) /
                       gamma_reg
                 : vals_hat_h[i * iteration_stride + id]);
    }
    // Row tail when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        __half2 gamma_reg = gamma_h[high_index];
        vals_arr[iterations] = out_grad_h1[high_index];
        vals_arr[iterations] *= gamma_reg;  // out_grad * gamma
        vals_hat_arr[iterations] =
            (invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg
                        : vals_hat_h[high_index]);
        iterations++;
    }
    __half var_h = vars[row];
    __half2 var_reg = __halves2half2(var_h, var_h);  // broadcast var to both lanes

    // First reduction: sum(x_hat * grad * sqrt(var)), accumulated in fp32.
    float sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg));
        float2 result_f = __half22float2(result_h);
        sum += result_f.x;
        sum += result_f.y;
        vals_arr[i] *= h2rsqrt(var_reg);
    }

    // Warp reduction, then cross-warp reduction via shared memory.
    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);  // 2x: each half2 element holds two scalars
    __half2 sum_h = __float2half2_rn(sum);

    // Variance-gradient contribution, combined in fp32.
    for (int i = 0; i < iterations; i++) {
        __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg));
        vals_arr_f[i] = __half22float2(vals_arr[i]);
        float2 temp_f = __half22float2(temp);
        vals_arr_f[i].x += temp_f.x;
        vals_arr_f[i].y += temp_f.y;
    }
    // Second reduction: mean-gradient contribution.
    sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        sum += (vals_arr_f[i].x);
        sum += (vals_arr_f[i].y);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);

    // Write the input gradient, fusing in the residual gradient (out_grad2).
    iterations = row_stride / iteration_stride;  // reset: last slot is the tail
    for (int i = 0; i < iterations; i++) {
        vals_arr_f[i].x -= sum;
        vals_arr_f[i].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[i]);

        inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id];
    }
    if ((high_index) < row_stride) {
        vals_arr_f[iterations].x -= sum;
        vals_arr_f[iterations].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[iterations]);

        inp_grad_h[high_index] = temp + out_grad_h2[high_index];
    }
#endif
}
|
| 1742 |
+
|
| 1743 |
+
/* Host launcher for the fused-add LayerNorm backward (fp32, invertible path).
 *
 * Runs the two backward passes on two separate streams:
 *   stream[0]: LayerNormBackward1 -> gamma_grad / betta_grad
 *   stream[1]: LayerNormBackward2_fused_add -> inp_grad (adds out_grad2)
 *
 * Assumes hidden_dim is a multiple of TILE_DIM (grid_dim truncates otherwise
 * — TODO confirm callers guarantee this). Scales the pass-2 block size with
 * hidden_dim and rejects hidden_dim > 65536.
 *
 * Fix: error message corrected from "Unsupport hidden_dim." to
 * "Unsupported hidden_dim.".
 */
template <>
void launch_layerNorm_backward_fused_add<float>(const float* out_grad1,
                                                const float* out_grad2,
                                                const float* vals_hat,
                                                const float* vars,
                                                const float* gamma,
                                                float* gamma_grad,
                                                float* betta_grad,
                                                float* inp_grad,
                                                int batch,
                                                int hidden_dim,
                                                cudaStream_t stream[2],
                                                bool invertible,
                                                const float* betta)
{
    int threads = THREADS;

    // Pass 1: one TILE_DIM x TILE_DIM block per TILE_DIM columns.
    dim3 grid_dim(hidden_dim / TILE_DIM);
    dim3 block_dim(TILE_DIM, TILE_DIM);
    LayerNormBackward1<float><<<grid_dim, block_dim, 0, stream[0]>>>(
        out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);

    // Pass 2: one block per row; widen the block for large hidden sizes so
    // the per-thread register arrays (NORM_REG) still cover the row.
    dim3 grid_dim2(batch);

    if (hidden_dim > 16384 && hidden_dim <= 32768)
        threads <<= 1;
    else if (hidden_dim > 32768 && hidden_dim <= 65536)
        threads <<= 2;
    else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");

    dim3 block_dim2(threads);
    LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
        out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim);
}
|
| 1778 |
+
|
| 1779 |
+
/* Host launcher for the fused-add LayerNorm backward (__half, invertible
 * path). Mirrors the fp32 overload, but pass 2 operates on __half2 pairs:
 * the block size is halved (threads / 2) and the row stride passed to the
 * kernel is hidden_dim / 2. The block-widening thresholds start at 8192
 * (half of the fp32 thresholds) for the same reason.
 *
 * Assumes hidden_dim is even and a multiple of TILE_DIM — TODO confirm
 * callers guarantee this.
 *
 * Fix: error message corrected from "Unsupport hidden_dim." to
 * "Unsupported hidden_dim.".
 */
template <>
void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1,
                                                 const __half* out_grad2,
                                                 const __half* vals_hat,
                                                 const __half* vars,
                                                 const __half* gamma,
                                                 __half* gamma_grad,
                                                 __half* betta_grad,
                                                 __half* inp_grad,
                                                 int batch,
                                                 int hidden_dim,
                                                 cudaStream_t stream[2],
                                                 bool invertible,
                                                 const __half* betta)
{
    int threads = THREADS;

    // Pass 1: gamma/betta gradients.
    dim3 grid_dim(hidden_dim / TILE_DIM);
    dim3 block_dim(TILE_DIM, TILE_DIM);

    LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
        out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);

    // Pass 2: input gradient, one block per row over half2 elements.
    dim3 grid_dim2(batch);

    if (hidden_dim > 8192 && hidden_dim <= 16384)
        threads <<= 1;
    else if (hidden_dim > 16384 && hidden_dim <= 32768)
        threads <<= 2;
    else if (hidden_dim > 32768 && hidden_dim <= 65536)
        threads <<= 3;
    else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");

    dim3 block_dim2(threads / 2);
    LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
        out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2);
}
|
| 1817 |
+
|
| 1818 |
+
/* Backward Normalize (Input-Gradient)
|
| 1819 |
+
* Using the means and variances from the input
|
| 1820 |
+
* This type of backward is not invertible!
|
| 1821 |
+
* We do the backward using the input (X)
|
| 1822 |
+
*/
|
| 1823 |
+
|
| 1824 |
+
/* Backward LayerNorm, pass 2 (fp32, fused residual-add, X/means path):
 * computes the input gradient for one row per block directly from the saved
 * forward input X_vals plus the per-row means and variances (this variant
 * is not invertible — see the comment block above). out_grad2 is the
 * residual gradient added to the result at the end.
 * One block per row (blockIdx.x == row); row_stride is the row width.
 */
__global__ void LayerNormBackward2_fused_add(const float* out_grad1,
                                             const float* out_grad2,
                                             const float* X_vals,
                                             const float* gamma,
                                             const float* vars,
                                             const float* means,
                                             float* inp_grad,
                                             int row_stride)
{
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;  // full passes over the row

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id / WARP_SIZE;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;
    __shared__ float partialSum[MAX_WARP_NUM];  // one slot per warp

    float vals_arr[NORM_REG];      // grad * gamma
    float vals_hat_arr[NORM_REG];  // raw forward inputs x (not yet normalized)

    // Advance all row pointers to this block's row.
    out_grad1 += (row * row_stride);
    out_grad2 += (row * row_stride);
    X_vals += (row * row_stride);
    inp_grad += (row * row_stride);
    int high_index = iterations * iteration_stride + id;  // tail element, if any
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        float gamma_reg = gamma[i * iteration_stride + id];
        vals_arr[i] = out_grad1[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;
        vals_hat_arr[i] = X_vals[i * iteration_stride + id];
    }
    // Row tail when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        float gamma_reg = gamma[high_index];
        vals_arr[iterations] = out_grad1[high_index];
        vals_arr[iterations] *= gamma_reg;
        vals_hat_arr[iterations] = X_vals[high_index];
        iterations++;
    }

    float var_reg = vars[row];
    float mean_reg = means[row];

    // First reduction: sum(grad * (x - mean)) for the variance gradient.
    float sum = 0;
    float xu[NORM_REG];  // centered inputs x - mean
    for (int i = 0; i < iterations; i++) {
        xu[i] = (vals_hat_arr[i] - mean_reg);
        sum += vals_arr[i] * xu[i];
        vals_arr[i] *= rsqrtf(var_reg);
    }

    // Warp reduction, then cross-warp reduction via shared memory.
    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);  // broadcast the block total
    sum /= row_stride;

    // Variance-gradient contribution per element.
    for (int i = 0; i < iterations; i++) {
        vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg));
    }

    // Second reduction: mean-gradient contribution.
    sum = 0;
    for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
    sum = g.shfl(sum, 0);
    sum /= row_stride;

    // Write the input gradient, fusing in the residual gradient (out_grad2).
    iterations = row_stride / iteration_stride;  // reset: last slot is the tail
    for (int i = 0; i < iterations; i++)
        inp_grad[i * iteration_stride + id] =
            (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id];
    if ((high_index) < row_stride)
        inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index];
}
|
| 1926 |
+
|
| 1927 |
+
/* Backward LayerNorm, pass 2 (__half, fused residual-add, X/means path).
 * Same math as the fp32 X/means overload above, but processes the row as
 * __half2 pairs (row_stride counts __half2 elements — the launcher passes
 * hidden_dim / 2), accumulating reductions in fp32.
 * Compiled to a no-op body unless HALF_PRECISION_AVAILABLE is defined.
 */
__global__ void LayerNormBackward2_fused_add(const __half* out_grad1,
                                             const __half* out_grad2,
                                             const __half* X_vals,
                                             const __half* gamma,
                                             const __half* vars,
                                             const __half* means,
                                             __half* inp_grad,
                                             int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
    int iteration_stride = blockDim.x;
    int iterations = row_stride / iteration_stride;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;
    int wid = id / WARP_SIZE;
    int warp_num = iteration_stride >> WARP_SIZE_BITS;

    __shared__ float partialSum[MAX_WARP_NUM];

    __half2 vals_arr[NORM_REG];      // grad * gamma, packed pairs
    float2 vals_arr_f[NORM_REG];     // fp32 staging for the final combine
    __half2 vals_hat_arr[NORM_REG];  // raw forward inputs x, packed pairs

    // Reinterpret the row buffers as packed half2 (assumes even hidden size
    // and suitable alignment — guaranteed by the launcher's hidden_dim / 2).
    __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
    const __half2* out_grad_h1 = reinterpret_cast<const __half2*>(out_grad1);
    const __half2* out_grad_h2 = reinterpret_cast<const __half2*>(out_grad2);
    const __half2* vals_hat_h = reinterpret_cast<const __half2*>(X_vals);

    out_grad_h1 += (row * row_stride);
    out_grad_h2 += (row * row_stride);
    inp_grad_h += (row * row_stride);
    vals_hat_h += (row * row_stride);

    const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
    int high_index = iterations * iteration_stride + id;
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        __half2 gamma_reg = gamma_h[i * iteration_stride + id];
        vals_arr[i] = out_grad_h1[i * iteration_stride + id];
        vals_arr[i] *= gamma_reg;  // out_grad * gamma
        vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id];
    }
    // Row tail when row_stride is not a multiple of blockDim.x.
    if ((high_index) < row_stride) {
        __half2 gamma_reg = gamma_h[high_index];
        vals_arr[iterations] = out_grad_h1[high_index];
        vals_arr[iterations] *= gamma_reg;  // out_grad * gamma
        vals_hat_arr[iterations] = vals_hat_h[high_index];
        iterations++;
    }

    __half mean_h = means[row];
    __half var_h = vars[row];
    __half2 var_reg = __halves2half2(var_h, var_h);    // broadcast to both lanes
    __half2 mean_reg = __halves2half2(mean_h, mean_h);
    __half2 xu[NORM_REG];  // centered inputs x - mean

    // First reduction: sum(grad * (x - mean)), accumulated in fp32.
    float sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        xu[i] = (vals_hat_arr[i] - mean_reg);
        __half2 result_h = (xu[i] * vals_arr[i]);
        float2 result_f = __half22float2(result_h);
        sum += result_f.x;
        sum += result_f.y;
        vals_arr[i] *= h2rsqrt(var_reg);
    }

    // Warp reduction, then cross-warp reduction via shared memory.
    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);  // 2x: each half2 element holds two scalars
    __half2 sum_h = __float2half2_rn(sum);

    // Variance-gradient contribution, combined in fp32.
    for (int i = 0; i < iterations; i++) {
        __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg));
        vals_arr_f[i] = __half22float2(vals_arr[i]);
        float2 xu_grad_f = __half22float2(xu_grad);
        vals_arr_f[i].x += xu_grad_f.x;
        vals_arr_f[i].y += xu_grad_f.y;
    }

    // Second reduction: mean-gradient contribution.
    sum = 0.f;
    for (int i = 0; i < iterations; i++) {
        sum += (vals_arr_f[i].x);
        sum += (vals_arr_f[i].y);
    }

    for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }

    if (g.thread_rank() == 0) partialSum[wid] = sum;

    __syncthreads();

    if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];

#ifndef __STOCHASTIC_MODE__
    __syncthreads();
#endif

    for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);

    sum = g.shfl(sum, 0);
    sum /= (2 * row_stride);

    // Write the input gradient, fusing in the residual gradient (out_grad2).
    iterations = row_stride / iteration_stride;  // reset: last slot is the tail
    for (int i = 0; i < iterations; i++) {
        vals_arr_f[i].x -= sum;
        vals_arr_f[i].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[i]);
        inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id];
    }
    if ((high_index) < row_stride) {
        vals_arr_f[iterations].x -= sum;
        vals_arr_f[iterations].y -= sum;
        __half2 temp = __float22half2_rn(vals_arr_f[iterations]);
        inp_grad_h[high_index] = temp + out_grad_h2[high_index];
    }
#endif
}
|
| 2061 |
+
|
| 2062 |
+
/* Host launcher for the fused-add LayerNorm backward (fp32, X/means path):
 * the non-invertible variant that recomputes x_hat from the saved input
 * X_data and the per-row means/vars.
 *
 * stream[0]: LayerNormBackward1 -> gamma_grad / betta_grad
 * stream[1]: LayerNormBackward2_fused_add -> inp_grad (adds out_grad2)
 *
 * Assumes hidden_dim is a multiple of TILE_DIM — TODO confirm callers
 * guarantee this. Rejects hidden_dim > 65536.
 *
 * Fix: error message corrected from "Unsupport hidden_dim." to
 * "Unsupported hidden_dim.".
 */
template <>
void launch_layerNorm_backward_fused_add<float>(const float* out_grad1,
                                                const float* out_grad2,
                                                const float* X_data,
                                                const float* vars,
                                                const float* means,
                                                const float* gamma,
                                                float* gamma_grad,
                                                float* betta_grad,
                                                float* inp_grad,
                                                int batch,
                                                int hidden_dim,
                                                cudaStream_t stream[2])
{
    int threads = THREADS;

    // Pass 1: gamma/betta gradients.
    dim3 grid_dim(hidden_dim / TILE_DIM);
    dim3 block_dim(TILE_DIM, TILE_DIM);

    LayerNormBackward1<float><<<grid_dim, block_dim, 0, stream[0]>>>(
        out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);

    // Pass 2: input gradient, one block per row; widen the block for large
    // hidden sizes so the per-thread register arrays still cover the row.
    dim3 grid_dim2(batch);

    if (hidden_dim > 16384 && hidden_dim <= 32768)
        threads <<= 1;
    else if (hidden_dim > 32768 && hidden_dim <= 65536)
        threads <<= 2;
    else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");

    dim3 block_dim2(threads);
    LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
        out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim);
}
|
| 2097 |
+
|
| 2098 |
+
/* Host launcher for the fused-add LayerNorm backward (__half, X/means path).
 * Mirrors the fp32 overload above; pass 2 works on __half2 pairs, so the
 * block size is halved (threads / 2) and the row stride passed to the
 * kernel is hidden_dim / 2, with widening thresholds starting at 8192.
 *
 * Assumes hidden_dim is even and a multiple of TILE_DIM — TODO confirm
 * callers guarantee this.
 *
 * Fix: error message corrected from "Unsupport hidden_dim." to
 * "Unsupported hidden_dim.".
 */
template <>
void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1,
                                                 const __half* out_grad2,
                                                 const __half* X_data,
                                                 const __half* vars,
                                                 const __half* means,
                                                 const __half* gamma,
                                                 __half* gamma_grad,
                                                 __half* betta_grad,
                                                 __half* inp_grad,
                                                 int batch,
                                                 int hidden_dim,
                                                 cudaStream_t stream[2])
{
    int threads = THREADS;

    // Pass 1: gamma/betta gradients.
    dim3 grid_dim(hidden_dim / TILE_DIM);
    dim3 block_dim(TILE_DIM, TILE_DIM);

    LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
        out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);

    // Pass 2: input gradient over half2 elements, one block per row.
    dim3 grid_dim2(batch);

    if (hidden_dim > 8192 && hidden_dim <= 16384)
        threads <<= 1;
    else if (hidden_dim > 16384 && hidden_dim <= 32768)
        threads <<= 2;
    else if (hidden_dim > 32768 && hidden_dim <= 65536)
        threads <<= 3;
    else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");

    dim3 block_dim2(threads / 2);
    LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
        out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2);
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu
ADDED
|
@@ -0,0 +1,701 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <math.h>
|
| 7 |
+
#include "custom_cuda_layers.h"
|
| 8 |
+
#include "general_kernels.h"
|
| 9 |
+
|
| 10 |
+
namespace cg = cooperative_groups;
|
| 11 |
+
|
| 12 |
+
// Compute the launch grid for the fused attention-softmax kernels.
dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads)
{
    const int seq_length4 = sequence_length / 4;

    // When a quarter-sequence is narrower than the thread block, several
    // sequences share one block: take the largest power of two that fits.
    int block_compute_size = 1;
    if (seq_length4 < threads) {
        block_compute_size = (int)pow(2.0, floor(log2((float)(threads / seq_length4))));
    }

    // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited:
    // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications
    // The batch size is typically relatively small, while the sequence length could potentially be
    // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit.
    const unsigned grid_x = heads * sequence_length / block_compute_size;
    const unsigned grid_y = batch_size;
    return {grid_x, grid_y};
}
|
| 25 |
+
|
| 26 |
+
// Fused attention + softmax
|
| 27 |
+
// Fused attention softmax (float path): adds the attention mask to the scores
// and applies a numerically stable softmax along the sequence dimension.
// Template parameters: tbSize = threads cooperating on one sequence row,
// blockStride = rows handled per thread block, tbSeq = sub-sequence tile width.
// NOTE: seq_length here is measured in float4 packs — the launchers below pass
// sequence_length / 4. `iterations` is how many strided packs each thread loads.
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
                             const float* attn_mask,
                             int heads,
                             int seq_length,
                             int iterations)
{
    // One partial per warp for the cross-warp max/sum reductions.
    __shared__ float partialSum[MAX_WARP_NUM];

    int warp_num = blockDim.x >> WARP_SIZE_BITS;

    int iteration_stride = blockDim.x;
    int block_width = blockStride * seq_length;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);

    int batch = blockIdx.y;
    int row = blockIdx.x;
    int max_threads_in_sequence = std::max(seq_length, tbSeq);
    // Position of this thread within its sequence row.
    int seq_lane = threadIdx.x % max_threads_in_sequence;

    int data_offset = batch * (gridDim.x * block_width) + row * block_width +
                      (threadIdx.x / max_threads_in_sequence) * seq_length;
    // The mask is indexed per batch only — shared across heads/rows.
    int mask_offset = batch * seq_length;

    int wid = threadIdx.x >> WARP_SIZE_BITS;
    int lane = threadIdx.x & 0x1f;

    // Vectorized (float4) views of the score and mask buffers.
    float4* val_cast = reinterpret_cast<float4*>(vals);
    const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);

    float4 data[MAX_THREAD_ITERATIONS];

    float max_val = minus_infinity;

    // Pass 1: load scores, add mask, track the running row maximum.
    // Out-of-range slots are filled with -inf so they vanish under exp().
    for (int i = 0; i < iterations; i++) {
        int data_id = i * iteration_stride + seq_lane;
        if (data_id < seq_length) {
            float4 mask = attn_mask_cast[mask_offset + data_id];
            data[i] = val_cast[data_offset + data_id];

            data[i].x += mask.x;
            data[i].y += mask.y;
            data[i].z += mask.z;
            data[i].w += mask.w;

            max_val = (data[i].x > max_val ? data[i].x : max_val);
            max_val = (data[i].y > max_val ? data[i].y : max_val);
            max_val = (data[i].z > max_val ? data[i].z : max_val);
            max_val = (data[i].w > max_val ? data[i].w : max_val);
        } else {
            data[i].x = minus_infinity;
            data[i].y = minus_infinity;
            data[i].z = minus_infinity;
            data[i].w = minus_infinity;
        }
    }

    // Butterfly max-reduction within the tbSize-wide tile.
    for (int i = 1; i < tbSize; i *= 2) {
        auto temp = g.shfl_xor(max_val, i);
        max_val = (temp > max_val ? temp : max_val);
    }

    // When a row spans more than one tile, finish the max across warps
    // through shared memory.
    if (seq_length > tbSize) {
        if (lane == 0) partialSum[wid] = max_val;
        b.sync();

        if (lane < warp_num) max_val = partialSum[lane];

#ifndef __STOCHASTIC_MODE__
        b.sync();
#endif

        int iters = warp_num;
        if (seq_length < iteration_stride)
            iters = warp_num / (iteration_stride / max_threads_in_sequence);

        for (int i = 1; i < iters; i *= 2) {
            auto temp = g.shfl_xor(max_val, i);
            max_val = (temp > max_val ? temp : max_val);
        }

        // Broadcast the final maximum back to every thread in the row.
        max_val = g.shfl(max_val, threadIdx.x / tbSize);
    }

    // Pass 2: exponentiate (shifted by max for stability) and accumulate the sum.
    float sum = 0;
    for (int i = 0; i < iterations; i++) {
        data[i].x = __expf(data[i].x - max_val);
        data[i].y = __expf(data[i].y - max_val);
        data[i].z = __expf(data[i].z - max_val);
        data[i].w = __expf(data[i].w - max_val);

        sum += (data[i].x + data[i].y + data[i].z + data[i].w);
    }

    // Same two-level reduction for the sum.
    for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }

    if (seq_length > tbSize) {
        if (lane == 0) partialSum[wid] = sum;
        b.sync();

        if (lane < warp_num) sum = partialSum[lane];

#ifndef __STOCHASTIC_MODE__
        b.sync();
#endif

        int iters = warp_num;
        if (seq_length < iteration_stride)
            iters = warp_num / (iteration_stride / max_threads_in_sequence);

        for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }

        sum = g.shfl(sum, threadIdx.x / tbSize);
    }

    // Epsilon guards against division by zero when a row is fully masked.
    sum += 1e-6;

    // Pass 3: normalize and write back in-place.
    for (int i = 0; i < iterations; i++) {
        data[i].x /= sum;
        data[i].y /= sum;
        data[i].z /= sum;
        data[i].w /= sum;

        int data_id = i * iteration_stride + seq_lane;
        if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
    }
}
|
| 156 |
+
|
| 157 |
+
// Fused attention softmax, __half specialization. Mirrors the float kernel:
// loads 8 halves per thread via a float2 reinterpretation, up-converts to
// float for the mask add / max / exp / sum, then rounds back to half on store.
// seq_length is in float2 (= 4-half... actually 8-byte) packs; callers pass
// sequence_length / 4 — TODO confirm pack width against the launcher.
// Entire body is compiled out when half precision is unavailable.
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
                             const __half* attn_mask,
                             int heads,
                             int seq_length,
                             int iterations)
{
#ifdef HALF_PRECISION_AVAILABLE
    // One partial per warp for the cross-warp reductions.
    __shared__ float partialSum[MAX_WARP_NUM];

    int warp_num = blockDim.x >> WARP_SIZE_BITS;

    int iteration_stride = blockDim.x;
    int block_width = blockStride * seq_length;

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);

    int batch = blockIdx.y;
    int row = blockIdx.x;
    int max_threads_in_sequence = std::max(seq_length, tbSeq);
    int seq_lane = threadIdx.x % max_threads_in_sequence;

    int data_offset = batch * (gridDim.x * block_width) + row * block_width +
                      (threadIdx.x / max_threads_in_sequence) * seq_length;
    int mask_offset = batch * seq_length;

    int wid = threadIdx.x >> WARP_SIZE_BITS;
    int lane = threadIdx.x & 0x1f;

    // float2 view = 4 halves per load.
    float2* val_cast = reinterpret_cast<float2*>(vals);
    const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);

    // Pre-offset the base pointers; loops below index relative to them.
    val_cast += data_offset;
    attn_mask_cast += mask_offset;

    // Each pack is split into two float2 halves held in fp32 registers.
    float2 low_data[MAX_THREAD_ITERATIONS];
    float2 high_data[MAX_THREAD_ITERATIONS];

    float max_val = minus_infinity;

    // Pass 1: load, up-convert, add mask, track the running maximum.
    // NOTE: unlike the float kernel, out-of-range slots are NOT initialized
    // here; the later passes re-check data_id before touching them.
    for (int i = 0; i < iterations; i++) {
        int data_id = i * iteration_stride + seq_lane;
        if (data_id < seq_length) {
            float2 data = val_cast[data_id];
            float2 mask = attn_mask_cast[data_id];

            __half2* data_arr = reinterpret_cast<__half2*>(&data);
            __half2* mask_arr = reinterpret_cast<__half2*>(&mask);

            low_data[i] = __half22float2(data_arr[0]);
            high_data[i] = __half22float2(data_arr[1]);
            float2 low_mask = __half22float2(mask_arr[0]);
            float2 high_mask = __half22float2(mask_arr[1]);

            low_data[i].x += low_mask.x;
            low_data[i].y += low_mask.y;
            high_data[i].x += high_mask.x;
            high_data[i].y += high_mask.y;

            max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
            max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
            max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
            max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
        }
    }

    // Butterfly max-reduction within the tile.
    for (int i = 1; i < tbSize; i *= 2) {
        auto temp = g.shfl_xor(max_val, i);
        max_val = (temp > max_val ? temp : max_val);
    }

    // Cross-warp max via shared memory when a row spans multiple tiles.
    if (seq_length > tbSize) {
        if (lane == 0) partialSum[wid] = max_val;
        b.sync();

        if (lane < warp_num) max_val = partialSum[lane];

#ifndef __STOCHASTIC_MODE__
        b.sync();
#endif

        int iters = warp_num;
        if (seq_length < iteration_stride)
            iters = warp_num / (iteration_stride / max_threads_in_sequence);

        for (int i = 1; i < iters; i *= 2) {
            auto temp = g.shfl_xor(max_val, i);
            max_val = (temp > max_val ? temp : max_val);
        }

        max_val = g.shfl(max_val, threadIdx.x / tbSize);
    }

    // Pass 2: stable exponentiation and sum, in fp32.
    float sum = 0;
    for (int i = 0; i < iterations; i++) {
        int data_id = i * iteration_stride + seq_lane;
        if (data_id < seq_length) {
            low_data[i].x = __expf(low_data[i].x - max_val);
            low_data[i].y = __expf(low_data[i].y - max_val);
            high_data[i].x = __expf(high_data[i].x - max_val);
            high_data[i].y = __expf(high_data[i].y - max_val);

            sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
        }
    }

    for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }

    if (seq_length > tbSize) {
        if (lane == 0) partialSum[wid] = sum;
        b.sync();

        if (lane < warp_num) sum = partialSum[lane];

#ifndef __STOCHASTIC_MODE__
        b.sync();
#endif

        int iters = warp_num;
        if (seq_length < iteration_stride)
            iters = warp_num / (iteration_stride / max_threads_in_sequence);

        for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }

        sum = g.shfl(sum, threadIdx.x / tbSize);
    }

    // Epsilon guards against a fully-masked (all -inf) row.
    sum += 1e-6;

    // Pass 3: normalize, round back to half pairs, and store in-place.
    for (int i = 0; i < iterations; i++) {
        int data_id = i * iteration_stride + seq_lane;
        if (data_id < seq_length) {
            float2 result_f;
            __half2* result_h = reinterpret_cast<__half2*>(&result_f);

            low_data[i].x /= sum;
            low_data[i].y /= sum;
            high_data[i].x /= sum;
            high_data[i].y /= sum;

            result_h[0] = __float22half2_rn(low_data[i]);
            result_h[1] = __float22half2_rn(high_data[i]);

            val_cast[data_id] = result_f;
        }
    }

#endif
}
|
| 307 |
+
|
| 308 |
+
// Primary template: declared only — the explicit specializations for float
// and __half below provide the implementations.
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t);
|
| 310 |
+
|
| 311 |
+
// Host-side dispatcher for the float softmax kernel. Picks template
// parameters (tile size / rows-per-block / tile width) from the sequence
// length, using 128 threads up to seq_length 256 and 256 threads beyond.
// Kernels receive seq_length / 4 because they operate on float4 packs.
// Throws for sequence lengths beyond MAX_THREADS * MAX_THREAD_ITERATIONS * 4.
template <>
void launch_attn_softmax<float>(float* vals,
                                const float* attn_mask,
                                int batch_size,
                                int heads,
                                int sequence_length,
                                cudaStream_t stream)
{
    const int threads = 128;
    int seq_length4 = sequence_length / 4;

    dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads);

    // Max elements one thread block can cover per launch.
    int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;

    dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
                                            subblock_max_workload * threads)
                                         : threads);
    int iterations =
        (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
                                                 : MAX_THREAD_ITERATIONS);

    if (sequence_length <= 8)
        attn_softmax<2, (threads / 2), 2>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 16)
        attn_softmax<4, (threads / 4), 4>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 32)
        attn_softmax<8, (threads / 8), 8>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 64)
        attn_softmax<16, (threads / 16), 16>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 128)
        attn_softmax<32, (threads / 32), 32>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 256)
        attn_softmax<32, (threads / 64), 64>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else {
        // Long sequences: redo the launch geometry with a wider thread block.
        // These locals intentionally shadow the outer ones.
        const int threads = 256;
        dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads);

        int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;

        dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
                                                subblock_max_workload * threads)
                                             : threads);
        iterations =
            (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
                                                     : MAX_THREAD_ITERATIONS);
        if (sequence_length <= 512)
            attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
                vals, attn_mask, heads, seq_length4, iterations);
        else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
            attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
                vals, attn_mask, heads, seq_length4, iterations);
        else
            throw std::runtime_error(
                "Unsupport Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
    }
}
|
| 375 |
+
|
| 376 |
+
// Host-side dispatcher for the __half softmax kernel. Identical launch
// geometry and sequence-length dispatch to the float specialization; kept as
// a separate explicit specialization rather than shared code, matching the
// file's existing structure. Kernels receive seq_length / 4 (vector packs).
// Throws for sequence lengths beyond MAX_THREADS * MAX_THREAD_ITERATIONS * 4.
template <>
void launch_attn_softmax<__half>(__half* vals,
                                 const __half* attn_mask,
                                 int batch_size,
                                 int heads,
                                 int sequence_length,
                                 cudaStream_t stream)
{
    const int threads = 128;
    int seq_length4 = sequence_length / 4;

    dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads);

    // Max elements one thread block can cover per launch.
    int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;

    dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
                                            subblock_max_workload * threads)
                                         : threads);

    int iterations =
        (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
                                                 : MAX_THREAD_ITERATIONS);

    if (sequence_length <= 8)
        attn_softmax<2, (threads / 2), 2>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 16)
        attn_softmax<4, (threads / 4), 4>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 32)
        attn_softmax<8, (threads / 8), 8>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 64)
        attn_softmax<16, (threads / 16), 16>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 128)
        attn_softmax<32, (threads / 32), 32>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else if (sequence_length <= 256)
        attn_softmax<32, (threads / 64), 64>
            <<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
    else {
        // Long sequences: redo the launch geometry with a wider thread block.
        // These locals intentionally shadow the outer ones.
        const int threads = 256;
        dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads);

        int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;

        dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
                                                subblock_max_workload * threads)
                                             : threads);
        iterations =
            (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
                                                     : MAX_THREAD_ITERATIONS);
        if (sequence_length <= 512)
            attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
                vals, attn_mask, heads, seq_length4, iterations);
        else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
            attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
                vals, attn_mask, heads, seq_length4, iterations);
        else
            throw std::runtime_error(
                "Unsupport Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
    }
}
|
| 441 |
+
|
| 442 |
+
// Softmax backward (v1): computes dL/dx = y * (dL/dy - sum(dL/dy * y)) per
// row, overwriting out_grad in place. soft_inp is the forward softmax output.
// Accumulation is done in fp32 regardless of T to preserve accuracy.
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
    __shared__ float partialSum[MAX_WARP_NUM];

    int warp_num = blockDim.x >> WARP_SIZE_BITS;  // warp-count = num_threads / WARP_SIZE (32)

    int iteration_stride = blockDim.x;
    int block_width = blockStride * seq_length;

    int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
                          ? (seq_length + iteration_stride - 1) / iteration_stride
                          : MAX_THREAD_ITERATIONS);

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);

    int row = blockIdx.x;
    int id = threadIdx.x;

    int wid = id >> WARP_SIZE_BITS;
    int lane = id & 0x1f;

    // Cached loads so pass 2 does not re-read global memory.
    T val_reg[MAX_THREAD_ITERATIONS];
    T soft_reg[MAX_THREAD_ITERATIONS];
    float grad_reg = 0.0f;

// Pass 1: load gradient and softmax output, accumulate dot(grad, softmax).
#pragma unroll
    for (int i = 0; i < iterations; i++) {
        int data_id = i * iteration_stride + id;
        if (data_id < block_width) {
            val_reg[i] = out_grad[row * block_width + data_id];
            soft_reg[i] = soft_inp[row * block_width + data_id];

            grad_reg += ((float)val_reg[i] *
                         (float)soft_reg[i]);  // if done in half, the multiplication, we may lose
                                               // 2% of accuracy in computation!!
        }
    }
    // Butterfly sum within the tbSize-wide tile.
    for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);

    // Cross-warp reduction via shared memory when a row spans multiple tiles.
    if (seq_length > tbSize) {
        if (lane == 0) partialSum[wid] = grad_reg;
        b.sync();

        if (lane < warp_num) grad_reg = partialSum[lane];

        int iters = warp_num;
        if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);

        for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);

        // Broadcast the full row sum back to every thread.
        grad_reg = g.shfl(grad_reg, id / tbSize);
    }

    // Pass 2: dL/dx = y * (dL/dy - sum), written back in place.
    for (int i = 0; i < iterations; i++) {
        int data_id = i * iteration_stride + id;
        if (data_id < block_width) {
            float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
            out_grad[row * block_width + data_id] = (T)temp;
        }
    }
}
|
| 505 |
+
|
| 506 |
+
// Softmax backward (v2): one warp per softmax row, blockDim.y rows per block.
// Each thread holds ITERATIONS strided elements in registers; the row dot
// product is reduced with warp shuffles only (no shared memory).
// Computes grad = output * (grad - sum(grad * output)), in place.
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
                                           const T* output,
                                           int softmax_length)
{
    // Row handled by this warp; threadIdx.x is the lane within the row.
    int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
    int offset = batch_idx * softmax_length + threadIdx.x;

    grad += offset;
    output += offset;

    T grad_reg[ITERATIONS];
    T output_reg[ITERATIONS];
    float sum = 0.0;  // fp32 accumulator regardless of T

// Load the row (WARP_SIZE-strided) and accumulate dot(grad, output).
#pragma unroll
    for (int i = 0; i < ITERATIONS; ++i) {
        int curr_idx = threadIdx.x + i * WARP_SIZE;
        if (curr_idx < softmax_length) {
            grad_reg[i] = grad[i * WARP_SIZE];
            output_reg[i] = output[i * WARP_SIZE];
            sum += (float)grad_reg[i] * (float)output_reg[i];
        }
    }

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

    // Butterfly all-reduce: every lane ends with the full row sum.
    for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);

// Write back grad = output * (grad - sum).
#pragma unroll
    for (int i = 0; i < ITERATIONS; ++i) {
        int curr_idx = threadIdx.x + i * WARP_SIZE;
        if (curr_idx < softmax_length)
            grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
    }
}
|
| 543 |
+
|
| 544 |
+
// Softmax backward for rows too long for the register-resident v2 kernel,
// __half flavor. Streams the row in float4 packs (8 halves each) with a
// while-loop instead of a compile-time ITERATIONS bound; the element-wise
// products are re-read and re-derived in the second pass rather than cached.
// softmax_length here is in packs — the launcher divides by the pack width.
__global__ void softmax_backward_kernel_arbitrary_length(__half* grad /* input & output*/,
                                                         const __half* output,
                                                         int softmax_length)
{
    // One warp per row; threadIdx.x is the lane.
    int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
    int offset = batch_idx * softmax_length + threadIdx.x;

    const float4* output_cast = reinterpret_cast<const float4*>(output);
    float4* grad_cast = reinterpret_cast<float4*>(grad);

    grad_cast += offset;
    output_cast += offset;

    // Pass 1: accumulate dot(grad, output) over the whole row in fp32.
    float sum = 0.0;
    int curr_idx = threadIdx.x;
    while (curr_idx < softmax_length) {
        float4 out_reg = output_cast[curr_idx];
        float4 grad_reg = grad_cast[curr_idx];
        __half2* out_h = reinterpret_cast<__half2*>(&out_reg);
        __half2* grad_h = reinterpret_cast<__half2*>(&grad_reg);
// Multiply in half2 pairs; only the local copy is modified.
#pragma unroll
        for (int m = 0; m < 4; m++) grad_h[m] *= out_h[m];
        sum += ((float)grad_h[0].x + (float)grad_h[0].y + (float)grad_h[1].x + (float)grad_h[1].y) +
               ((float)grad_h[2].x + (float)grad_h[2].y + (float)grad_h[3].x + (float)grad_h[3].y);
        curr_idx += WARP_SIZE;
    }

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

// Butterfly all-reduce: every lane gets the full row sum.
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);

    // Pass 2: re-read the row and store grad = output * (grad - sum).
    curr_idx = threadIdx.x;
    while (curr_idx < softmax_length) {
        float4 out_reg = output_cast[curr_idx];
        float4 grad_reg = grad_cast[curr_idx];
        __half* grad_h = reinterpret_cast<__half*>(&grad_reg);
        __half* out_h = reinterpret_cast<__half*>(&out_reg);

#pragma unroll
        for (int m = 0; m < 8; m++) grad_h[m] = (float)out_h[m] * ((float)grad_h[m] - sum);

        grad_cast[curr_idx] = grad_reg;
        curr_idx += WARP_SIZE;
    }
}
|
| 591 |
+
|
| 592 |
+
// Softmax backward for arbitrarily long rows, float flavor. Same structure
// as the __half overload: one warp per row, float4 packs (4 floats each),
// two streaming passes with a warp-shuffle all-reduce in between.
// softmax_length is in packs — the launcher divides by the pack width.
__global__ void softmax_backward_kernel_arbitrary_length(float* grad /* input & output*/,
                                                         const float* output,
                                                         int softmax_length)
{
    // One warp per row; threadIdx.x is the lane.
    int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
    int offset = batch_idx * softmax_length + threadIdx.x;

    const float4* output_cast = reinterpret_cast<const float4*>(output);
    float4* grad_cast = reinterpret_cast<float4*>(grad);

    grad_cast += offset;
    output_cast += offset;

    // Pass 1: accumulate dot(grad, output) over the whole row.
    float sum = 0.0;
    int curr_idx = threadIdx.x;
    while (curr_idx < softmax_length) {
        float4 out_reg = output_cast[curr_idx];
        float4 grad_reg = grad_cast[curr_idx];

        grad_reg.x *= out_reg.x;
        grad_reg.y *= out_reg.y;
        grad_reg.z *= out_reg.z;
        grad_reg.w *= out_reg.w;
        sum += (grad_reg.x + grad_reg.y + grad_reg.z + grad_reg.w);

        curr_idx += WARP_SIZE;
    }

    cg::thread_block b = cg::this_thread_block();
    cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);

// Butterfly all-reduce: every lane gets the full row sum.
#pragma unroll
    for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);

    // Pass 2: re-read the row and store grad = output * (grad - sum).
    curr_idx = threadIdx.x;
    while (curr_idx < softmax_length) {
        float4 out_reg = output_cast[curr_idx];
        float4 grad_reg = grad_cast[curr_idx];
        grad_reg.x = out_reg.x * (grad_reg.x - sum);
        grad_reg.y = out_reg.y * (grad_reg.y - sum);
        grad_reg.z = out_reg.z * (grad_reg.z - sum);
        grad_reg.w = out_reg.w * (grad_reg.w - sum);

        grad_cast[curr_idx] = grad_reg;
        curr_idx += WARP_SIZE;
    }
}
|
| 639 |
+
|
| 640 |
+
// Host-side dispatcher for the softmax backward pass. One warp per row,
// 4 warps per block. Rows up to 8192 use the register-resident v2 kernel
// with a compile-time ITERATIONS = ceil(seq_length / 32); longer rows fall
// back to the streaming arbitrary-length kernel, whose length argument is
// given in vector packs: seq_length / 8 for __half (sizeof==2) and
// seq_length / 4 for float — that is what the (4 << ((sizeof(T) & 2) >> 1))
// expression selects.
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
                                     const T* soft_inp,
                                     int batch_size,
                                     int heads,
                                     int seq_length,
                                     cudaStream_t stream)
{
    const int warps_per_block = 4;
    dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
    dim3 block_dim(WARP_SIZE, warps_per_block);

    if (seq_length <= 32)
        softmax_backward_kernel_v2<T, 1>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 64)
        softmax_backward_kernel_v2<T, 2>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 128)
        softmax_backward_kernel_v2<T, 4>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 256)
        softmax_backward_kernel_v2<T, 8>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 384)
        softmax_backward_kernel_v2<T, 12>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 512)
        softmax_backward_kernel_v2<T, 16>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 768)
        softmax_backward_kernel_v2<T, 24>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 1024)
        softmax_backward_kernel_v2<T, 32>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 2048)
        softmax_backward_kernel_v2<T, 64>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 4096)
        softmax_backward_kernel_v2<T, 128>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else if (seq_length <= 8192)
        softmax_backward_kernel_v2<T, 256>
            <<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
    else
        // Arbitrary-length fallback; length converted to vector packs.
        softmax_backward_kernel_arbitrary_length<<<grid_dim, block_dim, 0, stream>>>(
            out_grad, soft_inp, seq_length / (4 << ((sizeof(T) & 2) >> 1)));
}
|
| 689 |
+
|
| 690 |
+
// Explicit instantiations for the two supported element types.
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
                                                      const __half* soft_inp,
                                                      int batch_size,
                                                      int heads,
                                                      int seq_length,
                                                      cudaStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
                                                     const float* soft_inp,
                                                     int batch_size,
                                                     int heads,
                                                     int seq_length,
                                                     cudaStream_t stream);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Copyright NVIDIA/apex
|
| 8 |
+
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#include <torch/csrc/utils/tensor_flatten.h>
|
| 12 |
+
#include <torch/extension.h>
|
| 13 |
+
// https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_flatten.h
|
| 14 |
+
|
| 15 |
+
// Concatenate a list of dense tensors into one contiguous 1-D tensor.
// Thin wrapper over torch::utils::flatten_dense_tensors, exposed to Python
// via the pybind module below.
// Takes the vector by const reference: the original passed it by value,
// copying the vector and bumping every tensor's refcount per call for no
// benefit (flatten_dense_tensors accepts an at::TensorList view).
at::Tensor flatten(const std::vector<at::Tensor>& tensors)
{
    return torch::utils::flatten_dense_tensors(tensors);
}
|
| 19 |
+
|
| 20 |
+
std::vector<at::Tensor> unflatten(at::Tensor flat, std::vector<at::Tensor> tensors)
|
| 21 |
+
{
|
| 22 |
+
return torch::utils::unflatten_dense_tensors(flat, tensors);
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
// Python bindings: exposes flatten/unflatten to the DeepSpeed Python side.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("flatten", &flatten, "Flatten dense tensors");
    m.def("unflatten", &unflatten, "Unflatten dense tensors");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam.cpp
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include "cpu_adam.h"
|
| 7 |
+
|
| 8 |
+
// Python bindings for the CPU Adam optimizer: step functions plus
// create/destroy for the per-optimizer native state.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)");
    m.def("adam_update_copy",
          &ds_adam_step_plus_copy,
          "DeepSpeed CPU Adam update and param copy (C++)");
    m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)");
    m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/cpu_adam_impl.cpp
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
#include <cassert>
|
| 8 |
+
#include <cmath>
|
| 9 |
+
#include <iostream>
|
| 10 |
+
#include <memory>
|
| 11 |
+
#include <type_traits>
|
| 12 |
+
#include <unordered_map>
|
| 13 |
+
#include "cpu_adam.h"
|
| 14 |
+
|
| 15 |
+
static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;
|
| 16 |
+
|
| 17 |
+
// C++ interface
|
| 18 |
+
|
| 19 |
+
// Scalar Adam step over one parameter shard.
// Runs the SIMD path first (when compiled with AVX), then finishes the
// remaining tail elements with plain scalar math. Updates _params, _exp_avg
// (first moment) and _exp_avg_sq (second moment) in place.
//
// When half_precision is true, _params and grads actually point at FP16 data
// and are reinterpreted through ds_half_precision_t.
// dev_params is unused on the scalar path here (forwarded to Step_AVX only).
void Adam_Optimizer::Step_1(float* _params,
                            float* grads,
                            float* _exp_avg,
                            float* _exp_avg_sq,
                            size_t _param_size,
                            ds_half_precision_t* dev_params,
                            bool half_precision)
{
    size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
    // Vectorized fast path; reports via rounded_size how many leading
    // elements it consumed so the scalar loop below only does the tail.
    Step_AVX<1>(&rounded_size,
                _params,
                grads,
                _exp_avg,
                _exp_avg_sq,
                _param_size,
                dev_params,
                half_precision);
#endif
    if (_param_size > rounded_size) {
        float betta1_minus1 = 1 - _betta1;
        float betta2_minus1 = 1 - _betta2;

        // step_size folds bias correction 1 into the learning rate;
        // w_decay is the decoupled (AdamW-style) weight-decay factor.
        float step_size = -1 * _alpha / _bias_correction1;
        float w_decay = -1 * _alpha * _weight_decay;
        ds_half_precision_t* grads_cast_h;
        ds_half_precision_t* params_cast_h;
        if (half_precision) {
            grads_cast_h = reinterpret_cast<ds_half_precision_t*>(grads);
            params_cast_h = reinterpret_cast<ds_half_precision_t*>(_params);
        }

        // Process the tail in TILE-sized chunks, parallelized per element.
        for (size_t t = rounded_size; t < _param_size; t += TILE) {
            size_t copy_size = TILE;
            if ((t + TILE) > _param_size) copy_size = _param_size - t;
            size_t offset = copy_size + t;
#pragma omp parallel for
            for (size_t k = t; k < offset; k++) {
                float grad = half_precision ? (float)grads_cast_h[k] : grads[k];
                float param = half_precision ? (float)params_cast_h[k] : _params[k];
                float momentum = _exp_avg[k];
                float variance = _exp_avg_sq[k];
                // Classic Adam (L2 mode) folds weight decay into the gradient;
                // AdamW mode instead decays the parameter directly below.
                if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; }
                momentum = momentum * _betta1;
                momentum = grad * betta1_minus1 + momentum;

                variance = variance * _betta2;
                grad = grad * grad;
                variance = grad * betta2_minus1 + variance;

                // NOTE(review): _bias_correction2 is applied multiplicatively to
                // sqrt(variance) — presumably it holds 1/sqrt(1 - beta2^t);
                // confirm against its definition in cpu_adam.h.
                grad = sqrt(variance);
                grad = grad * _bias_correction2 + _eps;
                grad = momentum / grad;
                if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; }
                param = grad * step_size + param;
                if (half_precision)
                    params_cast_h[k] = (ds_half_precision_t)param;
                else
                    _params[k] = param;
                // Persist updated moments for the next step.
                _exp_avg[k] = momentum;
                _exp_avg_sq[k] = variance;
            }
        }
    }
}
|
| 84 |
+
|
| 85 |
+
void Adam_Optimizer::Step_4(float* _params,
|
| 86 |
+
float* grads,
|
| 87 |
+
float* _exp_avg,
|
| 88 |
+
float* _exp_avg_sq,
|
| 89 |
+
size_t _param_size,
|
| 90 |
+
ds_half_precision_t* dev_params,
|
| 91 |
+
bool half_precision)
|
| 92 |
+
{
|
| 93 |
+
size_t rounded_size = 0;
|
| 94 |
+
#if defined(__AVX512__) or defined(__AVX256__)
|
| 95 |
+
Step_AVX<4>(&rounded_size,
|
| 96 |
+
_params,
|
| 97 |
+
grads,
|
| 98 |
+
_exp_avg,
|
| 99 |
+
_exp_avg_sq,
|
| 100 |
+
_param_size,
|
| 101 |
+
dev_params,
|
| 102 |
+
half_precision);
|
| 103 |
+
#endif
|
| 104 |
+
if (_param_size > rounded_size)
|
| 105 |
+
Step_1((_params + rounded_size),
|
| 106 |
+
(grads + rounded_size),
|
| 107 |
+
(_exp_avg + rounded_size),
|
| 108 |
+
(_exp_avg_sq + rounded_size),
|
| 109 |
+
(_param_size - rounded_size),
|
| 110 |
+
(dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
|
| 111 |
+
half_precision);
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
int create_adam_optimizer(int optimizer_id,
|
| 115 |
+
float alpha,
|
| 116 |
+
float betta1,
|
| 117 |
+
float betta2,
|
| 118 |
+
float eps,
|
| 119 |
+
float weight_decay,
|
| 120 |
+
bool adamw_mode,
|
| 121 |
+
bool should_log)
|
| 122 |
+
{
|
| 123 |
+
auto opt =
|
| 124 |
+
std::make_shared<Adam_Optimizer>(alpha, betta1, betta2, eps, weight_decay, adamw_mode);
|
| 125 |
+
|
| 126 |
+
s_optimizers[optimizer_id] = opt;
|
| 127 |
+
|
| 128 |
+
if (should_log) {
|
| 129 |
+
std::string avx_type = "";
|
| 130 |
+
#if defined(__AVX512__)
|
| 131 |
+
avx_type = "AVX512";
|
| 132 |
+
#else
|
| 133 |
+
#if defined(__AVX256__)
|
| 134 |
+
avx_type = "AVX2";
|
| 135 |
+
#else
|
| 136 |
+
avx_type = "scalar";
|
| 137 |
+
#endif
|
| 138 |
+
#endif
|
| 139 |
+
|
| 140 |
+
printf("Adam Optimizer #%d is created with %s arithmetic capability.\n",
|
| 141 |
+
optimizer_id,
|
| 142 |
+
avx_type.c_str());
|
| 143 |
+
printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n",
|
| 144 |
+
alpha,
|
| 145 |
+
betta1,
|
| 146 |
+
betta2,
|
| 147 |
+
weight_decay,
|
| 148 |
+
(int)adamw_mode);
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
return 0;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
void Adam_Optimizer::Step_8(float* _params,
|
| 155 |
+
float* grads,
|
| 156 |
+
float* _exp_avg,
|
| 157 |
+
float* _exp_avg_sq,
|
| 158 |
+
size_t _param_size,
|
| 159 |
+
ds_half_precision_t* dev_params,
|
| 160 |
+
bool half_precision)
|
| 161 |
+
{
|
| 162 |
+
size_t rounded_size = 0;
|
| 163 |
+
#if defined(__AVX512__) or defined(__AVX256__)
|
| 164 |
+
Step_AVX<8>(&rounded_size,
|
| 165 |
+
_params,
|
| 166 |
+
grads,
|
| 167 |
+
_exp_avg,
|
| 168 |
+
_exp_avg_sq,
|
| 169 |
+
_param_size,
|
| 170 |
+
dev_params,
|
| 171 |
+
half_precision);
|
| 172 |
+
#endif
|
| 173 |
+
if (_param_size > rounded_size)
|
| 174 |
+
Step_4((_params + rounded_size),
|
| 175 |
+
(grads + rounded_size),
|
| 176 |
+
(_exp_avg + rounded_size),
|
| 177 |
+
(_exp_avg_sq + rounded_size),
|
| 178 |
+
(_param_size - rounded_size),
|
| 179 |
+
(dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
|
| 180 |
+
half_precision);
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
int ds_adam_step(int optimizer_id,
|
| 184 |
+
size_t step,
|
| 185 |
+
float lr,
|
| 186 |
+
float beta1,
|
| 187 |
+
float beta2,
|
| 188 |
+
float epsilon,
|
| 189 |
+
float weight_decay,
|
| 190 |
+
bool bias_correction,
|
| 191 |
+
torch::Tensor& params,
|
| 192 |
+
torch::Tensor& grads,
|
| 193 |
+
torch::Tensor& exp_avg,
|
| 194 |
+
torch::Tensor& exp_avg_sq)
|
| 195 |
+
{
|
| 196 |
+
auto params_c = params.contiguous();
|
| 197 |
+
auto grads_c = grads.contiguous();
|
| 198 |
+
auto exp_avg_c = exp_avg.contiguous();
|
| 199 |
+
auto exp_avg_sq_c = exp_avg_sq.contiguous();
|
| 200 |
+
|
| 201 |
+
// assert(params.options().dtype() == grads.options().dtype());
|
| 202 |
+
|
| 203 |
+
float* params_ptr = (float*)params_c.data_ptr();
|
| 204 |
+
float* grads_ptr = (float*)grads_c.data_ptr();
|
| 205 |
+
float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
|
| 206 |
+
float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
|
| 207 |
+
|
| 208 |
+
std::shared_ptr<Adam_Optimizer> opt =
|
| 209 |
+
std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
|
| 210 |
+
opt->IncrementStep(step, beta1, beta2);
|
| 211 |
+
opt->update_state(lr, epsilon, weight_decay, bias_correction);
|
| 212 |
+
|
| 213 |
+
opt->Step_8(params_ptr,
|
| 214 |
+
grads_ptr,
|
| 215 |
+
exp_avg_ptr,
|
| 216 |
+
exp_avg_sq_ptr,
|
| 217 |
+
params_c.numel(),
|
| 218 |
+
nullptr,
|
| 219 |
+
(params.options().dtype() == at::kHalf));
|
| 220 |
+
|
| 221 |
+
return 0;
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
// Stub: fused "step + copy back to device" is not implemented on this (XPU)
// backend. The CUDA backend provides a real implementation with the same
// signature; here the call is rejected outright.
//
// NOTE(review): assert(false) is compiled out under NDEBUG, in which case
// this silently returns 0 — confirm callers never reach this path in
// release builds.
int ds_adam_step_plus_copy(int optimizer_id,
                           size_t step,
                           float lr,
                           float beta1,
                           float beta2,
                           float epsilon,
                           float weight_decay,
                           bool bias_correction,
                           torch::Tensor& params,
                           torch::Tensor& grads,
                           torch::Tensor& exp_avg,
                           torch::Tensor& exp_avg_sq,
                           torch::Tensor& gpu_params)
{
    assert(false);
    return 0;
}
|
| 241 |
+
|
| 242 |
+
int destroy_adam_optimizer(int optimizer_id)
|
| 243 |
+
{
|
| 244 |
+
s_optimizers.erase(optimizer_id);
|
| 245 |
+
|
| 246 |
+
return 0;
|
| 247 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/fused_adam_frontend.cpp
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <torch/extension.h>
|
| 7 |
+
|
| 8 |
+
// Forward declaration of the fused multi-tensor Adam kernel launcher,
// implemented in multi_tensor_adam.dp.cpp. tensor_lists holds parallel lists
// of [grads, params, exp_avg, exp_avg_sq] tensors; mode selects L2 vs AdamW
// weight decay (see adamMode_t in the implementation file).
void multi_tensor_adam_cuda(int chunk_size,
                            at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr,
                            const float beta1,
                            const float beta2,
                            const float epsilon,
                            const int step,
                            const int mode,
                            const int bias_correction,
                            const float weight_decay);

// Python module binding: exposes the fused optimizer step as
// `multi_tensor_adam` on the extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("multi_tensor_adam",
          &multi_tensor_adam_cuda,
          "Compute and apply gradient update to parameters for Adam optimizer");
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adam/multi_tensor_adam.dp.cpp
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/*
|
| 7 |
+
Copyright NVIDIA/apex
|
| 8 |
+
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
|
| 9 |
+
*/
|
| 10 |
+
|
| 11 |
+
#include <ATen/ATen.h>
|
| 12 |
+
#include <ATen/AccumulateType.h>
|
| 13 |
+
#include <sycl/sycl.hpp>
|
| 14 |
+
|
| 15 |
+
#include <assert.h>
|
| 16 |
+
|
| 17 |
+
#include <cmath>
|
| 18 |
+
#include "multi_tensor_apply.dp.hpp"
|
| 19 |
+
#include "type_shim.h"
|
| 20 |
+
|
| 21 |
+
#define BLOCK_SIZE 512
|
| 22 |
+
#define ILP 4
|
| 23 |
+
|
| 24 |
+
typedef enum : int {
|
| 25 |
+
ADAM_MODE_0 = 0, // L2 regularization mode
|
| 26 |
+
ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW)
|
| 27 |
+
} adamMode_t;
|
| 28 |
+
|
| 29 |
+
using MATH_T = float;
|
| 30 |
+
|
| 31 |
+
// Per-chunk Adam update functor invoked by multi_tensor_apply<4>.
// Each work-group processes one chunk of one tensor; addresses[0..3] hold
// grads, params, exp_avg, exp_avg_sq respectively. All math is done in
// MATH_T (float) regardless of the storage type T.
template <typename T>
struct AdamFunctor {
    __inline__ __attribute__((always_inline)) void operator()(int chunk_size,
                                                              volatile int* noop_gmem,
                                                              TensorListMetadata<4>& tl,
                                                              const float beta1,
                                                              const float beta2,
                                                              const float beta1_correction,
                                                              const float beta2_correction,
                                                              const float epsilon,
                                                              const float lr,
                                                              adamMode_t mode,
                                                              const float decay)
    {
        auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>();
        // Map this work-group to its (tensor, chunk) assignment.
        int tensor_loc = tl.block_to_tensor[item_ct1.get_group(2)];

        int chunk_idx = tl.block_to_chunk[item_ct1.get_group(2)];
        int n = tl.sizes[tensor_loc];

        T* g = (T*)tl.addresses[0][tensor_loc];
        g += chunk_idx * chunk_size;

        T* p = (T*)tl.addresses[1][tensor_loc];
        p += chunk_idx * chunk_size;

        T* m = (T*)tl.addresses[2][tensor_loc];
        m += chunk_idx * chunk_size;

        T* v = (T*)tl.addresses[3][tensor_loc];
        v += chunk_idx * chunk_size;

        // Remaining element count within this chunk.
        n -= chunk_idx * chunk_size;

        // see note in multi_tensor_scale_kernel.cu
        for (int i_start = 0; i_start < n && i_start < chunk_size;
             i_start += item_ct1.get_local_range(2) * ILP) {
            // Stage ILP elements per work-item into registers.
            MATH_T r_g[ILP];
            MATH_T r_p[ILP];
            MATH_T r_m[ILP];
            MATH_T r_v[ILP];
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                int i = i_start + item_ct1.get_local_id(2) + ii * item_ct1.get_local_range(2);
                if (i < n && i < chunk_size) {
                    r_g[ii] = g[i];
                    r_p[ii] = p[i];
                    r_m[ii] = m[i];
                    r_v[ii] = v[i];
                } else {
                    // Out-of-range lanes compute on zeros and are not written back.
                    r_g[ii] = MATH_T(0);
                    r_p[ii] = MATH_T(0);
                    r_m[ii] = MATH_T(0);
                    r_v[ii] = MATH_T(0);
                }
            }
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                if (mode == ADAM_MODE_0) {  // L2: decay folded into the gradient
                    r_g[ii] = r_g[ii] + (decay * r_p[ii]);
                    r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
                    r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
                    MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
                    MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
                    MATH_T denom = sycl::sqrt(next_v_unbiased) + epsilon;
                    MATH_T update = next_m_unbiased / denom;
                    r_p[ii] = r_p[ii] - (lr * update);
                } else {  // AdamW: decoupled weight decay added to the update
                    r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
                    r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
                    MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
                    MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
                    MATH_T denom = sycl::sqrt(next_v_unbiased) + epsilon;
                    MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
                    r_p[ii] = r_p[ii] - (lr * update);
                }
            }
            // Write back updated params and moments for in-range lanes only.
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                int i = i_start + item_ct1.get_local_id(2) + ii * item_ct1.get_local_range(2);
                if (i < n && i < chunk_size) {
                    p[i] = r_p[ii];
                    m[i] = r_m[ii];
                    v[i] = r_v[ii];
                }
            }
        }
    }
};
|
| 120 |
+
|
| 121 |
+
// Host-side launcher for the fused multi-tensor Adam step.
// Computes bias corrections on the host, then dispatches AdamFunctor over
// all chunks of all tensors via multi_tensor_apply<4>. tensor_lists is
// ordered [grads, params, exp_avg, exp_avg_sq]; all four lists must share
// one scalar type (dispatched on tensor_lists[0][0]).
void multi_tensor_adam_cuda(int chunk_size,
                            at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr,
                            const float beta1,
                            const float beta2,
                            const float epsilon,
                            const int step,
                            const int mode,
                            const int bias_correction,
                            const float weight_decay)
{
    using namespace at;

    // Handle bias correction mode: 1.0f disables correction when off.
    float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
    if (bias_correction == 1) {
        bias_correction1 = 1 - std::pow(beta1, step);
        bias_correction2 = 1 - std::pow(beta2, step);
    }

    // Assume single type across p,g,m1,m2 now
    DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(),
                                   0,
                                   "adam",
                                   multi_tensor_apply<4>(BLOCK_SIZE,
                                                         chunk_size,
                                                         noop_flag,
                                                         tensor_lists,
                                                         AdamFunctor<scalar_t_0>(),
                                                         beta1,
                                                         beta2,
                                                         bias_correction1,
                                                         bias_correction2,
                                                         epsilon,
                                                         lr,
                                                         (adamMode_t)mode,
                                                         weight_decay);)
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/common/custom_cuda_kernel.dp.cpp
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#include <sycl/sycl.hpp>
|
| 7 |
+
|
| 8 |
+
// Throw std::runtime_error unless the device supports every aspect in props.
// fp64/fp16 get friendly messages; any other unsupported aspect is named via
// a lambda whose switch body is generated by expanding the __SYCL_ASPECT
// macros over the SYCL aspects definition lists.
inline void has_capability_or_fail(const sycl::device& dev,
                                   const std::initializer_list<sycl::aspect>& props)
{
    for (const auto& it : props) {
        if (dev.has(it)) continue;  // aspect supported — nothing to do
        switch (it) {
            case sycl::aspect::fp64:
                throw std::runtime_error("'double' is not supported in '" +
                                         dev.get_info<sycl::info::device::name>() + "' device");
                break;
            case sycl::aspect::fp16:
                throw std::runtime_error("'half' is not supported in '" +
                                         dev.get_info<sycl::info::device::name>() + "' device");
                break;
            default:
// Each expansion of __SYCL_ASPECT becomes a `case ...: return "name";` line
// inside the lambda's switch when the .def files below are included.
#define __SYCL_ASPECT(ASPECT, ID) \
    case sycl::aspect::ASPECT: return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
                auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
                    switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
                        default: return "unknown aspect";
                    }
                };
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
                throw std::runtime_error("'" + getAspectNameStr(it) + "' is not supported in '" +
                                         dev.get_info<sycl::info::device::name>() + "' device");
        }
        break;
    }
}
|
| 43 |
+
|
| 44 |
+
void param_update_kernel(const float* input, sycl::half* output, int size)
|
| 45 |
+
{
|
| 46 |
+
auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>();
|
| 47 |
+
int id = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2);
|
| 48 |
+
|
| 49 |
+
if (id < size) { output[id] = (sycl::half)input[id]; }
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
void launch_param_update(const float* input, sycl::half* output, int size, sycl::queue* stream)
|
| 53 |
+
{
|
| 54 |
+
int threads = 1024;
|
| 55 |
+
|
| 56 |
+
sycl::range<3> grid_dim(1, 1, (size - 1) / threads + 1);
|
| 57 |
+
sycl::range<3> block_dim(1, 1, threads);
|
| 58 |
+
|
| 59 |
+
{
|
| 60 |
+
has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16});
|
| 61 |
+
stream->parallel_for(
|
| 62 |
+
sycl::nd_range<3>(grid_dim * block_dim, block_dim),
|
| 63 |
+
[=](sycl::nd_item<3> item_ct1) { param_update_kernel(input, output, size); });
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
// Device kernel: copy one packed half2 per work-item from `input` to `output`.
// NOTE(review): each float element of `input` is reinterpreted as a
// sycl::half2 — i.e. the buffer is presumed to already contain pairs of FP16
// values packed into 32-bit slots, not real float data. `size` is therefore
// a count of half2 pairs (the launcher halves the element count). Confirm
// against the producer of `input`.
void param_update_kernel_half(const float* input, sycl::half* output, int size)
{
    auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>();
    int id = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2);
    sycl::half2* output_cast = reinterpret_cast<sycl::half2*>(output);
    if (id < size) {
        float input_f = input[id];
        // Bit-level reinterpretation: no numeric conversion happens here.
        sycl::half2* input_h = reinterpret_cast<sycl::half2*>(&input_f);
        output_cast[id] = *input_h;
    }
}
|
| 78 |
+
|
| 79 |
+
void launch_param_update_half(const float* input, sycl::half* output, int size, sycl::queue* stream)
|
| 80 |
+
{
|
| 81 |
+
int threads = 1024;
|
| 82 |
+
size /= 2;
|
| 83 |
+
sycl::range<3> grid_dim(1, 1, (size - 1) / threads + 1);
|
| 84 |
+
sycl::range<3> block_dim(1, 1, threads);
|
| 85 |
+
|
| 86 |
+
{
|
| 87 |
+
has_capability_or_fail(stream->get_device(), {sycl::aspect::fp16});
|
| 88 |
+
stream->parallel_for(
|
| 89 |
+
sycl::nd_range<3>(grid_dim * block_dim, block_dim),
|
| 90 |
+
[=](sycl::nd_item<3> item_ct1) { param_update_kernel_half(input, output, size); });
|
| 91 |
+
}
|
| 92 |
+
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/cpu_adagrad.h
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#define NOMINMAX // Windows idiosyncrasy
|
| 9 |
+
// https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c
|
| 10 |
+
|
| 11 |
+
#include <stdio.h>
|
| 12 |
+
#include <cassert>
|
| 13 |
+
#include "simd.h"
|
| 14 |
+
|
| 15 |
+
// Storage type used to pass FP16 data through float*-typed APIs on this backend.
typedef unsigned short ds_half_precision_t;

// Declares the Step_1 / Step_4 / Step_8 entry points (scalar and SIMD-unrolled
// Adagrad update variants) with a shared signature.
#define STEP(SPAN)                                             \
    void Step_##SPAN(float* _params,                           \
                     float* grads,                             \
                     float* _exp_avg_sq,                       \
                     size_t _param_size,                       \
                     ds_half_precision_t* dev_param = nullptr, \
                     bool half_precision = false);

// CPU Adagrad optimizer: maintains a running sum of squared gradients
// (_exp_avg_sq) and applies lr * grad / (sqrt(accum) + eps) updates in place.
class Adagrad_Optimizer {
public:
    Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0)
        : _alpha(alpha), _eps(eps), _weight_decay(weight_decay)
    {
    }
    ~Adagrad_Optimizer() {}
#if defined(__AVX512__) or defined(__AVX256__)
    // SIMD update over the largest prefix divisible by SIMD_WIDTH * span;
    // writes the consumed element count to *rounded_size.
    template <int span>
    void Step_AVX(size_t* rounded_size,
                  float* _params,
                  float* grads,
                  float* _exp_avg_sq,
                  size_t param_size,
                  ds_half_precision_t* dev_param = nullptr,
                  bool half_precision = false);
#endif
    STEP(1)
    STEP(4)
    STEP(8)
    // Sync the internal step counter to the caller-supplied step value.
    inline void IncrementStep(size_t step)
    {
        _step++;
        if (_step != step) { _step = step; }
    }
    // Refresh hyperparameters before each step (they may change per call).
    inline void update_state(float lr, float epsilon, float weight_decay)
    {
        _alpha = lr;
        _eps = epsilon;
        _weight_decay = weight_decay;
    }

private:
    float _alpha;         // learning rate
    float _eps;           // denominator stabilizer
    float _weight_decay;  // L2 coefficient folded into the gradient

    // NOTE(review): _betta1_t/_betta2_t are never referenced in this header —
    // possibly kept for layout parity with the Adam optimizer; verify.
    float _betta1_t;
    float _betta2_t;
    size_t _step;
};
|
| 66 |
+
|
| 67 |
+
#if defined(__AVX512__) or defined(__AVX256__)
|
| 68 |
+
template <int span>
|
| 69 |
+
void Adagrad_Optimizer::Step_AVX(size_t* rounded_size,
|
| 70 |
+
float* _params,
|
| 71 |
+
float* grads,
|
| 72 |
+
float* _exp_avg_sq,
|
| 73 |
+
size_t _param_size,
|
| 74 |
+
ds_half_precision_t* dev_params,
|
| 75 |
+
bool half_precision)
|
| 76 |
+
{
|
| 77 |
+
size_t new_rounded_size = 0;
|
| 78 |
+
AVX_Data eps_4;
|
| 79 |
+
eps_4.data = SIMD_SET(_eps);
|
| 80 |
+
|
| 81 |
+
float step_size = -1 * _alpha;
|
| 82 |
+
AVX_Data step_size_4;
|
| 83 |
+
step_size_4.data = SIMD_SET(step_size);
|
| 84 |
+
|
| 85 |
+
AVX_Data weight_decay4;
|
| 86 |
+
if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay);
|
| 87 |
+
new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
|
| 88 |
+
for (size_t t = 0; t < new_rounded_size; t += TILE) {
|
| 89 |
+
size_t copy_size = TILE;
|
| 90 |
+
if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
|
| 91 |
+
size_t offset = copy_size + t;
|
| 92 |
+
#pragma omp parallel for
|
| 93 |
+
for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
|
| 94 |
+
AVX_Data grad_4[span];
|
| 95 |
+
simd_load<span>(grad_4, grads + i, half_precision);
|
| 96 |
+
|
| 97 |
+
AVX_Data momentum_4[span];
|
| 98 |
+
simd_load<span>(momentum_4, grads + i, false);
|
| 99 |
+
|
| 100 |
+
AVX_Data variance_4[span];
|
| 101 |
+
simd_load<span>(variance_4, _exp_avg_sq + i, false);
|
| 102 |
+
|
| 103 |
+
AVX_Data param_4[span];
|
| 104 |
+
simd_load<span>(param_4, _params + i, half_precision);
|
| 105 |
+
|
| 106 |
+
if (_weight_decay > 0) { simd_fma<span>(grad_4, param_4, weight_decay4, grad_4); }
|
| 107 |
+
|
| 108 |
+
simd_fma<span>(variance_4, grad_4, grad_4, variance_4);
|
| 109 |
+
simd_sqrt<span>(grad_4, variance_4);
|
| 110 |
+
simd_add<span>(grad_4, grad_4, eps_4);
|
| 111 |
+
simd_div<span>(grad_4, momentum_4, grad_4);
|
| 112 |
+
simd_fma<span>(param_4, grad_4, step_size_4, param_4);
|
| 113 |
+
|
| 114 |
+
simd_store<span>(_params + i, param_4, half_precision);
|
| 115 |
+
simd_store<span>(_exp_avg_sq + i, variance_4, false);
|
| 116 |
+
}
|
| 117 |
+
}
|
| 118 |
+
*rounded_size = new_rounded_size;
|
| 119 |
+
}
|
| 120 |
+
#endif
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/includes/type_shim.h
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
/* Taken from NVIDIA/apex commit 855808f3fc268e9715d613f3c2e56469d8c986d8 */
|
| 7 |
+
#include <sycl/sycl.hpp>
|
| 8 |
+
/* #include <dpct/dpct.hpp> */
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
|
| 11 |
+
// Forward/backward compatibility hack around
|
| 12 |
+
// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288
|
| 13 |
+
// pending more future-proof guidance from upstream.
|
| 14 |
+
// struct TypeShim
|
| 15 |
+
// {
|
| 16 |
+
// const at::Type& payload;
|
| 17 |
+
// TypeShim(const at::Type& type) : payload(type) {}
|
| 18 |
+
// // Enable trivial conversion to a const at::Type& for pre-3aeb78
|
| 19 |
+
// operator const at::Type&(){ return payload; };
|
| 20 |
+
// // Enable dispatch switch statements to take *this directly for post-3aeb78
|
| 21 |
+
// //operator at::ScalarType(){ return payload.; };
|
| 22 |
+
// };
|
| 23 |
+
|
| 24 |
+
// Type-dispatch macros adapted from NVIDIA/apex: expand __VA_ARGS__ once per
// supported dtype, binding the concrete type to `scalar_t_<LEVEL>` so the body
// can instantiate templates with it. Unsupported dtypes raise via AT_ERROR.

// Dispatch over float / half / bfloat16 (no double).
#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...)                      \
    switch (TYPE) {                                                          \
        case at::ScalarType::Float: {                                        \
            using scalar_t_##LEVEL = float;                                  \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        case at::ScalarType::Half: {                                         \
            using scalar_t_##LEVEL = at::Half;                               \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        case at::ScalarType::BFloat16: {                                     \
            using scalar_t_##LEVEL = at::BFloat16;                           \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }

// Dispatch over double / float / half / bfloat16.
#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...)               \
    switch (TYPE) {                                                          \
        case at::ScalarType::Double: {                                       \
            using scalar_t_##LEVEL = double;                                 \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        case at::ScalarType::Float: {                                        \
            using scalar_t_##LEVEL = float;                                  \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        case at::ScalarType::Half: {                                         \
            using scalar_t_##LEVEL = at::Half;                               \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        case at::ScalarType::BFloat16: {                                     \
            using scalar_t_##LEVEL = at::BFloat16;                           \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }

// Dispatch over double / float only (no reduced-precision types).
#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...)                    \
    switch (TYPE) {                                                          \
        case at::ScalarType::Double: {                                       \
            using scalar_t_##LEVEL = double;                                 \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        case at::ScalarType::Float: {                                        \
            using scalar_t_##LEVEL = float;                                  \
            __VA_ARGS__;                                                     \
            break;                                                           \
        }                                                                    \
        default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }
|
| 83 |
+
|
| 84 |
+
// Tree-reduce the per-thread values of a work-group into the first `lanes`
// lanes (lanes <= 32, power of two). `x` is group-local scratch memory sized
// to the group. Only threads with tid < lanes receive a meaningful `final`;
// if share_result is true the result is also published back into x[0..lanes).
// Adapted from NVIDIA/apex and machine-migrated to SYCL (DPCT comments kept).
template <typename T>
__inline__ __attribute__((always_inline)) T reduce_block_into_lanes(
    T* x,
    T val,
    int lanes = 1,
    bool share_result = false)  // lanes is intended to be <= 32.
{
    auto item_ct1 = sycl::ext::oneapi::experimental::this_nd_item<3>();
    int tid = item_ct1.get_local_id(2) + item_ct1.get_local_id(1) * item_ct1.get_local_range(2);
    int blockSize = item_ct1.get_local_range(2) *
                    item_ct1.get_local_range(1);  // blockSize is intended to be a multiple of 32.

    if (blockSize >= 64) {
        // Stage every thread's value into scratch before the tree reduction.
        x[tid] = val;
        /*
        DPCT1118:1: SYCL group functions and algorithms must be encountered in converged control
        flow. You may need to adjust the code.
        */
        /*
        DPCT1065:6: Consider replacing sycl::nd_item::barrier() with
        sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if
        there is no access to global memory.
        */
        item_ct1.barrier();
    }

    // Pairwise tree reduction in scratch memory down to 64 elements.
#pragma unroll
    for (int i = (blockSize >> 1); i >= 64; i >>= 1) {
        if (tid < i) x[tid] = x[tid] + x[tid + i];
        /*
        DPCT1118:2: SYCL group functions and algorithms must be encountered in converged control
        flow. You may need to adjust the code.
        */
        /*
        DPCT1065:7: Consider replacing sycl::nd_item::barrier() with
        sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if
        there is no access to global memory.
        */
        item_ct1.barrier();
    }

    T final;

    if (tid < 32) {
        // Final sub-group-level reduction of the remaining 64 (or fewer) values.
        if (blockSize >= 64)
            final = x[tid] + x[tid + 32];
        else
            final = val;
        // __SYNCWARP();

        // NOTE(review): __shfl_down_sync is a CUDA warp intrinsic, not SYCL;
        // this likely needs sycl::shift_group_left over the sub-group to
        // compile for non-CUDA targets — confirm how this header is built.
#pragma unroll
        for (int i = 16; i >= lanes; i >>= 1)
            final = final + __shfl_down_sync(0xffffffff, final, i);
    }

    if (share_result) {
        if (tid < lanes) x[tid] = final;  // EpilogueOp
        // Make sure the smem result is visible to all warps.
        /*
        DPCT1118:3: SYCL group functions and algorithms must be encountered in converged control
        flow. You may need to adjust the code.
        */
        /*
        DPCT1065:8: Consider replacing sycl::nd_item::barrier() with
        sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if
        there is no access to global memory.
        */
        item_ct1.barrier();
    }

    return final;
}
|
parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
|
| 7 |
+
from .inference.config import DeepSpeedInferenceConfig
|
| 8 |
+
from ...model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
|
| 9 |
+
from .inference.moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
|
parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
from typing import Optional
|
| 7 |
+
import torch
|
| 8 |
+
from deepspeed.ops.op_builder import SpatialInferenceBuilder
|
| 9 |
+
|
| 10 |
+
spatial_cuda_module = None
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def nhwc_bias_add(activation: torch.Tensor,
|
| 14 |
+
bias: torch.Tensor,
|
| 15 |
+
other: Optional[torch.Tensor] = None,
|
| 16 |
+
other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
|
| 17 |
+
global spatial_cuda_module
|
| 18 |
+
if spatial_cuda_module is None:
|
| 19 |
+
spatial_cuda_module = SpatialInferenceBuilder().load()
|
| 20 |
+
|
| 21 |
+
if other is None:
|
| 22 |
+
return spatial_cuda_module.nhwc_bias_add(activation, bias)
|
| 23 |
+
elif other_bias is None:
|
| 24 |
+
return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other)
|
| 25 |
+
else:
|
| 26 |
+
return spatial_cuda_module.nhwc_bias_add_bias_add(activation, bias, other, other_bias)
|
parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import torch
|
| 8 |
+
from deepspeed.utils.types import ActivationFuncType, NormType
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TransformerConfig():
|
| 12 |
+
|
| 13 |
+
def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers):
|
| 14 |
+
self.layer_id = -1
|
| 15 |
+
self.hidden_size = hidden_size
|
| 16 |
+
self.intermediate_size = intermediate_size
|
| 17 |
+
self.heads = heads
|
| 18 |
+
self.num_hidden_layers = num_hidden_layers
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class DeepSpeedInferenceConfig(TransformerConfig):
|
| 22 |
+
"""Initialize the DeepSpeed Transformer Config.
|
| 23 |
+
Arguments:
|
| 24 |
+
hidden_size: The hidden size of the transformer layer
|
| 25 |
+
intermediate_size: The intermediate size of the feed-forward part of transformer layer
|
| 26 |
+
heads: The number of heads in the self-attention of the transformer layer
|
| 27 |
+
num_hidden_layers: The number of transformer layers
|
| 28 |
+
layer_norm_eps: The epsilon value for the layer norm
|
| 29 |
+
local_rank: Optional: The rank of GPU running the transformer kernel, it is not required
|
| 30 |
+
to use if the model already set the current device, otherwise need to set it
|
| 31 |
+
so that the transformer kernel can work on the right device
|
| 32 |
+
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
|
| 33 |
+
using model-parallel architecture. If the client model already takes care of this, there is no
|
| 34 |
+
need to pass this argument.
|
| 35 |
+
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
|
| 36 |
+
stochastic_mode: Enable for high performance, please note that this flag has some level of
|
| 37 |
+
non-determinism and can produce different results on different runs. However, we have seen
|
| 38 |
+
that by enabling it, the pretraining tasks such as BERT are not affected and can obtain
|
| 39 |
+
a high accuracy level. On the other hand, for the downstream tasks, such as fine-tuning, we recommend
|
| 40 |
+
to turn it off in order to be able to reproduce the same result through the regular kernel execution.
|
| 41 |
+
|
| 42 |
+
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
|
| 43 |
+
return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
|
| 44 |
+
bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture.
|
| 45 |
+
use_triton: This flag is to enable triton kernels in inference or not.
|
| 46 |
+
"""
|
| 47 |
+
|
| 48 |
+
def __init__(self,
|
| 49 |
+
hidden_size=-1,
|
| 50 |
+
intermediate_size=-1,
|
| 51 |
+
heads=-1,
|
| 52 |
+
num_hidden_layers=-1,
|
| 53 |
+
layer_norm_eps=1e-12,
|
| 54 |
+
local_rank=-1,
|
| 55 |
+
mp_size=1,
|
| 56 |
+
dtype=torch.float16,
|
| 57 |
+
pre_layer_norm=True,
|
| 58 |
+
norm_type=NormType.LayerNorm,
|
| 59 |
+
stochastic_mode=False,
|
| 60 |
+
scale_attention=True,
|
| 61 |
+
triangular_masking=True,
|
| 62 |
+
local_attention=False,
|
| 63 |
+
window_size=256,
|
| 64 |
+
rotary_dim=-1,
|
| 65 |
+
rotate_half=False,
|
| 66 |
+
rotate_every_two=True,
|
| 67 |
+
return_tuple=True,
|
| 68 |
+
mlp_after_attn=True,
|
| 69 |
+
mlp_act_func_type=ActivationFuncType.GELU,
|
| 70 |
+
training_mp_size=1,
|
| 71 |
+
bigscience_bloom=False,
|
| 72 |
+
max_out_tokens=1024,
|
| 73 |
+
min_out_tokens=1,
|
| 74 |
+
enable_qkv_quantization=False,
|
| 75 |
+
use_mup=False,
|
| 76 |
+
scale_attn_by_inverse_layer_idx=False,
|
| 77 |
+
return_single_tuple=False,
|
| 78 |
+
set_empty_params=False,
|
| 79 |
+
transposed_mode=False,
|
| 80 |
+
use_triton=False,
|
| 81 |
+
triton_autotune=False,
|
| 82 |
+
num_kv=-1,
|
| 83 |
+
rope_theta=10000):
|
| 84 |
+
super(DeepSpeedInferenceConfig,
|
| 85 |
+
self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
|
| 86 |
+
num_hidden_layers)
|
| 87 |
+
self.dtype = dtype
|
| 88 |
+
self.pre_layer_norm = pre_layer_norm
|
| 89 |
+
self.norm_type = norm_type
|
| 90 |
+
self.local_rank = local_rank
|
| 91 |
+
self.stochastic_mode = stochastic_mode
|
| 92 |
+
self.epsilon = layer_norm_eps
|
| 93 |
+
self.mp_size = mp_size
|
| 94 |
+
self.scale_attention = scale_attention
|
| 95 |
+
self.triangular_masking = triangular_masking
|
| 96 |
+
self.local_attention = local_attention
|
| 97 |
+
self.window_size = window_size
|
| 98 |
+
self.rotary_dim = rotary_dim
|
| 99 |
+
self.rotate_half = rotate_half
|
| 100 |
+
self.rotate_every_two = rotate_every_two
|
| 101 |
+
self.return_tuple = return_tuple
|
| 102 |
+
self.mlp_after_attn = mlp_after_attn
|
| 103 |
+
self.mlp_act_func_type = mlp_act_func_type
|
| 104 |
+
self.specialized_mode = False
|
| 105 |
+
self.training_mp_size = training_mp_size
|
| 106 |
+
self.bigscience_bloom = bigscience_bloom
|
| 107 |
+
self.max_out_tokens = max_out_tokens
|
| 108 |
+
self.min_out_tokens = min_out_tokens
|
| 109 |
+
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
|
| 110 |
+
self.enable_qkv_quantization = enable_qkv_quantization
|
| 111 |
+
self.use_mup = use_mup
|
| 112 |
+
self.return_single_tuple = return_single_tuple
|
| 113 |
+
self.set_empty_params = set_empty_params
|
| 114 |
+
self.transposed_mode = transposed_mode
|
| 115 |
+
self.use_triton = use_triton
|
| 116 |
+
self.triton_autotune = triton_autotune
|
| 117 |
+
self.num_kv = num_kv
|
| 118 |
+
self.rope_theta = rope_theta
|
| 119 |
+
|
| 120 |
+
@classmethod
|
| 121 |
+
def from_dict(cls, json_object):
|
| 122 |
+
config = DeepSpeedInferenceConfig()
|
| 123 |
+
for key, value in json_object.items():
|
| 124 |
+
config.__dict__[key] = value
|
| 125 |
+
return config
|
| 126 |
+
|
| 127 |
+
@classmethod
|
| 128 |
+
def from_json_file(cls, json_file):
|
| 129 |
+
with open(json_file, "r", encoding='utf-8') as reader:
|
| 130 |
+
text = reader.read()
|
| 131 |
+
return cls.from_dict(json.loads(text))
|
parrot/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
import math
|
| 7 |
+
import torch
|
| 8 |
+
from torch.autograd import Function
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from packaging import version as pkg_version
|
| 11 |
+
from deepspeed.utils.logging import log_dist
|
| 12 |
+
from deepspeed.accelerator import get_accelerator
|
| 13 |
+
from deepspeed.ops.op_builder import InferenceBuilder
|
| 14 |
+
|
| 15 |
+
# Cuda modules will be imported if needed
|
| 16 |
+
inference_module = None
|
| 17 |
+
minus_inf = -10000.0
|
| 18 |
+
triton_flash_attn = None
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def load_triton_flash_attn():
|
| 22 |
+
global triton_flash_attn
|
| 23 |
+
try:
|
| 24 |
+
import triton
|
| 25 |
+
except ImportError:
|
| 26 |
+
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
|
| 27 |
+
|
| 28 |
+
if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"):
|
| 29 |
+
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
|
| 30 |
+
|
| 31 |
+
from .triton_ops import triton_flash_attn
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class DeepSpeedDiffusersAttentionFunction(Function):
|
| 35 |
+
|
| 36 |
+
@staticmethod
|
| 37 |
+
def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb,
|
| 38 |
+
num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob,
|
| 39 |
+
do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta):
|
| 40 |
+
|
| 41 |
+
def _transpose_for_context(x):
|
| 42 |
+
x = x.permute(0, 2, 1, 3)
|
| 43 |
+
new_x_layer_shape = x.size()[:-2] + \
|
| 44 |
+
(hidden_size_per_partition,)
|
| 45 |
+
return x.reshape(*new_x_layer_shape)
|
| 46 |
+
|
| 47 |
+
def _transpose_for_scores(x):
|
| 48 |
+
attention_head_size = x.shape[-1] // num_attention_heads_per_partition
|
| 49 |
+
new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, attention_head_size)
|
| 50 |
+
x = x.reshape(*new_x_shape)
|
| 51 |
+
x = x.permute(0, 2, 1, 3)
|
| 52 |
+
return x.contiguous()
|
| 53 |
+
|
| 54 |
+
def selfAttention_fp(input, context, input_mask):
|
| 55 |
+
if config.dtype in [torch.half, torch.float16] and input.dtype == torch.float32:
|
| 56 |
+
input = input.half()
|
| 57 |
+
head_size = input.shape[-1] // config.heads
|
| 58 |
+
do_flash_attn = (head_size <= 128)
|
| 59 |
+
scale = (1 / norm_factor) * (1 / norm_factor)
|
| 60 |
+
if do_flash_attn and context is None:
|
| 61 |
+
qkv_out = linear_func(input, attn_qkvw, attn_qkvb if attn_qkvb is not None else attn_qkvw, attn_qkvb
|
| 62 |
+
is not None, do_flash_attn, config.heads, False, rope_theta)
|
| 63 |
+
|
| 64 |
+
context_layer = triton_flash_attn_kernel(qkv_out[0], qkv_out[1], qkv_out[2], scale,
|
| 65 |
+
input.shape[-2] % 128 == 0)
|
| 66 |
+
context_layer = _transpose_for_context(context_layer[:, :, :, :head_size])
|
| 67 |
+
|
| 68 |
+
else:
|
| 69 |
+
do_flash_attn = False
|
| 70 |
+
if context is not None:
|
| 71 |
+
query = torch.matmul(input, attn_qw)
|
| 72 |
+
key = torch.matmul(context, attn_kw)
|
| 73 |
+
value = torch.matmul(context, attn_vw)
|
| 74 |
+
else:
|
| 75 |
+
qkv = torch.matmul(input, attn_qkvw)
|
| 76 |
+
query, key, value = qkv.chunk(3, dim=-1)
|
| 77 |
+
query = query.contiguous()
|
| 78 |
+
key = key.contiguous()
|
| 79 |
+
value = value.contiguous()
|
| 80 |
+
query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn)
|
| 81 |
+
attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1)
|
| 82 |
+
context_layer = _transpose_for_context(torch.matmul(attention_scores, value))
|
| 83 |
+
|
| 84 |
+
output = linear_func(context_layer, attn_ow, attn_ob, do_out_bias, False, config.heads, False, rope_theta)
|
| 85 |
+
return output
|
| 86 |
+
|
| 87 |
+
output = selfAttention_fp(input, context, input_mask)
|
| 88 |
+
|
| 89 |
+
return output
|
| 90 |
+
|
| 91 |
+
@staticmethod
|
| 92 |
+
def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3):
|
| 93 |
+
raise RuntimeError('You are running with DeepSpeed Inference mode. \
|
| 94 |
+
Please switch to Training mode for running backward!')
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class DeepSpeedDiffusersAttention(nn.Module):
|
| 98 |
+
"""Initialize the DeepSpeed Transformer Layer.
|
| 99 |
+
Arguments:
|
| 100 |
+
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
|
| 101 |
+
layer_id will be 0,1,2...23 when each layer object is instantiated
|
| 102 |
+
config: An object of DeepSpeedInferenceConfig
|
| 103 |
+
"""
|
| 104 |
+
layer_id = 0
|
| 105 |
+
|
| 106 |
+
def __init__(
|
| 107 |
+
self,
|
| 108 |
+
config,
|
| 109 |
+
):
|
| 110 |
+
super(DeepSpeedDiffusersAttention, self).__init__()
|
| 111 |
+
|
| 112 |
+
self.config = config
|
| 113 |
+
self.config.layer_id = DeepSpeedDiffusersAttention.layer_id
|
| 114 |
+
DeepSpeedDiffusersAttention.layer_id += 1
|
| 115 |
+
device = get_accelerator().current_device_name() if config.bigscience_bloom else 'cpu'
|
| 116 |
+
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
|
| 117 |
+
|
| 118 |
+
data_type = self.config.dtype
|
| 119 |
+
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
|
| 120 |
+
global inference_module
|
| 121 |
+
if inference_module is None:
|
| 122 |
+
builder = InferenceBuilder()
|
| 123 |
+
inference_module = builder.load()
|
| 124 |
+
|
| 125 |
+
if DeepSpeedDiffusersAttention.layer_id == 1:
|
| 126 |
+
log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0])
|
| 127 |
+
|
| 128 |
+
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
|
| 129 |
+
qkv_size_per_partition,
|
| 130 |
+
dtype=data_type,
|
| 131 |
+
device=device),
|
| 132 |
+
requires_grad=False)
|
| 133 |
+
self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size,
|
| 134 |
+
self.config.hidden_size,
|
| 135 |
+
dtype=data_type,
|
| 136 |
+
device=device),
|
| 137 |
+
requires_grad=False)
|
| 138 |
+
self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size,
|
| 139 |
+
self.config.hidden_size,
|
| 140 |
+
dtype=data_type,
|
| 141 |
+
device=device),
|
| 142 |
+
requires_grad=False)
|
| 143 |
+
self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size,
|
| 144 |
+
self.config.hidden_size,
|
| 145 |
+
dtype=data_type,
|
| 146 |
+
device=device),
|
| 147 |
+
requires_grad=False)
|
| 148 |
+
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
|
| 149 |
+
requires_grad=False)
|
| 150 |
+
out_size_per_partition = self.config.hidden_size // self.config.mp_size
|
| 151 |
+
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
|
| 152 |
+
self.config.hidden_size,
|
| 153 |
+
dtype=data_type,
|
| 154 |
+
device=device),
|
| 155 |
+
requires_grad=False)
|
| 156 |
+
|
| 157 |
+
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
|
| 158 |
+
requires_grad=False)
|
| 159 |
+
self.do_out_bias = True
|
| 160 |
+
|
| 161 |
+
if triton_flash_attn is None:
|
| 162 |
+
load_triton_flash_attn()
|
| 163 |
+
self.triton_flash_attn_kernel = triton_flash_attn()
|
| 164 |
+
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
|
| 165 |
+
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
|
| 166 |
+
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
|
| 167 |
+
|
| 168 |
+
self.norm_factor = math.sqrt(math.sqrt(self.config.hidden_size // self.config.heads))
|
| 169 |
+
|
| 170 |
+
if self.config.scale_attn_by_inverse_layer_idx is True:
|
| 171 |
+
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
|
| 172 |
+
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
|
| 173 |
+
|
| 174 |
+
if self.config.dtype in [torch.float16, torch.int8]:
|
| 175 |
+
self.score_context_func = inference_module.softmax_context_fp16
|
| 176 |
+
self.linear_func = inference_module.linear_layer_fp16
|
| 177 |
+
self.allocate_workspace = inference_module.allocate_workspace_fp16
|
| 178 |
+
else:
|
| 179 |
+
self.score_context_func = inference_module.softmax_context_fp32
|
| 180 |
+
self.linear_func = inference_module.linear_layer_fp32
|
| 181 |
+
self.allocate_workspace = inference_module.allocate_workspace_fp32
|
| 182 |
+
|
| 183 |
+
def forward(self, input, context=None, input_mask=None):
|
| 184 |
+
if self.config.layer_id == 0:
|
| 185 |
+
self.allocate_workspace(self.config.hidden_size, self.config.heads,
|
| 186 |
+
input.size()[1],
|
| 187 |
+
input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False,
|
| 188 |
+
0, self.config.max_out_tokens, self.config.min_out_tokens)
|
| 189 |
+
output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw,
|
| 190 |
+
self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb,
|
| 191 |
+
self.num_attention_heads_per_partition, self.norm_factor,
|
| 192 |
+
self.hidden_size_per_partition, self.attn_ow, self.attn_ob,
|
| 193 |
+
self.do_out_bias, self.score_context_func, self.linear_func,
|
| 194 |
+
self.triton_flash_attn_kernel, self.config.rope_theta)
|
| 195 |
+
|
| 196 |
+
return output
|