Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp +16 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp +312 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp +25 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu +168 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h +38 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp +76 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h +59 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp +126 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h +79 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp +104 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h +59 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp +45 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h +27 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp +125 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h +31 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp +298 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h +77 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp +135 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h +46 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp +46 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json +29 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/common/custom_cuda_kernel.cu +44 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention.cpp +62 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_back.cu +218 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_cu.cu +160 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h +250 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h +592 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h +251 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h +168 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h +119 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h +181 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h +714 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h +388 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h +191 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h +347 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h +1939 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm_kernel_utils.h +254 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h +691 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h +91 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h +1964 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h +886 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h +1938 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h +57 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h +269 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_backward.h +1965 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_forward.h +986 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h +148 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h +93 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/StopWatch.h +103 -0
- parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/Timer.h +51 -0
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp
ADDED
@@ -0,0 +1,16 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include "cpu_adam.h"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)");
    m.def("adam_update_copy",
          &ds_adam_step_plus_copy,
          "DeepSpeed CPU Adam update and param copy (C++)");
    m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)");
    m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)");
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp
ADDED
@@ -0,0 +1,312 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include <torch/extension.h>
#include <cassert>
#include <iostream>
#include <memory>
#include <type_traits>
#include <unordered_map>
#include "cpu_adam.h"

#if defined(__ENABLE_CUDA__)
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include "cuda.h"
#include "curand.h"
#include "custom_cuda_layers.h"
#endif

static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;

// C++ interface

void Adam_Optimizer::Step_1(float* _params,
                            float* grads,
                            float* _exp_avg,
                            float* _exp_avg_sq,
                            size_t _param_size,
                            ds_half_precision_t* dev_params,
                            bool half_precision)
{
    size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
    Step_AVX<1>(&rounded_size,
                _params,
                grads,
                _exp_avg,
                _exp_avg_sq,
                _param_size,
                dev_params,
                half_precision);
#endif
    if (_param_size > rounded_size) {
        float betta1_minus1 = 1 - _betta1;
        float betta2_minus1 = 1 - _betta2;

        float step_size = -1 * _alpha / _bias_correction1;
        float w_decay = -1 * _alpha * _weight_decay;
        ds_half_precision_t* grads_cast_h;
        ds_half_precision_t* params_cast_h;
        if (half_precision) {
            grads_cast_h = reinterpret_cast<ds_half_precision_t*>(grads);
            params_cast_h = reinterpret_cast<ds_half_precision_t*>(_params);
        }

        for (size_t t = rounded_size; t < _param_size; t += TILE) {
            size_t copy_size = TILE;
            if ((t + TILE) > _param_size) copy_size = _param_size - t;
            size_t offset = copy_size + t;
#if defined(__ENABLE_CUDA__)
            if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
#elif defined(__ENABLE_CANN__)
            if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
#endif
#pragma omp parallel for
            for (size_t k = t; k < offset; k++) {
                float grad = half_precision ? (float)grads_cast_h[k] : grads[k];
                float param = half_precision ? (float)params_cast_h[k] : _params[k];
                float momentum = _exp_avg[k];
                float variance = _exp_avg_sq[k];
                if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; }
                momentum = momentum * _betta1;
                momentum = grad * betta1_minus1 + momentum;

                variance = variance * _betta2;
                grad = grad * grad;
                variance = grad * betta2_minus1 + variance;

                grad = sqrt(variance);
                grad = grad * _bias_correction2 + _eps;
                grad = momentum / grad;
                if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; }
                param = grad * step_size + param;
#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
                if (dev_params) _doubled_buffer[_buf_index][k - t] = param;
#endif
                if (half_precision)
                    params_cast_h[k] = (ds_half_precision_t)param;
                else
                    _params[k] = param;
                _exp_avg[k] = momentum;
                _exp_avg_sq[k] = variance;
            }
#if defined(__ENABLE_CUDA__)
            if (dev_params) {
                launch_param_update(
                    _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]);

                _buf_index = !_buf_index;
            }
#elif defined(__ENABLE_CANN__)
            if (dev_params) {
                size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
                aclrtMemcpy(dev_params + t,
                            memcpy_size,
                            _doubled_buffer[_buf_index],
                            memcpy_size,
                            aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);

                _buf_index = !_buf_index;
            }
#endif
        }
    }
}

void Adam_Optimizer::Step_4(float* _params,
                            float* grads,
                            float* _exp_avg,
                            float* _exp_avg_sq,
                            size_t _param_size,
                            ds_half_precision_t* dev_params,
                            bool half_precision)
{
    size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
    Step_AVX<4>(&rounded_size,
                _params,
                grads,
                _exp_avg,
                _exp_avg_sq,
                _param_size,
                dev_params,
                half_precision);
#endif
    if (_param_size > rounded_size)
        Step_1((_params + rounded_size),
               (grads + rounded_size),
               (_exp_avg + rounded_size),
               (_exp_avg_sq + rounded_size),
               (_param_size - rounded_size),
               (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
               half_precision);
}

int create_adam_optimizer(int optimizer_id,
                          float alpha,
                          float betta1,
                          float betta2,
                          float eps,
                          float weight_decay,
                          bool adamw_mode,
                          bool should_log)
{
    auto opt =
        std::make_shared<Adam_Optimizer>(alpha, betta1, betta2, eps, weight_decay, adamw_mode);

    s_optimizers[optimizer_id] = opt;

    if (should_log) {
        std::string avx_type = "";
#if defined(__AVX512__)
        avx_type = "AVX512";
#else
#if defined(__AVX256__)
        avx_type = "AVX2";
#else
        avx_type = "scalar";
#endif
#endif

        printf("Adam Optimizer #%d is created with %s arithmetic capability.\n",
               optimizer_id,
               avx_type.c_str());
        printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n",
               alpha,
               betta1,
               betta2,
               weight_decay,
               (int)adamw_mode);
    }

    return 0;
}

void Adam_Optimizer::Step_8(float* _params,
                            float* grads,
                            float* _exp_avg,
                            float* _exp_avg_sq,
                            size_t _param_size,
                            ds_half_precision_t* dev_params,
                            bool half_precision)
{
    size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
    Step_AVX<8>(&rounded_size,
                _params,
                grads,
                _exp_avg,
                _exp_avg_sq,
                _param_size,
                dev_params,
                half_precision);
#endif
    if (_param_size > rounded_size)
        Step_4((_params + rounded_size),
               (grads + rounded_size),
               (_exp_avg + rounded_size),
               (_exp_avg_sq + rounded_size),
               (_param_size - rounded_size),
               (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
               half_precision);
}

int ds_adam_step(int optimizer_id,
                 size_t step,
                 float lr,
                 float beta1,
                 float beta2,
                 float epsilon,
                 float weight_decay,
                 bool bias_correction,
                 torch::Tensor& params,
                 torch::Tensor& grads,
                 torch::Tensor& exp_avg,
                 torch::Tensor& exp_avg_sq)
{
    auto params_c = params.contiguous();
    auto grads_c = grads.contiguous();
    auto exp_avg_c = exp_avg.contiguous();
    auto exp_avg_sq_c = exp_avg_sq.contiguous();

    // assert(params.options().dtype() == grads.options().dtype());

    float* params_ptr = (float*)params_c.data_ptr();
    float* grads_ptr = (float*)grads_c.data_ptr();
    float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
    float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();

    std::shared_ptr<Adam_Optimizer> opt =
        std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
    opt->IncrementStep(step, beta1, beta2);
    opt->update_state(lr, epsilon, weight_decay, bias_correction);

    opt->Step_8(params_ptr,
                grads_ptr,
                exp_avg_ptr,
                exp_avg_sq_ptr,
                params_c.numel(),
                nullptr,
                (params.options().dtype() == at::kHalf));

#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
    opt->SynchronizeStreams();
#endif
    return 0;
}

int ds_adam_step_plus_copy(int optimizer_id,
                           size_t step,
                           float lr,
                           float beta1,
                           float beta2,
                           float epsilon,
                           float weight_decay,
                           bool bias_correction,
                           torch::Tensor& params,
                           torch::Tensor& grads,
                           torch::Tensor& exp_avg,
                           torch::Tensor& exp_avg_sq,
                           torch::Tensor& device_params)
{
#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
    auto params_c = params.contiguous();
    auto device_params_c = device_params.contiguous();
    auto exp_avg_c = exp_avg.contiguous();
    auto exp_avg_sq_c = exp_avg_sq.contiguous();
    auto grads_c = grads.contiguous();

    float* params_ptr = (float*)params_c.data_ptr();
    float* grads_ptr = (float*)grads_c.data_ptr();
    ds_half_precision_t* device_params_ptr = (ds_half_precision_t*)device_params_c.data_ptr();
    float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
    float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();

    std::shared_ptr<Adam_Optimizer> opt =
        std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
    opt->IncrementStep(step, beta1, beta2);
    opt->update_state(lr, epsilon, weight_decay, bias_correction);
    opt->Step_8(params_ptr,
                grads_ptr,
                exp_avg_ptr,
                exp_avg_sq_ptr,
                params_c.numel(),
                device_params_ptr,
                (params.options().dtype() == at::kHalf));

    opt->SynchronizeStreams();
#else
    assert(false);
#endif
    return 0;
}

int destroy_adam_optimizer(int optimizer_id)
{
    s_optimizers.erase(optimizer_id);

    return 0;
}
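As a reading aid for the scalar tail loop in Step_1 above: it is the element-wise Adam/AdamW recurrence. Writing g_t for the gradient, m_t and v_t for _exp_avg and _exp_avg_sq, and theta for the parameter, and assuming (the member definitions are not part of this 50-file view) that _bias_correction1 holds 1 - beta_1^t and _bias_correction2 holds 1/sqrt(1 - beta_2^t), the loop computes

m_t = \beta_1 m_{t-1} + (1 - \beta_1)\, g_t, \qquad v_t = \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2

\theta_t = \theta_{t-1} - \frac{\alpha}{1 - \beta_1^t} \cdot \frac{m_t}{\sqrt{v_t}\,/\sqrt{1 - \beta_2^t} + \epsilon}

In plain Adam mode the weight decay lambda is folded into the gradient first (g_t <- g_t + lambda * theta_{t-1}); in _adamw_mode the parameter is instead decayed directly (theta <- theta - alpha * lambda * theta) before the gradient step, matching the two _weight_decay branches above.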
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp
ADDED
@@ -0,0 +1,25 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include <torch/extension.h>

void multi_tensor_adam_cuda(int chunk_size,
                            at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr,
                            const float beta1,
                            const float beta2,
                            const float epsilon,
                            const int step,
                            const int mode,
                            const int bias_correction,
                            const float weight_decay);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("multi_tensor_adam",
          &multi_tensor_adam_cuda,
          "Compute and apply gradient update to parameters for Adam optimizer");
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu
ADDED
@@ -0,0 +1,168 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
*/

#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>

#include <assert.h>

#include "multi_tensor_apply.cuh"
#include "type_shim.h"

#define BLOCK_SIZE 512
#define ILP 4

typedef enum : int {
    ADAM_MODE_0 = 0,  // L2 regularization mode
    ADAM_MODE_1 = 1   // Decoupled weight decay mode(AdamW)
} adamMode_t;

using MATH_T = float;

template <typename T>
struct AdamFunctor {
    __device__ __forceinline__ void operator()(int chunk_size,
                                               volatile int* noop_gmem,
                                               TensorListMetadata<4>& tl,
                                               const float beta1,
                                               const float beta2,
                                               const float beta1_correction,
                                               const float beta2_correction,
                                               const float epsilon,
                                               const float lr,
                                               adamMode_t mode,
                                               const float decay)
    {
        // I'd like this kernel to propagate infs/nans.
        // if(*noop_gmem == 1)
        //     return;

        int tensor_loc = tl.block_to_tensor[blockIdx.x];

        // potentially use to pass in list of scalar
        // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

        int chunk_idx = tl.block_to_chunk[blockIdx.x];
        int n = tl.sizes[tensor_loc];

        T* g = (T*)tl.addresses[0][tensor_loc];
        g += chunk_idx * chunk_size;

        T* p = (T*)tl.addresses[1][tensor_loc];
        p += chunk_idx * chunk_size;

        T* m = (T*)tl.addresses[2][tensor_loc];
        m += chunk_idx * chunk_size;

        T* v = (T*)tl.addresses[3][tensor_loc];
        v += chunk_idx * chunk_size;

        n -= chunk_idx * chunk_size;

        // see note in multi_tensor_scale_kernel.cu
        for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) {
            MATH_T r_g[ILP];
            MATH_T r_p[ILP];
            MATH_T r_m[ILP];
            MATH_T r_v[ILP];
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                int i = i_start + threadIdx.x + ii * blockDim.x;
                if (i < n && i < chunk_size) {
                    r_g[ii] = g[i];
                    r_p[ii] = p[i];
                    r_m[ii] = m[i];
                    r_v[ii] = v[i];
                } else {
                    r_g[ii] = MATH_T(0);
                    r_p[ii] = MATH_T(0);
                    r_m[ii] = MATH_T(0);
                    r_v[ii] = MATH_T(0);
                }
            }
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                if (mode == ADAM_MODE_0) {  // L2
                    r_g[ii] = r_g[ii] + (decay * r_p[ii]);
                    r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
                    r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
                    MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
                    MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
                    MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
                    MATH_T update = next_m_unbiased / denom;
                    r_p[ii] = r_p[ii] - (lr * update);
                } else {  // weight decay
                    r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
                    r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
                    MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
                    MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
                    MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
                    MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
                    r_p[ii] = r_p[ii] - (lr * update);
                }
            }
#pragma unroll
            for (int ii = 0; ii < ILP; ii++) {
                int i = i_start + threadIdx.x + ii * blockDim.x;
                if (i < n && i < chunk_size) {
                    p[i] = r_p[ii];
                    m[i] = r_m[ii];
                    v[i] = r_v[ii];
                }
            }
        }
    }
};

void multi_tensor_adam_cuda(int chunk_size,
                            at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr,
                            const float beta1,
                            const float beta2,
                            const float epsilon,
                            const int step,
                            const int mode,
                            const int bias_correction,
                            const float weight_decay)
{
    using namespace at;

    // Handle bias correction mode
    float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
    if (bias_correction == 1) {
        bias_correction1 = 1 - std::pow(beta1, step);
        bias_correction2 = 1 - std::pow(beta2, step);
    }

    // Assume single type across p,g,m1,m2 now
    DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(),
                                   0,
                                   "adam",
                                   multi_tensor_apply<4>(BLOCK_SIZE,
                                                         chunk_size,
                                                         noop_flag,
                                                         tensor_lists,
                                                         AdamFunctor<scalar_t_0>(),
                                                         beta1,
                                                         beta2,
                                                         bias_correction1,
                                                         bias_correction2,
                                                         epsilon,
                                                         lr,
                                                         (adamMode_t)mode,
                                                         weight_decay);)

    AT_CUDA_CHECK(cudaGetLastError());
}
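The two adamMode_t branches in AdamFunctor above differ only in where the weight decay lambda (the decay argument) enters. With \hat m = m / \mathrm{bias\_correction}_1 and \hat v = v / \mathrm{bias\_correction}_2, where the host sets \mathrm{bias\_correction}_i = 1 - \beta_i^{\mathrm{step}} when bias_correction == 1 and 1 otherwise, the per-element updates are

\text{ADAM\_MODE\_0 (L2):}\quad g \leftarrow g + \lambda\,\theta, \qquad \theta \leftarrow \theta - \mathrm{lr}\cdot\frac{\hat m}{\sqrt{\hat v} + \epsilon}

\text{ADAM\_MODE\_1 (AdamW):}\quad \theta \leftarrow \theta - \mathrm{lr}\cdot\left(\frac{\hat m}{\sqrt{\hat v} + \epsilon} + \lambda\,\theta\right)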
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h
ADDED
@@ -0,0 +1,38 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <deepspeed_aio_utils.h>
#include <stdlib.h>
#include <memory>
#include <string>

using namespace std;

void do_aio_operation_sequential(const bool read_op,
                                 std::unique_ptr<aio_context>& aio_ctxt,
                                 std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
                                 deepspeed_aio_config_t* config,
                                 deepspeed_aio_perf_t* perf);

void do_aio_operation_overlap(const bool read_op,
                              std::unique_ptr<aio_context>& aio_ctxt,
                              std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
                              deepspeed_aio_config_t* config,
                              deepspeed_aio_perf_t* perf);

int open_file(const char* filename, const bool read_op);

void report_file_error(const char* filename, const std::string file_op, const int error_code);

int regular_read(const char* filename, std::vector<char>& buffer);

bool validate_aio_operation(const bool read_op,
                            const char* filename,
                            void* aio_buffer,
                            const long long int num_bytes);
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp
ADDED
@@ -0,0 +1,76 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <cmath>

#include "deepspeed_aio_utils.h"

using namespace std;

const int c_block_size = 128 * 1024;
const int c_io_queue_depth = 8;

deepspeed_aio_config_t::deepspeed_aio_config_t()
    : _block_size(c_block_size),
      _queue_depth(c_io_queue_depth),
      _single_submit(false),
      _overlap_events(false),
      _lock_memory(false)
{
}

deepspeed_aio_config_t::deepspeed_aio_config_t(const int block_size,
                                               const int queue_depth,
                                               const bool single_submit,
                                               const bool overlap_events,
                                               const bool lock_memory)
    : _block_size(block_size),
      _queue_depth(queue_depth),
      _single_submit(single_submit),
      _overlap_events(overlap_events),
      _lock_memory(lock_memory)
{
}

void deepspeed_aio_latency_t::dump(const std::string tag)
{
    std::cout << tag << _min_usec << " " << _max_usec << " " << _avg_usec << " " << std::endl;
}

void deepspeed_aio_latency_t::accumulate(const struct deepspeed_aio_latency_t& other)
{
    _min_usec += other._min_usec;
    _max_usec += other._max_usec;
    _avg_usec += other._avg_usec;
}

void deepspeed_aio_latency_t::scale(const float scaler)
{
    _min_usec *= scaler;
    _max_usec *= scaler;
    _avg_usec *= scaler;
}

aio_context::aio_context(const int block_size, const int queue_depth)
{
    _block_size = block_size;
    _queue_depth = queue_depth;
    for (auto i = 0; i < queue_depth; ++i) {
        _iocbs.push_back((struct iocb*)calloc(1, sizeof(struct iocb)));
    }
    _io_events.resize(queue_depth);
    io_queue_init(queue_depth, &_io_ctxt);
}

aio_context::~aio_context()
{
    for (auto& iocb : _iocbs) { free(iocb); }
    _io_events.resize(0);
    io_queue_release(_io_ctxt);
}
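aio_context above only sets up and tears down the libaio handles (io_queue_init / io_queue_release); the submit-and-drain loop lives in the do_aio_operation_* routines declared in deepspeed_aio_common.h, which are outside this 50-file view. As a reading aid, here is a minimal self-contained sketch of the libaio pattern those members support; the file path, sizes, and error handling are hypothetical and this is not DeepSpeed code.

// Minimal libaio sketch: one asynchronous pread of `block` bytes.
// Build with: g++ libaio_demo.cpp -laio
#include <fcntl.h>
#include <libaio.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    const size_t block = 128 * 1024;                // mirrors c_block_size above
    io_context_t ctx = 0;
    if (io_queue_init(8, &ctx) < 0) return 1;       // mirrors c_io_queue_depth above

    int fd = open("/tmp/example.dat", O_RDONLY);    // hypothetical input file
    if (fd < 0) return 1;

    void* buf = nullptr;
    if (posix_memalign(&buf, (size_t)sysconf(_SC_PAGESIZE), block) != 0) return 1;

    struct iocb cb;
    struct iocb* cbs[1] = {&cb};
    io_prep_pread(&cb, fd, buf, block, 0);          // same helper used in deepspeed_aio_utils.cpp
    if (io_submit(ctx, 1, cbs) != 1) return 1;      // hand the request to the kernel

    struct io_event events[1];
    io_getevents(ctx, 1, 1, events, nullptr);       // block until the read completes
    std::printf("read %ld bytes\n", (long)events[0].res);

    std::free(buf);
    close(fd);
    io_queue_release(ctx);
    return 0;
}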
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h
ADDED
@@ -0,0 +1,59 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <libaio.h>
#include <stdlib.h>

#include <string>
#include <vector>

using namespace std;

struct deepspeed_aio_latency_t {
    double _min_usec;
    double _max_usec;
    double _avg_usec;

    void dump(const std::string tag);
    void accumulate(const deepspeed_aio_latency_t&);
    void scale(const float value);
};

struct deepspeed_aio_perf_t {
    deepspeed_aio_latency_t _submit;
    deepspeed_aio_latency_t _complete;
    double _e2e_usec;
    double _e2e_rate_GB;
};

struct deepspeed_aio_config_t {
    const int _block_size;
    const int _queue_depth;
    const bool _single_submit;
    const bool _overlap_events;
    const bool _lock_memory;

    deepspeed_aio_config_t();
    deepspeed_aio_config_t(const int block_size,
                           const int queue_depth,
                           const bool single_submit,
                           const bool overlap_events,
                           const bool lock_memory);
};

struct aio_context {
    io_context_t _io_ctxt;
    std::vector<struct io_event> _io_events;
    std::vector<struct iocb*> _iocbs;
    int _block_size;
    int _queue_depth;

    aio_context(const int block_size, const int queue_depth);
    ~aio_context();
};
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp
ADDED
@@ -0,0 +1,126 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <cmath>
#include <iostream>

#include "deepspeed_aio_utils.h"

using namespace std;

const int c_block_size = 128 * 1024;
const int c_io_queue_depth = 8;

io_xfer_ctxt::io_xfer_ctxt(const int fd,
                           const long long int file_offset,
                           const long long int num_bytes,
                           const void* buffer)
    : _fd(fd), _base_offset(file_offset), _mem_buffer(buffer), _num_bytes(num_bytes)
{
}

io_prep_context::io_prep_context(const bool read_op,
                                 const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
                                 const size_t block_size,
                                 const std::vector<struct iocb*>* iocbs)
    : _read_op(read_op), _xfer_ctxt(xfer_ctxt), _block_size(block_size), _iocbs(iocbs)
{
}

void io_prep_context::prep_iocbs(const int n_iocbs,
                                 const size_t num_bytes,
                                 const void* start_buffer,
                                 const long long int start_offset)
{
    assert(static_cast<size_t>(n_iocbs) <= _iocbs->size());
    for (auto i = 0; i < n_iocbs; ++i) {
        const auto shift = i * _block_size;
        const auto xfer_buffer = (char*)start_buffer + _xfer_ctxt->_base_offset + shift;
        const auto xfer_offset = _xfer_ctxt->_base_offset + start_offset + shift;
        auto byte_count = _block_size;
        if ((shift + _block_size) > num_bytes) { byte_count = num_bytes - shift; }

        if (_read_op) {
            io_prep_pread(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset);
        } else {
            io_prep_pwrite(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset);
        }
    }
}

io_prep_generator::io_prep_generator(const bool read_op,
                                     const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
                                     const size_t block_size)
    : _read_op(read_op),
      _xfer_ctxt(xfer_ctxt),
      _block_size(block_size),
      _remaining_bytes(xfer_ctxt->_num_bytes),
      _next_iocb_index(0)
{
    _num_io_blocks =
        static_cast<long long int>(ceil(static_cast<double>(xfer_ctxt->_num_bytes) / block_size));
    _remaining_io_blocks = _num_io_blocks;
}

int io_prep_generator::prep_iocbs(const int n_iocbs, std::vector<struct iocb*>* iocbs)
{
    if ((_remaining_bytes) == 0 || (_remaining_io_blocks == 0)) {
        assert(static_cast<long long int>(_remaining_bytes) == _remaining_io_blocks);
        return 0;
    }

    assert(static_cast<size_t>(n_iocbs) <= iocbs->size());

    auto actual_n_iocbs = min(static_cast<long long int>(n_iocbs), _remaining_io_blocks);
    for (auto i = 0; i < actual_n_iocbs; ++i, ++_next_iocb_index) {
        const auto xfer_offset = _xfer_ctxt->_base_offset + (_next_iocb_index * _block_size);
        const auto xfer_buffer = (char*)_xfer_ctxt->_mem_buffer + xfer_offset;
        const auto num_bytes = min(static_cast<long long int>(_block_size), _remaining_bytes);

        if (_read_op) {
            io_prep_pread(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset);
        } else {
            io_prep_pwrite(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset);
        }
        _remaining_bytes -= num_bytes;
    }
    _remaining_io_blocks -= actual_n_iocbs;

    return actual_n_iocbs;
}

int get_file_size(const char* filename, long long int& size)
{
    struct stat st;
    if (stat(filename, &st) == -1) { return -1; }
    size = st.st_size;
    return 0;
}

void* ds_page_aligned_alloc(const size_t size, const bool lock)
{
    void* ptr;
    int retval;

    retval = posix_memalign(&ptr, (size_t)sysconf(_SC_PAGESIZE), size);
    if (retval) { return nullptr; }

    if (lock == false) { return ptr; }

    auto mlock_ret = mlock(ptr, size);
    if (mlock_ret != 0) {
        auto mlock_error = errno;
        std::cerr << "mlock failed to allocate " << size << " bytes with error no " << mlock_error
                  << " msg " << strerror(mlock_error) << std::endl;
        free(ptr);
        return nullptr;
    }

    return ptr;
}
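io_prep_generator above splits one transfer into _block_size chunks: a ceiling division gives the block count and the final block is shortened to whatever bytes remain. A standalone sketch of just that arithmetic, with hypothetical sizes (not DeepSpeed code):

// Worked example of the block-splitting arithmetic used by io_prep_generator.
#include <cstdio>

int main()
{
    const long long block_size = 128 * 1024;  // c_block_size in the files above
    const long long num_bytes = 1000000;      // hypothetical transfer size

    // Same ceiling division the generator performs.
    const long long num_blocks = (num_bytes + block_size - 1) / block_size;

    long long remaining = num_bytes;
    for (long long i = 0; i < num_blocks; ++i) {
        const long long offset = i * block_size;
        const long long bytes = remaining < block_size ? remaining : block_size;
        std::printf("iocb %lld: offset=%lld bytes=%lld\n", i, offset, bytes);
        remaining -= bytes;  // only the last block is shorter than block_size
    }
    return 0;
}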
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h
ADDED
@@ -0,0 +1,79 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#pragma once

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <fcntl.h>
#include <libaio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <deepspeed_aio_types.h>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct io_xfer_ctxt {
    const int _fd;
    const long long int _base_offset;
    const void* _mem_buffer;
    const long long int _num_bytes;

    io_xfer_ctxt(const int fd,
                 const long long int file_offset,
                 const long long int num_bytes,
                 const void* buffer);
};

struct io_prep_context {
    const bool _read_op;
    const std::unique_ptr<io_xfer_ctxt>& _xfer_ctxt;
    const size_t _block_size;
    const std::vector<struct iocb*>* _iocbs;

    io_prep_context(const bool read_op,
                    const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
                    const size_t block_size,
                    const std::vector<struct iocb*>* iocbs);

    void prep_iocbs(const int n_iocbs,
                    const size_t num_bytes,
                    const void* start_buffer,
                    const long long int start_offset);
};

struct io_prep_generator {
    const bool _read_op;
    const std::unique_ptr<io_xfer_ctxt>& _xfer_ctxt;
    const size_t _block_size;

    long long int _remaining_bytes;
    long long int _num_io_blocks;
    long long int _remaining_io_blocks;
    long long int _next_iocb_index;

    io_prep_generator(const bool read_op,
                      const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
                      const size_t block_size);

    int prep_iocbs(const int n_iocbs, std::vector<struct iocb*>* iocbs);
};

void* ds_page_aligned_alloc(const size_t size, const bool lock = false);

int get_file_size(const char* filename, long long int& size);
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp
ADDED
@@ -0,0 +1,104 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include "deepspeed_aio_thread.h"

#if defined(__ENABLE_CANN__)
#include "torch_npu/csrc/framework/utils/OpAdapter.h"
#include "torch_npu/csrc/framework/utils/UtilForOpAdapter.h"
#endif

using namespace std;

io_op_desc_t::io_op_desc_t(const bool read_op,
                           const torch::Tensor& buffer,
                           const int fd,
                           const char* filename,
                           const long long int num_bytes,
                           const bool validate)
    : _read_op(read_op),
      _buffer(buffer),
      _fd(fd),
      _filename(filename),
      _num_bytes(num_bytes),
      _validate(validate)
{
    _cpu_buffer = (_buffer.is_cuda() || _buffer.is_xpu()
#if defined(__ENABLE_CANN__)
                   || torch_npu::utils::is_npu(_buffer)
#endif
                   )
                      ? _buffer.to(torch::kCPU).pin_memory()
                      : _buffer;
    _contiguous_buffer = _cpu_buffer.contiguous();
}

char* io_op_desc_t::data_ptr() const { return (char*)_contiguous_buffer.data_ptr(); }

void io_op_desc_t::fini()
{
    if (_read_op && _buffer.is_cuda()) { _buffer.copy_(_cpu_buffer.to(torch::kCUDA)); }
    if (_read_op && _buffer.is_xpu()) { _buffer.copy_(_cpu_buffer.to(torch::kXPU)); }
#if defined(__ENABLE_CANN__)
    if (_read_op && torch_npu::utils::is_npu(_buffer)) {
        auto device = at::Device("npu:0");
        _buffer.copy_(_cpu_buffer.to(device));
    }
#endif
}

deepspeed_aio_thread_t::deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config)
    : _tid(tid),
      _aio_config(aio_config),
      _aio_ctxt(new aio_context(aio_config._block_size, aio_config._queue_depth)),
      _time_to_exit(false)
{
}

deepspeed_aio_thread_t::~deepspeed_aio_thread_t() {}

void deepspeed_aio_thread_t::run()
{
    while (true) {
        std::shared_ptr<struct io_op_desc_t> next_io_op = nullptr;

        {
            std::unique_lock<std::mutex> lock(_work_sync._mutex);
            _work_sync._cond_var.wait(lock,
                                      [this] { return (!_work_queue.empty() || _time_to_exit); });
            if (!_work_queue.empty()) {
                next_io_op = _work_queue.front();
                _work_queue.pop();
            }
        }

        if (next_io_op) {
            const auto base_offset = next_io_op->_num_bytes * _tid;

            std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(
                next_io_op->_fd, base_offset, next_io_op->_num_bytes, next_io_op->data_ptr()));

            if (_aio_config._overlap_events) {
                do_aio_operation_overlap(
                    next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
            } else {
                do_aio_operation_sequential(
                    next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
            }

            {
                std::lock_guard<std::mutex> lock(_complete_sync._mutex);
                _complete_queue.push(next_io_op);
            }
            _complete_sync._cond_var.notify_one();
        }

        if (_time_to_exit) { break; }
    }
}
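deepspeed_aio_thread_t::run above is a conventional condition-variable worker loop: wait until the work queue is non-empty or shutdown is requested, pop one item under the lock, perform the I/O outside the lock, then push the result to a completion queue and notify. Below is a minimal generic sketch of that pattern using plain int work items instead of the DeepSpeed types; the drain-before-exit detail is illustrative and not identical to the code above.

// Generic single-worker condition-variable queue (not DeepSpeed code).
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

std::mutex mtx;
std::condition_variable cv;
std::queue<int> work;
bool time_to_exit = false;

void worker()
{
    while (true) {
        int item = -1;
        bool exit_now = false;
        {
            std::unique_lock<std::mutex> lock(mtx);
            cv.wait(lock, [] { return !work.empty() || time_to_exit; });
            if (!work.empty()) {
                item = work.front();
                work.pop();
            }
            exit_now = time_to_exit && work.empty();
        }
        // The expensive part (the aio submit/drain in the real code) runs outside the lock.
        if (item >= 0) { std::printf("processed %d\n", item); }
        if (exit_now) { break; }
    }
}

int main()
{
    std::thread t(worker);
    {
        std::lock_guard<std::mutex> lock(mtx);
        for (int i = 0; i < 3; ++i) work.push(i);
        time_to_exit = true;  // request shutdown after the queued items
    }
    cv.notify_one();
    t.join();
    return 0;
}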
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h
ADDED
@@ -0,0 +1,59 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <condition_variable>
#include <memory>
#include <queue>
#include "deepspeed_py_aio.h"

struct io_op_desc_t {
    const bool _read_op;
    torch::Tensor _buffer;
    int _fd;
    const std::string _filename;
    const long long int _num_bytes;
    torch::Tensor _cpu_buffer;
    torch::Tensor _contiguous_buffer;
    const bool _validate;

    io_op_desc_t(const bool read_op,
                 const torch::Tensor& buffer,
                 const int fd,
                 const char* filename,
                 const long long int num_bytes,
                 const bool validate);

    char* data_ptr() const;
    void fini();
};

struct thread_sync_t {
    std::mutex _mutex;
    std::condition_variable _cond_var;
};

struct deepspeed_aio_thread_t {
    const int _tid;
    deepspeed_aio_config_t& _aio_config;

    std::unique_ptr<struct aio_context> _aio_ctxt;
    std::queue<std::shared_ptr<struct io_op_desc_t>> _work_queue;
    std::queue<std::shared_ptr<struct io_op_desc_t>> _complete_queue;

    bool _time_to_exit;

    struct thread_sync_t _work_sync;
    struct thread_sync_t _complete_sync;

    deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config);

    ~deepspeed_aio_thread_t();

    void run();
};
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp
ADDED
@@ -0,0 +1,45 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for managing CPU tensors occupying page-locked memory.
*/

#include "deepspeed_pin_tensor.h"

using namespace std;

deepspeed_pin_tensor_t::~deepspeed_pin_tensor_t()
{
    for (auto iter = _locked_tensors.begin(); iter != _locked_tensors.end(); ++iter) {
        munlock(iter->first, iter->second);
    }
    _locked_tensors.clear();
}

torch::Tensor deepspeed_pin_tensor_t::alloc(const size_t num_elem, const at::ScalarType& elem_type)
{
    const auto num_bytes = num_elem * elementSize(elem_type);
    auto pinned_buffer = ds_page_aligned_alloc(num_bytes, true);
    assert(nullptr != pinned_buffer);

    _locked_tensors[pinned_buffer] = num_bytes;

    auto options = torch::TensorOptions().dtype(elem_type).device(torch::kCPU);

    return at::from_blob(pinned_buffer, static_cast<long int>(num_bytes), options);
}

bool deepspeed_pin_tensor_t::free(torch::Tensor& locked_tensor)
{
    auto addr = locked_tensor.data_ptr();
    if (_locked_tensors.find(addr) != _locked_tensors.end()) {
        munlock(addr, _locked_tensors[addr]);
        _locked_tensors.erase(addr);
        return true;
    }

    return false;
}
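deepspeed_pin_tensor_t above pairs ds_page_aligned_alloc(..., lock = true) with munlock in free and the destructor. For reference, a self-contained sketch of the underlying POSIX pattern (page-aligned allocation pinned with mlock, later unpinned and released); the buffer size is hypothetical and this is not DeepSpeed code.

// Page-aligned, pinned allocation lifecycle sketch.
#include <sys/mman.h>  // mlock / munlock
#include <unistd.h>    // sysconf
#include <cstdio>      // perror
#include <cstdlib>     // posix_memalign, free

int main()
{
    const size_t num_bytes = 1 << 20;  // hypothetical 1 MiB buffer

    void* ptr = nullptr;
    if (posix_memalign(&ptr, (size_t)sysconf(_SC_PAGESIZE), num_bytes) != 0) return 1;

    if (mlock(ptr, num_bytes) != 0) {  // pinning can fail if RLIMIT_MEMLOCK is too low
        std::perror("mlock");
        std::free(ptr);
        return 1;
    }

    // ... the buffer can now back a page-locked CPU tensor ...

    munlock(ptr, num_bytes);  // unpin before releasing, as the destructor above does
    std::free(ptr);
    return 0;
}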
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h
ADDED
@@ -0,0 +1,27 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for managing CPU tensors occupying page-locked memory.
TODO: Implement a full-featured manager that
1. Avoid page-locked memory leaks
2. Minimize page-locked memory usage by reducing internal fragmentation
Functionality for managing CPU tensors occupying page-locked memory.
*/

#include <map>
#include "deepspeed_py_aio.h"

struct deepspeed_pin_tensor_t {
    std::map<void*, size_t> _locked_tensors;

    deepspeed_pin_tensor_t() = default;

    ~deepspeed_pin_tensor_t();

    torch::Tensor alloc(const size_t num_elem, const at::ScalarType& elem_type);

    bool free(torch::Tensor& locked_tensor);
};
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp
ADDED
@@ -0,0 +1,125 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Copyright 2020 The Microsoft DeepSpeed Team
Licensed under the MIT license.

Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <cassert>
#include <chrono>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "deepspeed_py_aio.h"

using namespace std;
using namespace std::chrono;

#define DEBUG_DS_AIO_READ 0
#define DEBUG_DS_AIO_WRITE 0

static const std::string c_library_name = "deepspeed_aio";

int deepspeed_py_aio_write(const torch::Tensor& buffer,
                           const char* filename,
                           const int block_size,
                           const int queue_depth,
                           const bool single_submit,
                           const bool overlap_events,
                           const bool validate)
{
    const auto start_time = std::chrono::high_resolution_clock::now();
    deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false);

    const auto fd = open_file(filename, false);
    if (fd == -1) { return -1; }

    auto write_buffer = (char*)buffer.data_ptr();
    const auto num_write_bytes = static_cast<long long int>(buffer.nbytes());
    std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer));
    std::unique_ptr<aio_context> aio_ctxt(new aio_context(config._block_size, config._queue_depth));

    if (config._overlap_events) {
        do_aio_operation_overlap(false, aio_ctxt, xfer_ctxt, &config, nullptr);
    } else {
        do_aio_operation_sequential(false, aio_ctxt, xfer_ctxt, &config, nullptr);
    }
    const std::chrono::duration<double> aio_time =
        std::chrono::high_resolution_clock::now() - start_time;

    close(fd);

    if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); }

    const std::chrono::duration<double> fn_time =
        std::chrono::high_resolution_clock::now() - start_time;
    std::cout << "Elapsed time(usec): "
              << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
              << std::endl;
    return 0;
}

int deepspeed_py_aio_read(torch::Tensor& buffer,
                          const char* filename,
                          const int block_size,
                          const int queue_depth,
                          const bool single_submit,
                          const bool overlap_events,
                          const bool validate)
{
    const auto start_time = std::chrono::high_resolution_clock::now();
    long long num_file_bytes;
    if (-1 == get_file_size(filename, num_file_bytes)) {
        const auto error_code = errno;
        report_file_error(filename, " fstat for read", error_code);
        return -1;
    }

    deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false);
    const auto fd = open_file(filename, true);
    if (fd == -1) { return -1; }

    auto read_buffer = (char*)buffer.data_ptr();
    assert(static_cast<long long int>(buffer.nbytes()) == num_file_bytes);

    std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer));
    std::unique_ptr<aio_context> aio_ctxt(new aio_context(config._block_size, config._queue_depth));

    if (config._overlap_events) {
        do_aio_operation_overlap(true, aio_ctxt, xfer_ctxt, &config, nullptr);
    } else {
        do_aio_operation_sequential(true, aio_ctxt, xfer_ctxt, &config, nullptr);
    }
    const std::chrono::duration<double> aio_time =
        std::chrono::high_resolution_clock::now() - start_time;

    close(fd);

    if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); }

    const std::chrono::duration<double> fn_time =
        std::chrono::high_resolution_clock::now() - start_time;
    std::cout << "Elapsed time(usec): "
              << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
              << std::endl;
    return 0;
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h
ADDED
|
@@ -0,0 +1,31 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Copyright 2020 The Microsoft DeepSpeed Team
Licensed under the MIT license.

Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <deepspeed_aio_common.h>
#include <stdlib.h>
#include <torch/extension.h>

int deepspeed_py_aio_write(const torch::Tensor& buffer,
                           const char* filename,
                           const int block_size,
                           const int queue_depth,
                           const bool single_submit,
                           const bool overlap_events,
                           const bool validate);

int deepspeed_py_aio_read(torch::Tensor& buffer,
                          const char* filename,
                          const int block_size,
                          const int queue_depth,
                          const bool single_submit,
                          const bool overlap_events,
                          const bool validate);
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp
ADDED
|
@@ -0,0 +1,298 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Copyright 2020 The Microsoft DeepSpeed Team
Licensed under the MIT license.

Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include "deepspeed_py_aio_handle.h"

using namespace std;

static void _start_aio_thread(std::shared_ptr<struct deepspeed_aio_thread_t> ctxt) { ctxt->run(); }

deepspeed_aio_handle_t::deepspeed_aio_handle_t(const int block_size,
                                               const int queue_depth,
                                               const bool single_submit,
                                               const bool overlap_events,
                                               const int num_threads)
    : _aio_ctxt(new aio_context(block_size, queue_depth)),
      _single_submit(single_submit),
      _overlap_events(overlap_events),
      _num_threads(num_threads),
      _aio_config(block_size, queue_depth, single_submit, overlap_events, false),
      _num_pending_ops(0),
      _pinned_tensor_mgr(new deepspeed_pin_tensor_t())
{
    for (auto i = 0; i < num_threads; ++i) {
        _thread_contexts.push_back(std::make_shared<deepspeed_aio_thread_t>(i, _aio_config));
    }

    for (auto& ctxt : _thread_contexts) {
        _threads.push_back(std::thread(_start_aio_thread, ctxt));
    }
}

deepspeed_aio_handle_t::~deepspeed_aio_handle_t()
{
    _stop_threads();
    for (auto& thr : _threads) { thr.join(); }
}

const int deepspeed_aio_handle_t::get_block_size() const
{
    return _aio_ctxt ? _aio_ctxt->_block_size : -1;
}

const int deepspeed_aio_handle_t::get_queue_depth() const
{
    return _aio_ctxt ? _aio_ctxt->_queue_depth : -1;
}

const bool deepspeed_aio_handle_t::get_single_submit() const { return _single_submit; }

const bool deepspeed_aio_handle_t::get_overlap_events() const { return _overlap_events; }

const int deepspeed_aio_handle_t::get_thread_count() const { return _num_threads; }

int deepspeed_aio_handle_t::read(torch::Tensor& buffer, const char* filename, const bool validate)
{
    const auto start_time = std::chrono::high_resolution_clock::now();

    assert(_aio_ctxt);

    long long num_file_bytes;
    if (-1 == get_file_size(filename, num_file_bytes)) {
        const auto error_code = errno;
        report_file_error(filename, " fstat for read", error_code);
        return -1;
    }
    assert(static_cast<long long int>(buffer.nbytes()) == num_file_bytes);

    const auto fd = open_file(filename, true);
    if (fd == -1) { return -1; }

    auto read_buffer = (char*)buffer.data_ptr();
    std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer));

    if (_aio_config._overlap_events) {
        do_aio_operation_overlap(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
    } else {
        do_aio_operation_sequential(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
    }

    close(fd);
    const std::chrono::duration<double> aio_time =
        std::chrono::high_resolution_clock::now() - start_time;

    if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); }
    const std::chrono::duration<double> fn_time =
        std::chrono::high_resolution_clock::now() - start_time;
    std::cout << "Elapsed time(usec): "
              << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
              << std::endl;
    return 0;
}

int deepspeed_aio_handle_t::write(const torch::Tensor& buffer,
                                  const char* filename,
                                  const bool validate)
{
    assert(_aio_ctxt);

    const auto start_time = std::chrono::high_resolution_clock::now();

    const auto fd = open_file(filename, false);
    if (fd == -1) { return -1; }

    auto write_buffer = (char*)buffer.data_ptr();
    const auto num_write_bytes = static_cast<long long int>(buffer.nbytes());
    std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer));

    if (_aio_config._overlap_events) {
        do_aio_operation_overlap(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
    } else {
        do_aio_operation_sequential(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
    }
    const std::chrono::duration<double> aio_time =
        std::chrono::high_resolution_clock::now() - start_time;

    close(fd);

    if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); }

    const std::chrono::duration<double> fn_time =
        std::chrono::high_resolution_clock::now() - start_time;
    std::cout << "Elapsed time(usec): "
              << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
              << std::endl;
    return 0;
}

void deepspeed_aio_handle_t::_schedule_aio_work(std::shared_ptr<struct io_op_desc_t> scheduled_op)
{
    for (auto& ctxt : _thread_contexts) {
        {
            std::lock_guard<std::mutex> lock(ctxt->_work_sync._mutex);
            ctxt->_work_queue.push(scheduled_op);
        }
        ctxt->_work_sync._cond_var.notify_one();
    }
    _num_pending_ops++;
}

std::shared_ptr<struct io_op_desc_t> deepspeed_aio_handle_t::_wait_for_aio_work()
{
    std::shared_ptr<struct io_op_desc_t> completed_op = nullptr;
    for (auto& ctxt : _thread_contexts) {
        std::unique_lock<std::mutex> lock(ctxt->_complete_sync._mutex);
        ctxt->_complete_sync._cond_var.wait(lock,
                                            [ctxt] { return !ctxt->_complete_queue.empty(); });
        completed_op = ctxt->_complete_queue.front();
        ctxt->_complete_queue.pop();
    }
    return completed_op;
}

void deepspeed_aio_handle_t::_stop_threads()
{
    assert(0 == _num_pending_ops);
    for (auto& ctxt : _thread_contexts) {
        {
            std::lock_guard<std::mutex> lock(ctxt->_work_sync._mutex);
            ctxt->_time_to_exit = true;
        }
        ctxt->_work_sync._cond_var.notify_one();
    }
}

int deepspeed_aio_handle_t::wait()
{
    assert(_num_pending_ops > 0);
    auto num_completed_ops = 0;

    while (_num_pending_ops > 0) {
        auto completed_op = _wait_for_aio_work();

        completed_op->fini();

        close(completed_op->_fd);

        if (completed_op->_validate) {
            validate_aio_operation(completed_op->_read_op,
                                   completed_op->_filename.c_str(),
                                   completed_op->data_ptr(),
                                   _num_threads * completed_op->_num_bytes);
        }
        --_num_pending_ops;
        ++num_completed_ops;
    }

    return num_completed_ops;
}

bool deepspeed_aio_handle_t::_is_valid_parallel_aio_op(const bool read_op,
                                                       const long long int num_bytes)
{
    const auto op_string = read_op ? "Read" : "Write";
    if (num_bytes % get_thread_count()) {
        std::cout << "deepspeed_aio failure: parallel " << op_string << " num_bytes = " << num_bytes
                  << " not divisible by thread count = " << get_thread_count() << std::endl;
        return false;
    }

    return true;
}

int deepspeed_aio_handle_t::pread(const torch::Tensor& buffer,
                                  const char* filename,
                                  const bool validate,
                                  const bool async)
{
    long long num_file_bytes;
    if (-1 == get_file_size(filename, num_file_bytes)) {
        const auto error_code = errno;
        report_file_error(filename, " fstat for read", error_code);
        return -1;
    }
    const auto buffer_bytes = static_cast<long long int>(buffer.nbytes());
    if (buffer_bytes != num_file_bytes) {
        std::cout << filename << ": buffer nbytes != file bytes " << buffer_bytes
                  << " != " << num_file_bytes << std::endl;
    }
    assert(static_cast<long long int>(buffer.nbytes()) == num_file_bytes);
    assert((num_file_bytes % _num_threads) == 0);

    if (!_is_valid_parallel_aio_op(true, num_file_bytes)) { return -1; }

    const auto fd = open_file(filename, true);
    if (fd == -1) { return -1; }

    auto scheduled_op = std::make_shared<io_op_desc_t>(
        true, buffer, fd, filename, (num_file_bytes / _num_threads), validate);

    _schedule_aio_work(scheduled_op);

    if (async) { return 0; }

    return wait();
}

int deepspeed_aio_handle_t::pwrite(const torch::Tensor& buffer,
                                   const char* filename,
                                   const bool validate,
                                   const bool async)
{
    const auto num_write_bytes = static_cast<long long int>(buffer.nbytes());
    assert((num_write_bytes % _num_threads) == 0);

    if (!_is_valid_parallel_aio_op(false, num_write_bytes)) { return -1; }

    const auto fd = open_file(filename, false);
    if (fd == -1) { return -1; }

    auto scheduled_op = std::make_shared<io_op_desc_t>(
        false, buffer, fd, filename, (num_write_bytes / _num_threads), validate);

    _schedule_aio_work(scheduled_op);

    if (async) { return 0; }

    return wait();
}

int deepspeed_aio_handle_t::sync_pread(torch::Tensor& buffer, const char* filename)
{
    return pread(buffer, filename, false, false);
}

int deepspeed_aio_handle_t::sync_pwrite(const torch::Tensor& buffer, const char* filename)
{
    return pwrite(buffer, filename, false, false);
}

int deepspeed_aio_handle_t::async_pread(torch::Tensor& buffer, const char* filename)
{
    return pread(buffer, filename, false, true);
}

int deepspeed_aio_handle_t::async_pwrite(const torch::Tensor& buffer, const char* filename)
{
    return pwrite(buffer, filename, false, true);
}

at::Tensor deepspeed_aio_handle_t::new_cpu_locked_tensor(const size_t num_elem,
                                                         const torch::Tensor& example_tensor)
{
    return _pinned_tensor_mgr->alloc(num_elem, example_tensor.scalar_type());
}

bool deepspeed_aio_handle_t::free_cpu_locked_tensor(torch::Tensor& locked_tensor)
{
    return _pinned_tensor_mgr->free(locked_tensor);
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h
ADDED
|
@@ -0,0 +1,77 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <condition_variable>
#include <memory>
#include "deepspeed_aio_thread.h"
#include "deepspeed_pin_tensor.h"

struct deepspeed_aio_handle_t {
    std::unique_ptr<struct aio_context> _aio_ctxt;
    const bool _single_submit;
    const bool _overlap_events;
    const int _num_threads;
    deepspeed_aio_config_t _aio_config;

    std::vector<std::shared_ptr<struct deepspeed_aio_thread_t>> _thread_contexts;
    std::vector<std::thread> _threads;
    int _num_pending_ops;
    std::unique_ptr<struct deepspeed_pin_tensor_t> _pinned_tensor_mgr;

    deepspeed_aio_handle_t(const int block_size,
                           const int queue_depth,
                           const bool single_submit,
                           const bool overlap_events,
                           const int num_threads);

    ~deepspeed_aio_handle_t();

    const int get_block_size() const;
    const int get_queue_depth() const;
    const bool get_single_submit() const;
    const bool get_overlap_events() const;
    const int get_thread_count() const;

    int read(torch::Tensor& buffer, const char* filename, const bool validate);

    int write(const torch::Tensor& buffer, const char* filename, const bool validate);

    int pread(const torch::Tensor& buffer,
              const char* filename,
              const bool validate,
              const bool async);

    int pwrite(const torch::Tensor& buffer,
               const char* filename,
               const bool validate,
               const bool async);

    int sync_pread(torch::Tensor& buffer, const char* filename);

    int sync_pwrite(const torch::Tensor& buffer, const char* filename);

    int async_pread(torch::Tensor& buffer, const char* filename);

    int async_pwrite(const torch::Tensor& buffer, const char* filename);

    // TODO: Make API's args to be shape and dtype.
    torch::Tensor new_cpu_locked_tensor(const size_t num_elem, const torch::Tensor& example_tensor);

    bool free_cpu_locked_tensor(torch::Tensor&);

    int wait();

    void _stop_threads();

    void _schedule_aio_work(std::shared_ptr<struct io_op_desc_t> scheduled_op);

    std::shared_ptr<struct io_op_desc_t> _wait_for_aio_work();

    bool _is_valid_parallel_aio_op(const bool read_op, const long long int num_bytes);
};
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp
ADDED
|
@@ -0,0 +1,135 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include "deepspeed_py_copy.h"
#include <omp.h>

#define ROUND_DOWN(size, step) ((size) & ~((step)-1))

#if defined(__AVX512__) or defined(__AVX256__)
union AVX_Data {
#if defined(__AVX512__)
    __m512 data;
#else
    __m256 data;
#endif
};
#endif

static void helper_memcpy_1(float* dest, float* src, size_t param_size)
{
    size_t rounded_size = 0;

#if defined(__AVX512__) or defined(__AVX256__)

    rounded_size = ROUND_DOWN(param_size, SIMD_WIDTH);

    for (size_t t = 0; t < rounded_size; t += TILE) {
        size_t copy_size = TILE;
        if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
        size_t offset = copy_size + t;
#pragma omp parallel for
        for (size_t i = t; i < offset; i += SIMD_WIDTH) {
            AVX_Data src_4;
            src_4.data = SIMD_LOAD(src + i);

            SIMD_STORE(dest + i, src_4.data);
        }
    }

#endif

    if (param_size > rounded_size) {
#pragma omp parallel for
        for (size_t k = rounded_size; k < param_size; k++) { dest[k] = src[k]; }
    }
}

static void helper_memcpy_4(float* dest, float* src, size_t param_size)
{
    size_t rounded_size = 0;

#if defined(__AVX512__) or defined(__AVX256__)

    rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2));

    for (size_t t = 0; t < rounded_size; t += TILE) {
        size_t copy_size = TILE;
        if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
        size_t offset = copy_size + t;
#pragma omp parallel for
        for (size_t i = t; i < offset; i += (SIMD_WIDTH << 2)) {
            AVX_Data src_4[4];
            src_4[0].data = SIMD_LOAD(src + i);
            src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH);
            src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1));
            src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3);

            SIMD_STORE(dest + i, src_4[0].data);
            SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data);
            SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data);
            SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data);
        }
    }
#endif
    if (param_size > rounded_size)
        helper_memcpy_1((dest + rounded_size), (src + rounded_size), (param_size - rounded_size));
}

static void helper_mempcy_8(float* dest, float* src, size_t param_size)
{
    size_t rounded_size = 0;

#if defined(__AVX512__) or defined(__AVX256__)

    rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2));

    for (size_t t = 0; t < rounded_size; t += TILE) {
        size_t copy_size = TILE;
        if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
        size_t offset = copy_size + t;
#pragma omp parallel for
        for (size_t i = t; i < offset; i += (SIMD_WIDTH << 3)) {
            AVX_Data src_4[8];
            src_4[0].data = SIMD_LOAD(src + i);
            src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH);
            src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1));
            src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3);
            src_4[4].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 2));
            src_4[5].data = SIMD_LOAD(src + i + SIMD_WIDTH * 5);
            src_4[6].data = SIMD_LOAD(src + i + SIMD_WIDTH * 6);
            src_4[7].data = SIMD_LOAD(src + i + SIMD_WIDTH * 7);

            SIMD_STORE(dest + i, src_4[0].data);
            SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data);
            SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data);
            SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data);
            SIMD_STORE(dest + i + (SIMD_WIDTH << 2), src_4[4].data);
            SIMD_STORE(dest + i + SIMD_WIDTH * 5, src_4[5].data);
            SIMD_STORE(dest + i + SIMD_WIDTH * 6, src_4[6].data);
            SIMD_STORE(dest + i + SIMD_WIDTH * 7, src_4[7].data);
        }
    }
#endif
    if (param_size > rounded_size)
        helper_memcpy_4((dest + rounded_size), (src + rounded_size), (param_size - rounded_size));
}

int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src)
{
    auto dest_c = dest.contiguous();
    auto src_c = src.contiguous();

    float* dest_ptr = (float*)dest_c.data_ptr();
    float* src_ptr = (float*)src_c.data_ptr();

    helper_mempcy_8(dest_ptr, src_ptr, dest_c.size(0));

    return 0;
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h
ADDED
|
@@ -0,0 +1,46 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Copyright 2020 The Microsoft DeepSpeed Team
Licensed under the MIT license.

Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#if (__x86_64__ || __i386__)
#include <cpuid.h>
#include <x86intrin.h>
#endif

#include <deepspeed_aio_common.h>
#include <stdlib.h>
#include <torch/extension.h>

#define TILE (1024 * 1024 * 1024)

#if defined(__AVX512__)
#define SIMD_STORE(a, d) _mm512_storeu_ps(a, d)
#define SIMD_LOAD(x) _mm512_loadu_ps(x)
#define SIMD_SET(x) _mm512_set1_ps(x)
#define SIMD_MUL(x, y) _mm512_mul_ps(x, y)
#define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c)
#define SIMD_SQRT(x) _mm512_sqrt_ps(x)
#define SIMD_DIV(x, y) _mm512_div_ps(x, y)
#define SIMD_WIDTH 16
#else
#if defined(__AVX256__)
#define SIMD_STORE(a, d) _mm256_storeu_ps(a, d)
#define SIMD_LOAD(x) _mm256_loadu_ps(x)
#define SIMD_SET(x) _mm256_set1_ps(x)
#define SIMD_MUL(x, y) _mm256_mul_ps(x, y)
#define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c)
#define SIMD_SQRT(x) _mm256_sqrt_ps(x)
#define SIMD_DIV(x, y) _mm256_div_ps(x, y)
#define SIMD_WIDTH 8
#endif
#endif

int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src);
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp
ADDED
|
@@ -0,0 +1,46 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <torch/extension.h>
#include "deepspeed_py_aio_handle.h"
#include "deepspeed_py_copy.h"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("aio_read", &deepspeed_py_aio_read, "DeepSpeed Asynchronous I/O Read");

    m.def("aio_write", &deepspeed_py_aio_write, "DeepSpeed Asynchronous I/O Write");

    m.def("deepspeed_memcpy", &deepspeed_py_memcpy, "DeepSpeed Memory Copy");

    py::class_<deepspeed_aio_handle_t>(m, "aio_handle")
        .def(py::init<const int, const int, const bool, const bool, const int>())

        .def("get_block_size", &deepspeed_aio_handle_t::get_block_size)
        .def("get_queue_depth", &deepspeed_aio_handle_t::get_queue_depth)
        .def("get_single_submit", &deepspeed_aio_handle_t::get_single_submit)
        .def("get_overlap_events", &deepspeed_aio_handle_t::get_overlap_events)
        .def("get_thread_count", &deepspeed_aio_handle_t::get_thread_count)

        .def("read", &deepspeed_aio_handle_t::read)
        .def("write", &deepspeed_aio_handle_t::write)

        .def("pread", &deepspeed_aio_handle_t::pread)
        .def("pwrite", &deepspeed_aio_handle_t::pwrite)

        .def("sync_pread", &deepspeed_aio_handle_t::sync_pread)
        .def("sync_pwrite", &deepspeed_aio_handle_t::sync_pwrite)
        .def("async_pread", &deepspeed_aio_handle_t::async_pread)
        .def("async_pwrite", &deepspeed_aio_handle_t::async_pwrite)

        .def("new_cpu_locked_tensor", &deepspeed_aio_handle_t::new_cpu_locked_tensor)
        .def("free_cpu_locked_tensor", &deepspeed_aio_handle_t::free_cpu_locked_tensor)

        .def("wait", &deepspeed_aio_handle_t::wait);
}
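Editorial note: the module above only defines the Python bindings. A minimal usage sketch of the `aio_handle` class from Python, assuming the extension has already been built and loaded (the use of DeepSpeed's `AsyncIOBuilder` as the loading mechanism is an assumption here, not something shown in this diff):

    # Hypothetical usage sketch of the bindings defined in py_ds_aio.cpp.
    # Assumes the async_io extension is buildable on this system (libaio present).
    import torch
    from deepspeed.ops.op_builder import AsyncIOBuilder  # assumed loading path

    aio_ops = AsyncIOBuilder().load()

    # Constructor args mirror py::init<...>: block_size, queue_depth,
    # single_submit, overlap_events, num_threads.
    h = aio_ops.aio_handle(1024 * 1024, 16, False, False, 1)

    src = torch.randn(4 * 1024 * 1024, dtype=torch.float32, device='cpu')
    dst = h.new_cpu_locked_tensor(src.numel(), src)  # page-locked buffer

    h.sync_pwrite(src, '/tmp/ds_aio_demo.bin')   # blocking parallel write
    h.async_pread(dst, '/tmp/ds_aio_demo.bin')   # schedule read on worker threads
    h.wait()                                     # block until pending ops complete

    assert torch.equal(src, dst)
    h.free_cpu_locked_tensor(dst)

Note that `pread`/`pwrite` require the transfer size to be divisible by the handle's thread count, and `pread` expects the buffer's byte size to match the file size, as enforced in deepspeed_py_aio_handle.cpp.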
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json
ADDED
|
@@ -0,0 +1,29 @@
{
  "block_size": [
    "128K",
    "256K",
    "1M"
  ],
  "queue_depth": [
    4,
    16,
    32
  ],
  "io_parallel": [
    1,
    2,
    4,
    8
  ],
  "single_submit": [
    true,
    false
  ],
  "overlap_events": [
    true,
    false
  ],
  "threads": [
    1
  ]
}
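Editorial note: each key in this config lists candidate values for one I/O knob; a sweep driver would typically enumerate their Cartesian product and benchmark each combination. A hypothetical illustration of that expansion (not the actual py_test driver in this package):

    # Illustrative only: expand the sweep config into individual run settings.
    import itertools, json

    with open('single_process_config.json') as f:
        cfg = json.load(f)

    keys = sorted(cfg)
    for combo in itertools.product(*(cfg[k] for k in keys)):
        run_params = dict(zip(keys, combo))
        print(run_params)  # e.g. {'block_size': '128K', 'queue_depth': 4, ...}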
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/common/custom_cuda_kernel.cu
ADDED
|
@@ -0,0 +1,44 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include "custom_cuda_layers.h"

__global__ void param_update_kernel(const float* input, __half* output, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;

    if (id < size) { output[id] = (__half)input[id]; }
}

void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream)
{
    int threads = 1024;

    dim3 grid_dim((size - 1) / threads + 1);
    dim3 block_dim(threads);

    param_update_kernel<<<grid_dim, block_dim, 0, stream>>>(input, output, size);
}

__global__ void param_update_kernel_half(const float* input, __half* output, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    __half2* output_cast = reinterpret_cast<__half2*>(output);
    if (id < size) {
        float input_f = input[id];
        __half2* input_h = reinterpret_cast<__half2*>(&input_f);
        output_cast[id] = *input_h;
    }
}

void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream)
{
    int threads = 1024;
    size /= 2;
    dim3 grid_dim((size - 1) / threads + 1);
    dim3 block_dim(threads);

    param_update_kernel_half<<<grid_dim, block_dim, 0, stream>>>(input, output, size);
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention.cpp
ADDED
|
@@ -0,0 +1,62 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include <torch/extension.h>

void attention_impl(torch::Tensor& q,
                    torch::Tensor& k,
                    torch::Tensor& v,
                    torch::Tensor& bias1,
                    torch::Tensor& bias2,
                    torch::Tensor& o,
                    torch::Tensor& lse);
void attention(torch::Tensor& q,
               torch::Tensor& k,
               torch::Tensor& v,
               torch::Tensor& bias1,
               torch::Tensor& bias2,
               torch::Tensor& o,
               torch::Tensor& lse)
{
    attention_impl(q, k, v, bias1, bias2, o, lse);
}

void attention_back_impl(torch::Tensor& go,
                         torch::Tensor& q,
                         torch::Tensor& k,
                         torch::Tensor& v,
                         torch::Tensor& o,
                         torch::Tensor& lse,
                         torch::Tensor& delta,
                         torch::Tensor& bias1,
                         torch::Tensor& bias2,
                         torch::Tensor& gq,
                         torch::Tensor& gk,
                         torch::Tensor& gv,
                         torch::Tensor& gb1,
                         torch::Tensor& gb2);
void attention_bwd(torch::Tensor& go,
                   torch::Tensor& q,
                   torch::Tensor& k,
                   torch::Tensor& v,
                   torch::Tensor& o,
                   torch::Tensor& lse,
                   torch::Tensor& delta,
                   torch::Tensor& bias1,
                   torch::Tensor& bias2,
                   torch::Tensor& gq,
                   torch::Tensor& gk,
                   torch::Tensor& gv,
                   torch::Tensor& gb1,
                   torch::Tensor& gb2)
{
    attention_back_impl(go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("attention", &attention, "");
    m.def("attention_bwd", &attention_bwd, "");
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_back.cu
ADDED
|
@@ -0,0 +1,218 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include <type_traits>
#include "gemm_kernel_utils.h"
#include "kernel_backward.h"
#include "transform/bias_broadcast.h"

constexpr auto kBlockSizeI = 64;
constexpr auto kBlockSizeJ = 64;

template <typename arch,
          typename scalar_t,
          typename torch_scalar_t,
          template <typename, typename, typename>
          class Broadcast1_,
          template <typename, typename, typename>
          class Broadcast2_>
typename std::enable_if<!CheckArch<arch, scalar_t>::value>::type attention_back_impl_template(
    torch::Tensor& go,
    torch::Tensor& q,
    torch::Tensor& k,
    torch::Tensor& v,
    torch::Tensor& o,
    torch::Tensor& lse,
    torch::Tensor& delta,
    torch::Tensor& bias1,
    torch::Tensor& bias2,
    torch::Tensor& gq,
    torch::Tensor& gk,
    torch::Tensor& gv,
    torch::Tensor& gb1,
    torch::Tensor& gb2)
{
    EVOFORMER_CHECK(false, "Unsupported GPU and data type combination")
}

template <typename arch,
          typename scalar_t,
          typename torch_scalar_t,
          template <typename, typename, typename>
          class Broadcast1_,
          template <typename, typename, typename>
          class Broadcast2_>
typename std::enable_if<CheckArch<arch, scalar_t>::value>::type attention_back_impl_template(
    torch::Tensor& go,
    torch::Tensor& q,
    torch::Tensor& k,
    torch::Tensor& v,
    torch::Tensor& o,
    torch::Tensor& lse,
    torch::Tensor& delta,
    torch::Tensor& bias1,
    torch::Tensor& bias2,
    torch::Tensor& gq,
    torch::Tensor& gk,
    torch::Tensor& gv,
    torch::Tensor& gb1,
    torch::Tensor& gb2)
{
    constexpr bool kPreload_ = arch::kMinComputeCapability >= 80;
    using Kernel = AttentionBackwardKernel<arch,
                                           scalar_t,     // scalar_t
                                           true,         // kIsAligned_
                                           false,        // kApplyDropout_
                                           kPreload_,    // kPreload_
                                           kBlockSizeI,  // kBlockSizeI_,
                                           kBlockSizeJ,  // kBlockSizeJ_,
                                           64,           // kMaxK
                                           Broadcast1_,
                                           Broadcast2_>;
    int head_size = q.size(-1);
    int head_number = q.size(-2);
    int seq_length = q.size(-3);
    auto q_view = q.view({-1, seq_length, head_number, head_size});
    auto k_view = k.view({-1, seq_length, head_number, head_size});
    auto v_view = v.view({-1, seq_length, head_number, head_size});
    auto o_view = o.view({-1, seq_length, head_number, head_size});
    auto do_view = go.view({-1, seq_length, head_number, head_size});
    auto dk_view = gk.view({-1, seq_length, head_number, head_size});
    auto dv_view = gv.view({-1, seq_length, head_number, head_size});
    auto dq_view = gq.view({-1, seq_length, head_number, head_size});
    auto q_ptr = reinterpret_cast<scalar_t*>(q.data_ptr<torch_scalar_t>());
    auto k_ptr = reinterpret_cast<scalar_t*>(k.data_ptr<torch_scalar_t>());
    auto v_ptr = reinterpret_cast<scalar_t*>(v.data_ptr<torch_scalar_t>());
    auto o_ptr = reinterpret_cast<scalar_t*>(o.data_ptr<torch_scalar_t>());
    auto do_ptr = reinterpret_cast<scalar_t*>(go.data_ptr<torch_scalar_t>());
    auto dk_ptr = reinterpret_cast<scalar_t*>(gk.data_ptr<torch_scalar_t>());
    auto dv_ptr = reinterpret_cast<scalar_t*>(gv.data_ptr<torch_scalar_t>());
    auto dq_ptr = reinterpret_cast<scalar_t*>(gq.data_ptr<torch_scalar_t>());
    auto db1_ptr = gb1.size(0) > 0 ? reinterpret_cast<float*>(gb1.data_ptr<float>()) : nullptr;
    auto db2_ptr = gb2.size(0) > 0 ? reinterpret_cast<float*>(gb2.data_ptr<float>()) : nullptr;
    auto lse_ptr = reinterpret_cast<float*>(lse.data_ptr<float>());
    auto delta_ptr = reinterpret_cast<float*>(delta.data_ptr<float>());
    auto bias1_ptr = reinterpret_cast<scalar_t*>(bias1.data_ptr<torch_scalar_t>());
    auto bias2_ptr = reinterpret_cast<scalar_t*>(bias2.data_ptr<torch_scalar_t>());
    static_assert(Kernel::kKernelComputesDelta, "Kernel must compute delta");

    typename Kernel::Params p;
    p.query_ptr = q_ptr;
    p.key_ptr = k_ptr;
    p.value_ptr = v_ptr;
    p.logsumexp_ptr = lse_ptr;
    p.output_ptr = o_ptr;
    p.grad_output_ptr = do_ptr;
    p.delta_ptr = delta_ptr;
    p.grad_query_ptr = dq_ptr;
    p.grad_key_ptr = dk_ptr;
    p.grad_value_ptr = dv_ptr;

    p.grad_bias1_ptr = db1_ptr;
    p.grad_bias2_ptr = db2_ptr;
    p.B = q.size(0);
    p.N = q.size(1);
    p.bias1_ptr = bias1.size(0) ? bias1_ptr : nullptr;
    p.bias2_ptr = bias2.size(0) ? bias2_ptr : nullptr;

    p.scale = 1.0f / sqrtf(head_size);

    p.head_dim = head_size;
    p.head_dim_value = head_size;
    p.num_queries = seq_length;
    p.num_keys = seq_length;
    p.num_heads = head_number;

    p.q_strideM = q_view.stride(-3);
    p.k_strideM = k_view.stride(-3);
    p.v_strideM = v_view.stride(-3);
    p.gO_strideM = do_view.stride(-3);
    p.o_strideH = o_view.stride(-2);
    p.q_strideH = q_view.stride(-2);
    p.k_strideH = k_view.stride(-2);
    p.v_strideH = v_view.stride(-2);
    p.o_strideB = o_view.stride(-4);
    p.q_strideB = q_view.stride(-4);
    p.k_strideB = k_view.stride(-4);
    p.v_strideB = v_view.stride(-4);
    p.lse_strideB = lse.stride(-3);
    p.lse_strideH = lse.stride(-2);
    p.delta_strideB = delta.stride(-3);
    p.delta_strideH = delta.stride(-2);
    p.num_batches = q_view.size(-4);

    p.gO_strideB = do_view.stride(-4);
    p.gQ_strideB = dq_view.stride(-4);
    p.gK_strideB = dk_view.stride(-4);
    p.gV_strideB = dv_view.stride(-4);
    p.gO_strideH = do_view.stride(-2);
    p.gQ_strideH = dq_view.stride(-2);
    p.gK_strideH = dk_view.stride(-2);
    p.gV_strideH = dv_view.stride(-2);

    torch::Tensor workspace = torch::empty(p.workspace_size() / 4, lse.options());
    p.workspace = workspace.data_ptr<float>();

    auto kernel_fn = attention_kernel_backward_batched_impl<Kernel>;
    size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
    cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, int(smem_bytes));
    if (!Kernel::check_supported(p)) { throw std::runtime_error("Unsupported parameters"); }
    kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
}

#define CODE(scalar_t, torch_scalar_t)                                              \
    do {                                                                             \
        if (bias1.size(0) == 0 && bias2.size(0) == 0) {                              \
            attention_back_impl_template<ArchTag,                                    \
                                         scalar_t,                                   \
                                         torch_scalar_t,                             \
                                         BroadcastNoLoad,                            \
                                         BroadcastNoLoad>(                           \
                go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2);     \
        } else if (bias1.size(0) > 0 && bias2.size(0) > 0) {                         \
            attention_back_impl_template<ArchTag,                                    \
                                         scalar_t,                                   \
                                         torch_scalar_t,                             \
                                         BroadcastA,                                 \
                                         BroadcastB>(                                \
                go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2);     \
        } else if (bias1.size(0) > 0) {                                              \
            attention_back_impl_template<ArchTag,                                    \
                                         scalar_t,                                   \
                                         torch_scalar_t,                             \
                                         BroadcastA,                                 \
                                         BroadcastNoLoad>(                           \
                go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2);     \
        } else {                                                                     \
            attention_back_impl_template<ArchTag,                                    \
                                         scalar_t,                                   \
                                         torch_scalar_t,                             \
                                         BroadcastNoLoad,                            \
                                         BroadcastB>(                                \
                go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2);     \
        }                                                                            \
    } while (0)

void attention_back_impl(torch::Tensor& go,
                         torch::Tensor& q,
                         torch::Tensor& k,
                         torch::Tensor& v,
                         torch::Tensor& o,
                         torch::Tensor& lse,
                         torch::Tensor& delta,
                         torch::Tensor& bias1,
                         torch::Tensor& bias2,
                         torch::Tensor& gq,
                         torch::Tensor& gk,
                         torch::Tensor& gv,
                         torch::Tensor& gb1,
                         torch::Tensor& gb2)
{
    cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
    DISPATCH_ARCHTAG(prop->major * 10 + prop->minor,
                     DISPATCH_TYPES(q, { CODE(scalar_t, torch_scalar_t); }));
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_cu.cu
ADDED
|
@@ -0,0 +1,160 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "gemm_kernel_utils.h"
#include "kernel_forward.h"
#include "transform/bias_broadcast.h"

template <typename arch,
          typename scalar_t,
          typename torch_scalar_t,
          template <typename, typename, typename>
          class Broadcast1_,
          template <typename, typename, typename>
          class Broadcast2_>
typename std::enable_if<!CheckArch<arch, scalar_t>::value>::type attention_impl_template(
    torch::Tensor& q,
    torch::Tensor& k,
    torch::Tensor& v,
    torch::Tensor& bias1,
    torch::Tensor& bias2,
    torch::Tensor& o,
    float* lse_ptr)
{
    EVOFORMER_CHECK(false, "Unsupported GPU and data type combination")
}

template <typename arch,
          typename scalar_t,
          typename torch_scalar_t,
          template <typename, typename, typename>
          class Broadcast1_,
          template <typename, typename, typename>
          class Broadcast2_>
typename std::enable_if<CheckArch<arch, scalar_t>::value>::type attention_impl_template(
    torch::Tensor& q,
    torch::Tensor& k,
    torch::Tensor& v,
    torch::Tensor& bias1,
    torch::Tensor& bias2,
    torch::Tensor& o,
    float* lse_ptr)
{
    // Attention definition goes here, replaced with BroadcastType1 and
    // BroadcastType2
    using Attention = AttentionKernel<scalar_t, /* scalar_t */
                                      arch,     /* ArchTag */
                                      true,     /* Memory is aligned */
                                      64,
                                      64,
                                      true,
                                      true, /* Supports bias */
                                      Broadcast1_,
                                      Broadcast2_>;

    static_assert(!Attention::kNeedsOutputAccumulatorBuffer,
                  "This test does not support output accumulator buffer");
    int head_size = q.size(-1);
    int head_number = q.size(-2);
    int seq_length = q.size(-3);
    auto q_view = q.view({-1, seq_length, head_number, head_size});
    auto k_view = k.view({-1, seq_length, head_number, head_size});
    auto v_view = v.view({-1, seq_length, head_number, head_size});
    auto o_view = o.view({-1, seq_length, head_number, head_size});
    int batch_size = q_view.size(0);
    auto q_ptr = reinterpret_cast<scalar_t*>(q.data_ptr<torch_scalar_t>());
    auto k_ptr = reinterpret_cast<scalar_t*>(k.data_ptr<torch_scalar_t>());
    auto v_ptr = reinterpret_cast<scalar_t*>(v.data_ptr<torch_scalar_t>());
    auto o_ptr = reinterpret_cast<scalar_t*>(o.data_ptr<torch_scalar_t>());

    auto bias1_ptr = reinterpret_cast<scalar_t*>(bias1.data_ptr<torch_scalar_t>());
    auto bias2_ptr = reinterpret_cast<scalar_t*>(bias2.data_ptr<torch_scalar_t>());

    typename Attention::Params p;
    {  // set parameters
        p.query_ptr = q_ptr;
        p.key_ptr = k_ptr;
        p.value_ptr = v_ptr;
        p.logsumexp_ptr = lse_ptr;  // Only needed for bw
        p.output_accum_ptr = nullptr;
        p.output_ptr = o_ptr;
        p.scale = 1.0f / sqrt(float(head_size));

        p.bias1_ptr = bias1_ptr;
        p.bias2_ptr = bias2_ptr;
        p.B = q.size(0);
        p.N = q.size(1);

        p.num_heads = head_number;
        p.num_batches = batch_size;
        p.head_dim = head_size;
        p.head_dim_value = head_size;
        p.num_queries = seq_length;
        p.num_keys = seq_length;

        // All tensors are in BMHK shapes
        p.q_strideH = q_view.stride(-2);
        p.k_strideH = k_view.stride(-2);
        p.v_strideH = v_view.stride(-2);
        p.q_strideM = q_view.stride(-3);
        p.k_strideM = k_view.stride(-3);
        p.v_strideM = v_view.stride(-3);
        p.o_strideM = o_view.stride(-3);
        p.q_strideB = q_view.stride(-4);
        p.k_strideB = k_view.stride(-4);
        p.v_strideB = v_view.stride(-4);
    }

    constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>;
    int smem_bytes = sizeof(typename Attention::SharedStorage);
    if (smem_bytes > 0xc000) {
        cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
    }
    if (!Attention::check_supported(p)) { throw std::runtime_error("Parameters not supported"); }
    kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
}

#define CODE(scalar_t, torch_scalar_t)                                                          \
    do {                                                                                        \
        if (bias1.size(0) == 0 && bias2.size(0) == 0) {                                         \
            attention_impl_template<ArchTag,                                                    \
                                    scalar_t,                                                   \
                                    torch_scalar_t,                                             \
                                    BroadcastNoLoad,                                            \
                                    BroadcastNoLoad>(q, k, v, bias1, bias2, o, lse_ptr);        \
        } else if (bias1.size(0) == 0) {                                                        \
            attention_impl_template<ArchTag,                                                    \
                                    scalar_t,                                                   \
                                    torch_scalar_t,                                             \
                                    BroadcastNoLoad,                                            \
                                    BroadcastB>(q, k, v, bias1, bias2, o, lse_ptr);             \
        } else if (bias2.size(0) == 0) {                                                        \
            attention_impl_template<ArchTag,                                                    \
                                    scalar_t,                                                   \
                                    torch_scalar_t,                                             \
                                    BroadcastA,                                                 \
                                    BroadcastNoLoad>(q, k, v, bias1, bias2, o, lse_ptr);        \
        } else {                                                                                \
            attention_impl_template<ArchTag, scalar_t, torch_scalar_t, BroadcastA, BroadcastB>( \
                q, k, v, bias1, bias2, o, lse_ptr);                                             \
        }                                                                                       \
    } while (0)

// Function to select and call the correct template based on biases sizes
void attention_impl(torch::Tensor& q,
                    torch::Tensor& k,
                    torch::Tensor& v,
                    torch::Tensor& bias1,
                    torch::Tensor& bias2,
                    torch::Tensor& o,
                    torch::Tensor& lse)
{
    auto lse_ptr = lse.size(0) == 0 ? nullptr : reinterpret_cast<float*>(lse.data_ptr<float>());
    cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
    DISPATCH_ARCHTAG(prop->major * 10 + prop->minor,
                     DISPATCH_TYPES(q, { CODE(scalar_t, torch_scalar_t); }));
}
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
#include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
|
| 8 |
+
#include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
|
| 9 |
+
#include "../iterators/predicated_tile_iterator_atomic.h"
|
| 10 |
+
#include "cutlass/epilogue/threadblock/epilogue.h"
|
| 11 |
+
|
| 12 |
+
namespace cutlass {
|
| 13 |
+
namespace epilogue {
|
| 14 |
+
namespace threadblock {
|
| 15 |
+
template <int Rank,
|
| 16 |
+
typename Shape_,
|
| 17 |
+
typename WarpMmaTensorOp_,
|
| 18 |
+
int PartitionsK,
|
| 19 |
+
typename OutputOp_,
|
| 20 |
+
int ElementsPerAccess>
|
| 21 |
+
struct EpilogueTensorOpAffineRankN : public DefaultEpilogueTensorOpAffineRankN<Rank,
|
| 22 |
+
Shape_,
|
| 23 |
+
WarpMmaTensorOp_,
|
| 24 |
+
PartitionsK,
|
| 25 |
+
OutputOp_,
|
| 26 |
+
ElementsPerAccess> {
|
| 27 |
+
using Base = DefaultEpilogueTensorOpAffineRankN<Rank,
|
| 28 |
+
Shape_,
|
| 29 |
+
WarpMmaTensorOp_,
|
| 30 |
+
PartitionsK,
|
| 31 |
+
OutputOp_,
|
| 32 |
+
ElementsPerAccess>;
|
| 33 |
+
using OutputTileIterator =
|
| 34 |
+
cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankNAtomic<
|
| 35 |
+
typename Base::OutputTileThreadMap,
|
| 36 |
+
typename Base::ElementOutput,
|
| 37 |
+
Rank>;
|
| 38 |
+
|
| 39 |
+
using Epilogue =
|
| 40 |
+
cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
|
| 41 |
+
typename Base::WarpMmaTensorOp,
|
| 42 |
+
Base::kPartitionsK,
|
| 43 |
+
OutputTileIterator,
|
| 44 |
+
typename Base::AccumulatorFragmentIterator,
|
| 45 |
+
typename Base::WarpTileIterator,
|
| 46 |
+
typename Base::SharedLoadIterator,
|
| 47 |
+
typename Base::OutputOp,
|
| 48 |
+
typename Base::Padding,
|
| 49 |
+
Base::kFragmentsPerIteration>;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
template <int Rank,
|
| 53 |
+
typename Shape_,
|
| 54 |
+
typename WarpMmaTensorOp_,
|
| 55 |
+
int PartitionsK,
|
| 56 |
+
typename OutputOp_,
|
| 57 |
+
int ElementsPerAccess>
|
| 58 |
+
struct EpilogueVoltaTensorOpAffineRankN
|
| 59 |
+
: public DefaultEpilogueVoltaTensorOpAffineRankN<Rank,
|
| 60 |
+
Shape_,
|
| 61 |
+
WarpMmaTensorOp_,
|
| 62 |
+
PartitionsK,
|
| 63 |
+
OutputOp_,
|
| 64 |
+
ElementsPerAccess> {
|
| 65 |
+
using Base = DefaultEpilogueVoltaTensorOpAffineRankN<Rank,
|
| 66 |
+
Shape_,
|
| 67 |
+
WarpMmaTensorOp_,
|
| 68 |
+
PartitionsK,
|
| 69 |
+
OutputOp_,
|
| 70 |
+
ElementsPerAccess>;
|
| 71 |
+
using OutputTileIterator =
|
| 72 |
+
cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankNAtomic<
|
| 73 |
+
typename Base::OutputTileThreadMap,
|
| 74 |
+
typename Base::ElementOutput,
|
| 75 |
+
Rank>;
|
| 76 |
+
|
| 77 |
+
using Epilogue =
|
| 78 |
+
cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
|
| 79 |
+
typename Base::WarpMmaTensorOp,
|
| 80 |
+
Base::kPartitionsK,
|
| 81 |
+
OutputTileIterator,
|
| 82 |
+
typename Base::AccumulatorFragmentIterator,
|
| 83 |
+
typename Base::WarpTileIterator,
|
| 84 |
+
typename Base::SharedLoadIterator,
|
| 85 |
+
typename Base::OutputOp,
|
| 86 |
+
typename Base::Padding>;
|
| 87 |
+
};
|
| 88 |
+
|
| 89 |
+
template <typename Shape_,
|
| 90 |
+
typename WarpMmaTensorOp_,
|
| 91 |
+
int PartitionsK,
|
| 92 |
+
typename OutputOp_,
|
| 93 |
+
int ElementsPerAccess,
|
| 94 |
+
bool ScatterD = false,
|
| 95 |
+
typename PermuteDLayout = layout::NoPermute>
|
| 96 |
+
struct EpilogueTensorOp : public DefaultEpilogueTensorOp<Shape_,
|
| 97 |
+
WarpMmaTensorOp_,
|
| 98 |
+
PartitionsK,
|
| 99 |
+
OutputOp_,
|
| 100 |
+
ElementsPerAccess,
|
| 101 |
+
ScatterD,
|
| 102 |
+
PermuteDLayout> {
|
| 103 |
+
using Base = DefaultEpilogueTensorOp<Shape_,
|
| 104 |
+
WarpMmaTensorOp_,
|
| 105 |
+
PartitionsK,
|
| 106 |
+
OutputOp_,
|
| 107 |
+
ElementsPerAccess,
|
| 108 |
+
ScatterD,
|
| 109 |
+
PermuteDLayout>;
|
| 110 |
+
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAtomic<
|
| 111 |
+
typename Base::OutputTileThreadMap,
|
| 112 |
+
typename Base::ElementOutput,
|
| 113 |
+
ScatterD,
|
| 114 |
+
PermuteDLayout>;
|
| 115 |
+
using Epilogue =
|
| 116 |
+
cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
|
| 117 |
+
typename Base::WarpMmaTensorOp,
|
| 118 |
+
Base::kPartitionsK,
|
| 119 |
+
OutputTileIterator,
|
| 120 |
+
typename Base::AccumulatorFragmentIterator,
|
| 121 |
+
typename Base::WarpTileIterator,
|
| 122 |
+
typename Base::SharedLoadIterator,
|
| 123 |
+
typename Base::OutputOp,
|
| 124 |
+
typename Base::Padding,
|
| 125 |
+
Base::kFragmentsPerIteration>;
|
| 126 |
+
};
|
| 127 |
+
|
| 128 |
+
template <typename Shape_,
|
| 129 |
+
typename WarpMmaTensorOp_,
|
| 130 |
+
int PartitionsK,
|
| 131 |
+
typename OutputOp_,
|
| 132 |
+
int ElementsPerAccess,
|
| 133 |
+
bool ScatterD = false,
|
| 134 |
+
typename PermuteDLayout = layout::NoPermute>
|
| 135 |
+
struct EpilogueVoltaTensorOp : public DefaultEpilogueVoltaTensorOp<Shape_,
|
| 136 |
+
WarpMmaTensorOp_,
|
| 137 |
+
PartitionsK,
|
| 138 |
+
OutputOp_,
|
| 139 |
+
ElementsPerAccess,
|
| 140 |
+
ScatterD,
|
| 141 |
+
PermuteDLayout> {
|
| 142 |
+
using Base = DefaultEpilogueVoltaTensorOp<Shape_,
|
| 143 |
+
WarpMmaTensorOp_,
|
| 144 |
+
PartitionsK,
|
| 145 |
+
OutputOp_,
|
| 146 |
+
ElementsPerAccess,
|
| 147 |
+
ScatterD,
|
| 148 |
+
PermuteDLayout>;
|
| 149 |
+
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAtomic<
|
| 150 |
+
typename Base::OutputTileThreadMap,
|
| 151 |
+
typename Base::ElementOutput,
|
| 152 |
+
ScatterD,
|
| 153 |
+
PermuteDLayout>;
|
| 154 |
+
using Epilogue =
|
| 155 |
+
cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
|
| 156 |
+
typename Base::WarpMmaTensorOp,
|
| 157 |
+
Base::kPartitionsK,
|
| 158 |
+
OutputTileIterator,
|
| 159 |
+
typename Base::AccumulatorFragmentIterator,
|
| 160 |
+
typename Base::WarpTileIterator,
|
| 161 |
+
typename Base::SharedLoadIterator,
|
| 162 |
+
typename Base::OutputOp,
|
| 163 |
+
typename Base::Padding>;
|
| 164 |
+
};
|
| 165 |
+
} // namespace threadblock
|
| 166 |
+
} // namespace epilogue
|
| 167 |
+
} // namespace cutlass
|
| 168 |
+
|
| 169 |
+
template <typename Arch_,
|
| 170 |
+
typename Shape_,
|
| 171 |
+
typename WarpMmaTensorOp_,
|
| 172 |
+
int PartitionsK,
|
| 173 |
+
typename OutputOp_,
|
| 174 |
+
int ElementsPerAccess,
|
| 175 |
+
bool ScatterD = false,
|
| 176 |
+
typename PermuteDLayout = cutlass::layout::NoPermute>
|
| 177 |
+
struct BiasGradEpilogue {
|
| 178 |
+
using Epilogue =
|
| 179 |
+
typename cutlass::epilogue::threadblock::EpilogueTensorOp<Shape_,
|
| 180 |
+
WarpMmaTensorOp_,
|
| 181 |
+
PartitionsK,
|
| 182 |
+
OutputOp_,
|
| 183 |
+
ElementsPerAccess,
|
| 184 |
+
ScatterD,
|
| 185 |
+
PermuteDLayout>::Epilogue;
|
| 186 |
+
};
|
| 187 |
+
|
| 188 |
+
template <typename Shape_,
|
| 189 |
+
typename WarpMmaTensorOp_,
|
| 190 |
+
int PartitionsK,
|
| 191 |
+
typename OutputOp_,
|
| 192 |
+
int ElementsPerAccess,
|
| 193 |
+
bool ScatterD,
|
| 194 |
+
typename PermuteDLayout>
|
| 195 |
+
struct BiasGradEpilogue<cutlass::arch::Sm70,
|
| 196 |
+
Shape_,
|
| 197 |
+
WarpMmaTensorOp_,
|
| 198 |
+
PartitionsK,
|
| 199 |
+
OutputOp_,
|
| 200 |
+
ElementsPerAccess,
|
| 201 |
+
ScatterD,
|
| 202 |
+
PermuteDLayout> {
|
| 203 |
+
using Epilogue =
|
| 204 |
+
typename cutlass::epilogue::threadblock::EpilogueVoltaTensorOp<Shape_,
|
| 205 |
+
WarpMmaTensorOp_,
|
| 206 |
+
PartitionsK,
|
| 207 |
+
OutputOp_,
|
| 208 |
+
ElementsPerAccess,
|
| 209 |
+
ScatterD,
|
| 210 |
+
PermuteDLayout>::Epilogue;
|
| 211 |
+
};
|
| 212 |
+
|
| 213 |
+
template <typename Arch_,
|
| 214 |
+
int Rank,
|
| 215 |
+
typename Shape_,
|
| 216 |
+
typename WarpMmaTensorOp_,
|
| 217 |
+
int PartitionsK,
|
| 218 |
+
typename OutputOp_,
|
| 219 |
+
int ElementsPerAccess>
|
| 220 |
+
struct BiasGradEpilogueAffineRankN {
|
| 221 |
+
using Epilogue = typename cutlass::epilogue::threadblock::EpilogueTensorOpAffineRankN<
|
| 222 |
+
Rank,
|
| 223 |
+
Shape_,
|
| 224 |
+
WarpMmaTensorOp_,
|
| 225 |
+
PartitionsK,
|
| 226 |
+
OutputOp_,
|
| 227 |
+
ElementsPerAccess>::Epilogue;
|
| 228 |
+
};
|
| 229 |
+
|
| 230 |
+
template <int Rank,
|
| 231 |
+
typename Shape_,
|
| 232 |
+
typename WarpMmaTensorOp_,
|
| 233 |
+
int PartitionsK,
|
| 234 |
+
typename OutputOp_,
|
| 235 |
+
int ElementsPerAccess>
|
| 236 |
+
struct BiasGradEpilogueAffineRankN<cutlass::arch::Sm70,
|
| 237 |
+
Rank,
|
| 238 |
+
Shape_,
|
| 239 |
+
WarpMmaTensorOp_,
|
| 240 |
+
PartitionsK,
|
| 241 |
+
OutputOp_,
|
| 242 |
+
ElementsPerAccess> {
|
| 243 |
+
using Epilogue = typename cutlass::epilogue::threadblock::EpilogueVoltaTensorOpAffineRankN<
|
| 244 |
+
Rank,
|
| 245 |
+
Shape_,
|
| 246 |
+
WarpMmaTensorOp_,
|
| 247 |
+
PartitionsK,
|
| 248 |
+
OutputOp_,
|
| 249 |
+
ElementsPerAccess>::Epilogue;
|
| 250 |
+
};
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h
ADDED
|
@@ -0,0 +1,592 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 3 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice, this
|
| 9 |
+
* list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holdvr nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 22 |
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 23 |
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 25 |
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 26 |
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 27 |
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 28 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 29 |
+
*
|
| 30 |
+
**************************************************************************************************/
|
| 31 |
+
|
| 32 |
+
// Copyright (c) Microsoft Corporation.
|
| 33 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 34 |
+
|
| 35 |
+
// DeepSpeed Team
|
| 36 |
+
|
| 37 |
+
/*! \file
|
| 38 |
+
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
|
| 39 |
+
|
| 40 |
+
File copied from "cutlass/epilogue/threadblock/epilogue.h"
|
| 41 |
+
then modified to:
|
| 42 |
+
(1) load 2 source fragments at the same time (pipelining)
|
| 43 |
+
(2) support reading from a different dtype
|
| 44 |
+
(3) pass the row id to the OutputOp if it takes it
|
| 45 |
+
(see MemoryEfficientAttentionNormalize)
|
| 46 |
+
Note that in general the fragment passed to the OutputOp could
|
| 47 |
+
span multiple rows but it does not happen with the configurations we have
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#pragma once
|
| 51 |
+
|
| 52 |
+
#if defined(__CUDACC_RTC__)
|
| 53 |
+
#include <cuda/std/cassert>
|
| 54 |
+
#else
|
| 55 |
+
#include <assert.h>
|
| 56 |
+
#endif
|
| 57 |
+
|
| 58 |
+
#include "cutlass/aligned_buffer.h"
|
| 59 |
+
#include "cutlass/array.h"
|
| 60 |
+
#include "cutlass/cutlass.h"
|
| 61 |
+
#include "cutlass/functional.h"
|
| 62 |
+
#include "cutlass/layout/tensor.h"
|
| 63 |
+
#include "cutlass/layout/vector.h"
|
| 64 |
+
#include "cutlass/numeric_types.h"
|
| 65 |
+
#include "cutlass/tensor_coord.h"
|
| 66 |
+
|
| 67 |
+
#include "cutlass/gemm/gemm.h"
|
| 68 |
+
|
| 69 |
+
#include "cutlass/transform/pitch_linear_thread_map.h"
|
| 70 |
+
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
|
| 71 |
+
|
| 72 |
+
#include "cutlass/epilogue/threadblock/epilogue_base.h"
|
| 73 |
+
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
|
| 74 |
+
#include "cutlass/numeric_types.h"
|
| 75 |
+
|
| 76 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 77 |
+
|
| 78 |
+
namespace cutlass {
|
| 79 |
+
namespace epilogue {
|
| 80 |
+
namespace threadblock {
|
| 81 |
+
|
| 82 |
+
template <typename Op>
|
| 83 |
+
struct ApplyEpilogueOp {
|
| 84 |
+
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
|
| 85 |
+
Op const& output_op,
|
| 86 |
+
int row_id,
|
| 87 |
+
typename Op::FragmentAccumulator const& accum,
|
| 88 |
+
typename Op::FragmentOutput const& source)
|
| 89 |
+
{
|
| 90 |
+
return output_op(accum, source);
|
| 91 |
+
}
|
| 92 |
+
static CUTLASS_DEVICE typename Op::FragmentOutput
|
| 93 |
+
apply(Op const& output_op, int row_id, typename Op::FragmentAccumulator const& accum)
|
| 94 |
+
{
|
| 95 |
+
return output_op(accum);
|
| 96 |
+
}
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 100 |
+
|
| 101 |
+
/// Epilogue operator
|
| 102 |
+
template <typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
|
| 103 |
+
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept:
|
| 104 |
+
///< gemm::warp::MmaTensorOp)
|
| 105 |
+
int PartitionsK, ///< Number of partitions of the K dimension
|
| 106 |
+
typename OutputTileIterator_, ///< Tile iterator writing output tensors
|
| 107 |
+
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting
|
| 108 |
+
///< accumulators
|
| 109 |
+
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing
|
| 110 |
+
///< accumulators to SMEM
|
| 111 |
+
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading
|
| 112 |
+
///< from SMEM
|
| 113 |
+
typename OutputOp_, ///< Output operator
|
| 114 |
+
typename Padding_, ///< Padding added to SMEM allocation to avoid bank
|
| 115 |
+
///< conflicts (concept: MatrixShape)
|
| 116 |
+
int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity
|
| 117 |
+
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is
|
| 118 |
+
///< large
|
| 119 |
+
(!IsEpilogueFunctorHeavy<OutputOp_>::value),
|
| 120 |
+
typename OutputTileSourceIterator_ =
|
| 121 |
+
OutputTileIterator_ ///< Tile iterator reading tensors
|
| 122 |
+
>
|
| 123 |
+
class EpiloguePipelined : public EpilogueBase<Shape_,
|
| 124 |
+
typename WarpMmaOperator_::Shape,
|
| 125 |
+
PartitionsK,
|
| 126 |
+
AccumulatorFragmentIterator_,
|
| 127 |
+
WarpTileIterator_,
|
| 128 |
+
Padding_,
|
| 129 |
+
FragmentsPerPartition> {
|
| 130 |
+
public:
|
| 131 |
+
using Base = EpilogueBase<Shape_,
|
| 132 |
+
typename WarpMmaOperator_::Shape,
|
| 133 |
+
PartitionsK,
|
| 134 |
+
AccumulatorFragmentIterator_,
|
| 135 |
+
WarpTileIterator_,
|
| 136 |
+
Padding_,
|
| 137 |
+
FragmentsPerPartition>;
|
| 138 |
+
|
| 139 |
+
using Shape = Shape_;
|
| 140 |
+
using WarpMmaOperator = WarpMmaOperator_;
|
| 141 |
+
static int const kPartitionsK = PartitionsK;
|
| 142 |
+
using OutputTileIterator = OutputTileIterator_;
|
| 143 |
+
using OutputTileSourceIterator = OutputTileSourceIterator_;
|
| 144 |
+
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
|
| 145 |
+
using WarpTileIterator = WarpTileIterator_;
|
| 146 |
+
using SharedLoadIterator = SharedLoadIterator_;
|
| 147 |
+
using OutputOp = OutputOp_;
|
| 148 |
+
using Padding = Padding_;
|
| 149 |
+
|
| 150 |
+
using Layout = layout::RowMajor;
|
| 151 |
+
using LongIndex = typename Layout::LongIndex;
|
| 152 |
+
|
| 153 |
+
/// The complete warp-level accumulator tile
|
| 154 |
+
using AccumulatorTile = typename Base::AccumulatorTile;
|
| 155 |
+
|
| 156 |
+
/// Accumulator element
|
| 157 |
+
using ElementAccumulator = typename WarpTileIterator::Element;
|
| 158 |
+
|
| 159 |
+
/// Output element
|
| 160 |
+
using ElementOutput = typename OutputTileIterator::Element;
|
| 161 |
+
using ElementSource = typename OutputTileSourceIterator::Element;
|
| 162 |
+
|
| 163 |
+
/// Output access size
|
| 164 |
+
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
|
| 165 |
+
|
| 166 |
+
/// Tensor reference to destination tensor
|
| 167 |
+
using TensorRef = typename OutputTileIterator::TensorRef;
|
| 168 |
+
|
| 169 |
+
/// Tensor reference to sync tensor
|
| 170 |
+
using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
|
| 171 |
+
|
| 172 |
+
/// Const tensor reference to source tensor
|
| 173 |
+
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
|
| 174 |
+
|
| 175 |
+
/// Array type used to output
|
| 176 |
+
using OutputAccessType =
|
| 177 |
+
Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
|
| 178 |
+
using SourceAccessType = Array<typename OutputTileSourceIterator::Element,
|
| 179 |
+
OutputTileSourceIterator::kElementsPerAccess>;
|
| 180 |
+
|
| 181 |
+
/// Array type used by output functor
|
| 182 |
+
using AccumulatorAccessType =
|
| 183 |
+
Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
|
| 184 |
+
|
| 185 |
+
/// Number of warps
|
| 186 |
+
using WarpCount = typename Base::WarpCount;
|
| 187 |
+
|
| 188 |
+
static int constexpr kSmemTiles =
|
| 189 |
+
Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
|
| 190 |
+
static int constexpr kSmemPointerOffset =
|
| 191 |
+
Base::SharedStorage::StorageShape::kCount / kSmemTiles;
|
| 192 |
+
|
| 193 |
+
public:
|
| 194 |
+
static_assert(OutputTileSourceIterator::Fragment::kElements ==
|
| 195 |
+
OutputTileIterator::Fragment::kElements,
|
| 196 |
+
"Mismatch between input tile and output tile iterator (kElements)");
|
| 197 |
+
static_assert(OutputTileSourceIterator::kIterations == OutputTileIterator::kIterations,
|
| 198 |
+
"Mismatch between input tile and output tile iterator (kIterations)");
|
| 199 |
+
static_assert(SharedLoadIterator::Fragment::kElements ==
|
| 200 |
+
OutputTileIterator::Fragment::kElements,
|
| 201 |
+
"Mismatch between shared load iterator and output tile iterator.");
|
| 202 |
+
|
| 203 |
+
static_assert(OutputTileIterator::kElementsPerAccess,
|
| 204 |
+
"OutputTileIterator::kElementsPerAccess must not be zero.");
|
| 205 |
+
|
| 206 |
+
static_assert(!(OutputTileIterator::Fragment::kElements %
|
| 207 |
+
OutputTileIterator::kElementsPerAccess),
|
| 208 |
+
"Divisibility");
|
| 209 |
+
|
| 210 |
+
private:
|
| 211 |
+
/// Loads fragment from shared memory aligned with output tensor
|
| 212 |
+
SharedLoadIterator shared_load_iterator_;
|
| 213 |
+
|
| 214 |
+
public:
|
| 215 |
+
/// Constructor
|
| 216 |
+
CUTLASS_DEVICE
|
| 217 |
+
EpiloguePipelined(typename Base::SharedStorage& shared_storage, ///< Shared storage object
|
| 218 |
+
int thread_idx, ///< ID of a thread within the threadblock
|
| 219 |
+
int warp_idx, ///< ID of warp within threadblock
|
| 220 |
+
int lane_idx ///< Id of thread within warp
|
| 221 |
+
)
|
| 222 |
+
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
|
| 223 |
+
shared_load_iterator_(shared_storage.reference(), thread_idx)
|
| 224 |
+
{
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
/// Streams the result to global memory
|
| 228 |
+
CUTLASS_DEVICE
|
| 229 |
+
void operator()(OutputOp const& output_op, ///< Output operator
|
| 230 |
+
OutputTileIterator destination_iterator, ///< Tile iterator for destination
|
| 231 |
+
AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile
|
| 232 |
+
OutputTileSourceIterator source_iterator)
|
| 233 |
+
{ ///< Threadblock tile coordinate in GEMM (in units
|
| 234 |
+
///< of threadblock tiles)
|
| 235 |
+
|
| 236 |
+
if (!output_op.is_source_needed()) {
|
| 237 |
+
compute_source_not_needed_(output_op, destination_iterator, accumulators);
|
| 238 |
+
} else {
|
| 239 |
+
compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
|
| 240 |
+
}
|
| 241 |
+
}
|
| 242 |
+
CUTLASS_DEVICE
|
| 243 |
+
void operator()(OutputOp const& output_op, ///< Output operator
|
| 244 |
+
OutputTileIterator destination_iterator, ///< Tile iterator for destination
|
| 245 |
+
AccumulatorTile const& accumulators)
|
| 246 |
+
{ ///< Complete warp-level accumulator tile
|
| 247 |
+
compute_source_not_needed_(output_op, destination_iterator, accumulators);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
private:
|
| 251 |
+
template <class Seq>
|
| 252 |
+
struct acc2smem_source_not_needed;
|
| 253 |
+
|
| 254 |
+
template <size_t... Seq>
|
| 255 |
+
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
|
| 256 |
+
template <int Advance>
|
| 257 |
+
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
|
| 258 |
+
WarpTileIterator& warp_tile_iterator)
|
| 259 |
+
{
|
| 260 |
+
CUTLASS_PRAGMA_UNROLL
|
| 261 |
+
for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; }
|
| 262 |
+
|
| 263 |
+
CUTLASS_PRAGMA_UNROLL
|
| 264 |
+
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
|
| 265 |
+
typename AccumulatorFragmentIterator::Fragment accum_fragment;
|
| 266 |
+
|
| 267 |
+
accum_fragment_iterator.load(accum_fragment);
|
| 268 |
+
++accum_fragment_iterator;
|
| 269 |
+
|
| 270 |
+
warp_tile_iterator.store(accum_fragment);
|
| 271 |
+
if (p < Base::kFragmentsPerIteration - 1) {
|
| 272 |
+
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
|
| 273 |
+
}
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
if (Base::kFragmentsPerIteration > 1) {
|
| 277 |
+
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
|
| 278 |
+
(1 - Base::kFragmentsPerIteration));
|
| 279 |
+
}
|
| 280 |
+
}
|
| 281 |
+
|
| 282 |
+
CUTLASS_DEVICE
|
| 283 |
+
static void push(size_t pos,
|
| 284 |
+
AccumulatorFragmentIterator const& iterator_begin,
|
| 285 |
+
WarpTileIterator& warp_tile_iterator)
|
| 286 |
+
{
|
| 287 |
+
int dummy[] = {
|
| 288 |
+
(pos == (Seq * Base::kFragmentsPerIteration)) &&
|
| 289 |
+
(helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator),
|
| 290 |
+
0)...};
|
| 291 |
+
|
| 292 |
+
CUTLASS_UNUSED(dummy[0]);
|
| 293 |
+
}
|
| 294 |
+
};
|
| 295 |
+
|
| 296 |
+
static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1,
|
| 297 |
+
"One of these must be exactly 1.");
|
| 298 |
+
|
| 299 |
+
/// Streams the result to global memory
|
| 300 |
+
CUTLASS_DEVICE
|
| 301 |
+
void compute_source_not_needed_(
|
| 302 |
+
OutputOp const& output_op, ///< Output operator
|
| 303 |
+
OutputTileIterator destination_iterator, ///< Tile iterator for destination
|
| 304 |
+
AccumulatorTile const& accumulators ///< Complete warp-level accumulator tile
|
| 305 |
+
)
|
| 306 |
+
{
|
| 307 |
+
//
|
| 308 |
+
// Iterator over warp-level accumulator fragment
|
| 309 |
+
//
|
| 310 |
+
|
| 311 |
+
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
|
| 312 |
+
|
| 313 |
+
//
|
| 314 |
+
// Iterate over accumulator tile
|
| 315 |
+
//
|
| 316 |
+
|
| 317 |
+
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration \
|
| 318 |
+
: 1)
|
| 319 |
+
for (int iter = 0; iter < OutputTileIterator::kIterations;
|
| 320 |
+
iter += Base::kFragmentsPerIteration) {
|
| 321 |
+
//
|
| 322 |
+
// Convert and store fragment
|
| 323 |
+
//
|
| 324 |
+
|
| 325 |
+
__syncthreads();
|
| 326 |
+
|
| 327 |
+
acc2smem_source_not_needed<cutlass::make_index_sequence<
|
| 328 |
+
OutputTileIterator::kIterations / Base::kFragmentsPerIteration>>::
|
| 329 |
+
push(iter, accum_fragment_iterator, this->warp_tile_iterator_);
|
| 330 |
+
|
| 331 |
+
__syncthreads();
|
| 332 |
+
|
| 333 |
+
//
|
| 334 |
+
// Load fragments from shared memory
|
| 335 |
+
//
|
| 336 |
+
|
| 337 |
+
CUTLASS_PRAGMA_UNROLL
|
| 338 |
+
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
|
| 339 |
+
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
|
| 340 |
+
|
| 341 |
+
shared_load_iterator_.load(aligned_accum_fragment[0]);
|
| 342 |
+
|
| 343 |
+
if (p < Base::kFragmentsPerIteration - 1) {
|
| 344 |
+
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
|
| 345 |
+
} else if (kPartitionsK > 1) {
|
| 346 |
+
plus<typename SharedLoadIterator::Fragment> add_fragments;
|
| 347 |
+
|
| 348 |
+
CUTLASS_PRAGMA_UNROLL
|
| 349 |
+
for (int i = 1; i < kPartitionsK; ++i) {
|
| 350 |
+
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
|
| 351 |
+
shared_load_iterator_.load(aligned_accum_fragment[i]);
|
| 352 |
+
aligned_accum_fragment[0] =
|
| 353 |
+
add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
|
| 354 |
+
}
|
| 355 |
+
|
| 356 |
+
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) *
|
| 357 |
+
kSmemPointerOffset);
|
| 358 |
+
}
|
| 359 |
+
|
| 360 |
+
//
|
| 361 |
+
// Compute the output result
|
| 362 |
+
//
|
| 363 |
+
|
| 364 |
+
typename OutputTileIterator::Fragment output_fragment;
|
| 365 |
+
|
| 366 |
+
apply_output_operator_source_not_needed_(destination_iterator.thread_start_row(),
|
| 367 |
+
output_fragment,
|
| 368 |
+
output_op,
|
| 369 |
+
aligned_accum_fragment[0]);
|
| 370 |
+
|
| 371 |
+
//
|
| 372 |
+
// Store the final result
|
| 373 |
+
//
|
| 374 |
+
|
| 375 |
+
destination_iterator.store(output_fragment);
|
| 376 |
+
++destination_iterator;
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
if (Base::kFragmentsPerIteration > 1) {
|
| 380 |
+
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset *
|
| 381 |
+
(1 - Base::kFragmentsPerIteration));
|
| 382 |
+
}
|
| 383 |
+
}
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
template <class Seq>
|
| 387 |
+
struct acc2smem_source_needed;
|
| 388 |
+
|
| 389 |
+
template <size_t... Seq>
|
| 390 |
+
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
|
| 391 |
+
template <int Advance>
|
| 392 |
+
CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
|
| 393 |
+
WarpTileIterator& warp_tile_iterator)
|
| 394 |
+
{
|
| 395 |
+
CUTLASS_PRAGMA_UNROLL
|
| 396 |
+
for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; }
|
| 397 |
+
|
| 398 |
+
typename AccumulatorFragmentIterator::Fragment accum_fragment;
|
| 399 |
+
accum_fragment_iterator.load(accum_fragment);
|
| 400 |
+
warp_tile_iterator.store(accum_fragment);
|
| 401 |
+
}
|
| 402 |
+
|
| 403 |
+
CUTLASS_DEVICE
|
| 404 |
+
static void push(size_t pos,
|
| 405 |
+
AccumulatorFragmentIterator const& iterator_begin,
|
| 406 |
+
WarpTileIterator& warp_tile_iterator)
|
| 407 |
+
{
|
| 408 |
+
int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
|
| 409 |
+
}
|
| 410 |
+
};
|
| 411 |
+
|
| 412 |
+
/// Streams the result to global memory
|
| 413 |
+
CUTLASS_DEVICE
|
| 414 |
+
void compute_source_needed_(
|
| 415 |
+
OutputOp const& output_op, ///< Output operator
|
| 416 |
+
OutputTileIterator destination_iterator, ///< Tile iterator for destination
|
| 417 |
+
AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile
|
| 418 |
+
OutputTileSourceIterator source_iterator ///< Threadblock tile coordinate in GEMM (in units
|
| 419 |
+
///< of threadblock tiles)
|
| 420 |
+
)
|
| 421 |
+
{
|
| 422 |
+
typename OutputTileSourceIterator::Fragment source_fragment[2];
|
| 423 |
+
|
| 424 |
+
source_fragment[0].clear();
|
| 425 |
+
source_iterator.load(source_fragment[0]);
|
| 426 |
+
++source_iterator;
|
| 427 |
+
source_fragment[1].clear();
|
| 428 |
+
|
| 429 |
+
//
|
| 430 |
+
// Iterator over warp-level accumulator fragment
|
| 431 |
+
//
|
| 432 |
+
|
| 433 |
+
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
|
| 434 |
+
|
| 435 |
+
//
|
| 436 |
+
// Iterate over accumulator tile
|
| 437 |
+
//
|
| 438 |
+
|
| 439 |
+
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
|
| 440 |
+
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
|
| 441 |
+
if (iter > 0) { __syncthreads(); }
|
| 442 |
+
//
|
| 443 |
+
// Load the source for next iteration (pipelining)
|
| 444 |
+
//
|
| 445 |
+
|
| 446 |
+
if (iter + 1 < OutputTileIterator::kIterations) {
|
| 447 |
+
source_iterator.load(source_fragment[(iter + 1) % 2]);
|
| 448 |
+
}
|
| 449 |
+
++source_iterator;
|
| 450 |
+
acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::
|
| 451 |
+
push(iter, accum_fragment_iterator, this->warp_tile_iterator_);
|
| 452 |
+
|
| 453 |
+
__syncthreads();
|
| 454 |
+
|
| 455 |
+
//
|
| 456 |
+
// Load fragments from shared memory
|
| 457 |
+
//
|
| 458 |
+
|
| 459 |
+
typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
|
| 460 |
+
|
| 461 |
+
shared_load_iterator_.load(aligned_accum_fragment[0]);
|
| 462 |
+
|
| 463 |
+
// If the number of k-slices is > 1 - perform a reduction amongst the
|
| 464 |
+
// k-slices
|
| 465 |
+
if (kPartitionsK > 1) {
|
| 466 |
+
plus<typename SharedLoadIterator::Fragment> add_fragments;
|
| 467 |
+
|
| 468 |
+
CUTLASS_PRAGMA_UNROLL
|
| 469 |
+
for (int i = 1; i < kPartitionsK; ++i) {
|
| 470 |
+
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
|
| 471 |
+
shared_load_iterator_.load(aligned_accum_fragment[i]);
|
| 472 |
+
aligned_accum_fragment[0] =
|
| 473 |
+
add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
|
| 474 |
+
}
|
| 475 |
+
|
| 476 |
+
shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
//
|
| 480 |
+
// Compute the output result
|
| 481 |
+
//
|
| 482 |
+
|
| 483 |
+
typename OutputTileIterator::Fragment output_fragment;
|
| 484 |
+
|
| 485 |
+
apply_output_operator_(destination_iterator.thread_start_row(),
|
| 486 |
+
output_fragment,
|
| 487 |
+
output_op,
|
| 488 |
+
aligned_accum_fragment[0],
|
| 489 |
+
source_fragment[iter % 2]);
|
| 490 |
+
|
| 491 |
+
//
|
| 492 |
+
// Store the final result
|
| 493 |
+
//
|
| 494 |
+
|
| 495 |
+
destination_iterator.store(output_fragment);
|
| 496 |
+
++destination_iterator;
|
| 497 |
+
}
|
| 498 |
+
}
|
| 499 |
+
|
| 500 |
+
/// Helper to invoke the output functor over each vector of output
|
| 501 |
+
CUTLASS_DEVICE
|
| 502 |
+
void apply_output_operator_(int begin_row,
|
| 503 |
+
typename OutputTileIterator::Fragment& output_fragment,
|
| 504 |
+
OutputOp const& output_op, ///< Output operator
|
| 505 |
+
typename SharedLoadIterator::Fragment const& aligned_accum_fragment,
|
| 506 |
+
typename OutputTileSourceIterator::Fragment const& source_fragment)
|
| 507 |
+
{
|
| 508 |
+
OutputAccessType* output_frag_ptr = reinterpret_cast<OutputAccessType*>(&output_fragment);
|
| 509 |
+
|
| 510 |
+
AccumulatorAccessType const* compute_frag_ptr =
|
| 511 |
+
reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
|
| 512 |
+
|
| 513 |
+
SourceAccessType const* source_frag_ptr =
|
| 514 |
+
reinterpret_cast<SourceAccessType const*>(&source_fragment);
|
| 515 |
+
|
| 516 |
+
int const kOutputOpIterations =
|
| 517 |
+
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
|
| 518 |
+
|
| 519 |
+
CUTLASS_PRAGMA_UNROLL
|
| 520 |
+
for (int i = 0; i < kOutputOpIterations; ++i) {
|
| 521 |
+
// Call the output operator
|
| 522 |
+
output_frag_ptr[i] = ApplyEpilogueOp<OutputOp>::apply(
|
| 523 |
+
output_op,
|
| 524 |
+
begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess),
|
| 525 |
+
compute_frag_ptr[i],
|
| 526 |
+
source_frag_ptr[i]);
|
| 527 |
+
}
|
| 528 |
+
}
|
| 529 |
+
|
| 530 |
+
/// Helper to invoke the output functor over each vector of output
|
| 531 |
+
CUTLASS_DEVICE
|
| 532 |
+
void apply_output_operator_source_not_needed_(
|
| 533 |
+
int begin_row,
|
| 534 |
+
typename OutputTileIterator::Fragment& output_fragment,
|
| 535 |
+
OutputOp const& output_op, ///< Output operator
|
| 536 |
+
typename SharedLoadIterator::Fragment const& aligned_accum_fragment)
|
| 537 |
+
{
|
| 538 |
+
OutputAccessType* output_frag_ptr = reinterpret_cast<OutputAccessType*>(&output_fragment);
|
| 539 |
+
|
| 540 |
+
AccumulatorAccessType const* compute_frag_ptr =
|
| 541 |
+
reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
|
| 542 |
+
|
| 543 |
+
int const kOutputOpIterations =
|
| 544 |
+
OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
|
| 545 |
+
|
| 546 |
+
CUTLASS_PRAGMA_UNROLL
|
| 547 |
+
for (int i = 0; i < kOutputOpIterations; ++i) {
|
| 548 |
+
// Call the output operator
|
| 549 |
+
output_frag_ptr[i] = ApplyEpilogueOp<OutputOp>::apply(
|
| 550 |
+
output_op,
|
| 551 |
+
begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess),
|
| 552 |
+
compute_frag_ptr[i]);
|
| 553 |
+
}
|
| 554 |
+
}
|
| 555 |
+
|
| 556 |
+
// This should be constexpr, but it's only supported on c++14
|
| 557 |
+
static int CUTLASS_HOST_DEVICE getRowOffset(int i)
|
| 558 |
+
{
|
| 559 |
+
using ThreadMap = typename OutputTileIterator::ThreadMap;
|
| 560 |
+
|
| 561 |
+
CUTLASS_PRAGMA_UNROLL
|
| 562 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 563 |
+
CUTLASS_PRAGMA_UNROLL
|
| 564 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 565 |
+
CUTLASS_PRAGMA_UNROLL
|
| 566 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 567 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 568 |
+
group * ThreadMap::Delta::kGroup +
|
| 569 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 570 |
+
int frag_row_idx =
|
| 571 |
+
(row + ThreadMap::Iterations::kRow *
|
| 572 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 573 |
+
CUTLASS_PRAGMA_UNROLL
|
| 574 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 575 |
+
int frag_idx = ThreadMap::kElementsPerAccess *
|
| 576 |
+
(frag_row_idx * ThreadMap::Iterations::kColumn + column);
|
| 577 |
+
if (i < frag_idx + ThreadMap::kElementsPerAccess) { return row_offset; }
|
| 578 |
+
}
|
| 579 |
+
}
|
| 580 |
+
}
|
| 581 |
+
}
|
| 582 |
+
return -1;
|
| 583 |
+
}
|
| 584 |
+
};
|
| 585 |
+
|
| 586 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 587 |
+
|
| 588 |
+
} // namespace threadblock
|
| 589 |
+
} // namespace epilogue
|
| 590 |
+
} // namespace cutlass
|
| 591 |
+
|
| 592 |
+
////////////////////////////////////////////////////////////////////////////////
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 3 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice, this
|
| 9 |
+
* list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holdvr nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 22 |
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 23 |
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 25 |
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 26 |
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 27 |
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 28 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 29 |
+
*
|
| 30 |
+
**************************************************************************************************/
|
| 31 |
+
|
| 32 |
+
// Copyright (c) Microsoft Corporation.
|
| 33 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 34 |
+
|
| 35 |
+
// DeepSpeed Team
|
| 36 |
+
|
| 37 |
+
/*! \file
|
| 38 |
+
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
|
| 39 |
+
|
| 40 |
+
The epilogue rearranges the result of a matrix product through shared memory
|
| 41 |
+
to match canonical tensor layouts in global memory. Epilogues support
|
| 42 |
+
conversion and reduction operations.
|
| 43 |
+
|
| 44 |
+
This is a copy of cutlass/epilogue/threadblock/epilogue.h that can
|
| 45 |
+
handle "row_id" as a first argument, as uses it to get the corresponding
|
| 46 |
+
`m_prime` / `s_prime` to rescale the output.
|
| 47 |
+
*/
|
| 48 |
+
|
| 49 |
+
#pragma once
|
| 50 |
+
|
| 51 |
+
#if defined(__CUDACC_RTC__)
|
| 52 |
+
#include <cuda/std/cassert>
|
| 53 |
+
#else
|
| 54 |
+
#include <assert.h>
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
#include "cutlass/aligned_buffer.h"
|
| 58 |
+
#include "cutlass/array.h"
|
| 59 |
+
#include "cutlass/cutlass.h"
|
| 60 |
+
#include "cutlass/functional.h"
|
| 61 |
+
#include "cutlass/layout/tensor.h"
|
| 62 |
+
#include "cutlass/layout/vector.h"
|
| 63 |
+
#include "cutlass/numeric_types.h"
|
| 64 |
+
#include "cutlass/tensor_coord.h"
|
| 65 |
+
|
| 66 |
+
#include "cutlass/gemm/gemm.h"
|
| 67 |
+
|
| 68 |
+
#include "cutlass/transform/pitch_linear_thread_map.h"
|
| 69 |
+
#include "cutlass/transform/threadblock/regular_tile_iterator.h"
|
| 70 |
+
|
| 71 |
+
#include "cutlass/epilogue/threadblock/epilogue_base.h"
|
| 72 |
+
#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
|
| 73 |
+
#include "cutlass/numeric_types.h"
|
| 74 |
+
|
| 75 |
+
#include "cutlass/array.h"
|
| 76 |
+
#include "cutlass/cutlass.h"
|
| 77 |
+
#include "cutlass/epilogue/thread/scale_type.h"
|
| 78 |
+
#include "cutlass/functional.h"
|
| 79 |
+
#include "cutlass/numeric_conversion.h"
|
| 80 |
+
#include "cutlass/numeric_types.h"
|
| 81 |
+
#include "epilogue_pipelined.h"
|
| 82 |
+
|
| 83 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 84 |
+
|
| 85 |
+
namespace cutlass {
|
| 86 |
+
namespace epilogue {
|
| 87 |
+
namespace thread {
|
| 88 |
+
|
| 89 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 90 |
+
|
| 91 |
+
/// Applies a linear combination operator to an array of elements.
|
| 92 |
+
// output <- alpha * accumulator + beta * source
|
| 93 |
+
// with:
|
| 94 |
+
// alpha = 1 / s_prime (to normalize when isLast=True, 1 otherwise)
|
| 95 |
+
// beta = alpha / m_prime (renormalize the output when the max changes)
|
| 96 |
+
// source is the current output
|
| 97 |
+
template <typename ElementOutput_, ///< Data type used to store tensors
|
| 98 |
+
typename ElementSource_, //< Data type for source (usually matches
|
| 99 |
+
//`ElementOutput`)
|
| 100 |
+
int Count, ///< Number of elements computed per operation.
|
| 101 |
+
///< Usually it is 128/sizeof_bits<ElementOutput_>,
|
| 102 |
+
///< but we use 64 or 32 sometimes when there are not enough data
|
| 103 |
+
///< to store
|
| 104 |
+
typename ElementAccumulator_, ///< Accumulator data type
|
| 105 |
+
typename ElementCompute_, ///< Data type used to compute linear combination
|
| 106 |
+
bool isFirst,
|
| 107 |
+
bool isLast,
|
| 108 |
+
typename FragmentAlphaBeta_,
|
| 109 |
+
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
|
| 110 |
+
class MemoryEfficientAttentionNormalize {
|
| 111 |
+
public:
|
| 112 |
+
using ElementOutput = ElementOutput_;
|
| 113 |
+
using ElementSource = ElementSource_;
|
| 114 |
+
using ElementAccumulator = ElementAccumulator_;
|
| 115 |
+
using ElementCompute = ElementCompute_;
|
| 116 |
+
|
| 117 |
+
static int const kCount = Count;
|
| 118 |
+
|
| 119 |
+
using FragmentOutput = Array<ElementOutput, kCount>;
|
| 120 |
+
using FragmentSource = Array<ElementSource, kCount>;
|
| 121 |
+
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
|
| 122 |
+
using ComputeFragment = Array<ElementCompute, kCount>;
|
| 123 |
+
using FragmentAlphaBeta = FragmentAlphaBeta_;
|
| 124 |
+
|
| 125 |
+
static FloatRoundStyle const kRound = Round;
|
| 126 |
+
|
| 127 |
+
private:
|
| 128 |
+
//
|
| 129 |
+
// Data members
|
| 130 |
+
//
|
| 131 |
+
|
| 132 |
+
FragmentAlphaBeta const& s_prime_;
|
| 133 |
+
FragmentAlphaBeta const& m_prime_;
|
| 134 |
+
|
| 135 |
+
public:
|
| 136 |
+
/// Constructs the function object, possibly loading from pointers in host
|
| 137 |
+
/// memory
|
| 138 |
+
CUTLASS_HOST_DEVICE
|
| 139 |
+
MemoryEfficientAttentionNormalize(FragmentAlphaBeta const& s_prime,
|
| 140 |
+
FragmentAlphaBeta const& m_prime)
|
| 141 |
+
: s_prime_(s_prime), m_prime_(m_prime)
|
| 142 |
+
{
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
/// Returns true if source is needed
|
| 146 |
+
CUTLASS_HOST_DEVICE
|
| 147 |
+
bool is_source_needed() const { return !isFirst; }
|
| 148 |
+
|
| 149 |
+
/// Functionally required for serial reduction in the epilogue
|
| 150 |
+
CUTLASS_HOST_DEVICE
|
| 151 |
+
void set_k_partition(int k_partition, int k_partition_count) {}
|
| 152 |
+
|
| 153 |
+
/// Computes linear scaling: D = alpha * accumulator + beta * source
|
| 154 |
+
CUTLASS_HOST_DEVICE
|
| 155 |
+
FragmentOutput operator()(int row,
|
| 156 |
+
FragmentAccumulator const& accumulator,
|
| 157 |
+
FragmentSource const& source) const
|
| 158 |
+
{
|
| 159 |
+
assert(!isFirst);
|
| 160 |
+
|
| 161 |
+
// Convert source to internal compute numeric type
|
| 162 |
+
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
|
| 163 |
+
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
|
| 164 |
+
accumulator_converter;
|
| 165 |
+
|
| 166 |
+
// Convert to destination numeric type
|
| 167 |
+
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
|
| 168 |
+
|
| 169 |
+
ComputeFragment converted_source = source_converter(source);
|
| 170 |
+
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
|
| 171 |
+
|
| 172 |
+
// Perform binary operations
|
| 173 |
+
ComputeFragment intermediate;
|
| 174 |
+
|
| 175 |
+
multiplies<ComputeFragment> mul_add_source;
|
| 176 |
+
multiply_add<ComputeFragment> mul_add_accumulator;
|
| 177 |
+
|
| 178 |
+
ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
|
| 179 |
+
ElementCompute beta = alpha * m_prime_[row];
|
| 180 |
+
|
| 181 |
+
intermediate = mul_add_source(beta, converted_source); // X = beta * C
|
| 182 |
+
|
| 183 |
+
intermediate = mul_add_accumulator(
|
| 184 |
+
alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
|
| 185 |
+
|
| 186 |
+
return destination_converter(intermediate);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
/// Computes linear scaling: D = alpha * accumulator
|
| 190 |
+
CUTLASS_HOST_DEVICE
|
| 191 |
+
FragmentOutput operator()(int row, FragmentAccumulator const& accumulator) const
|
| 192 |
+
{
|
| 193 |
+
assert(isFirst);
|
| 194 |
+
|
| 195 |
+
// Convert source to internal compute numeric type
|
| 196 |
+
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
|
| 197 |
+
accumulator_converter;
|
| 198 |
+
|
| 199 |
+
// Convert to destination numeric type
|
| 200 |
+
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
|
| 201 |
+
|
| 202 |
+
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
|
| 203 |
+
|
| 204 |
+
ComputeFragment intermediate;
|
| 205 |
+
multiplies<ComputeFragment> mul_accumulator;
|
| 206 |
+
|
| 207 |
+
ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
|
| 208 |
+
|
| 209 |
+
intermediate = mul_accumulator(alpha, converted_accumulator); // X = alpha * C + uniform
|
| 210 |
+
|
| 211 |
+
return destination_converter(intermediate);
|
| 212 |
+
}
|
| 213 |
+
};
|
| 214 |
+
|
| 215 |
+
} // namespace thread
|
| 216 |
+
|
| 217 |
+
namespace threadblock {
|
| 218 |
+
template <typename EO,
|
| 219 |
+
typename ES,
|
| 220 |
+
int Count,
|
| 221 |
+
typename EA,
|
| 222 |
+
typename EC,
|
| 223 |
+
bool F,
|
| 224 |
+
bool L,
|
| 225 |
+
typename FAB,
|
| 226 |
+
FloatRoundStyle R>
|
| 227 |
+
struct ApplyEpilogueOp<
|
| 228 |
+
thread::MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>> {
|
| 229 |
+
using Op = thread::MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>;
|
| 230 |
+
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
|
| 231 |
+
Op const& output_op,
|
| 232 |
+
int row_id,
|
| 233 |
+
typename Op::FragmentAccumulator const& accum,
|
| 234 |
+
typename Op::FragmentSource const& source)
|
| 235 |
+
{
|
| 236 |
+
return output_op(row_id, accum, source);
|
| 237 |
+
}
|
| 238 |
+
static CUTLASS_DEVICE typename Op::FragmentOutput
|
| 239 |
+
apply(Op const& output_op, int row_id, typename Op::FragmentAccumulator const& accum)
|
| 240 |
+
{
|
| 241 |
+
return output_op(row_id, accum);
|
| 242 |
+
}
|
| 243 |
+
};
|
| 244 |
+
|
| 245 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 246 |
+
|
| 247 |
+
} // namespace threadblock
|
| 248 |
+
} // namespace epilogue
|
| 249 |
+
} // namespace cutlass
|
| 250 |
+
|
| 251 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
|
| 3 |
+
*reserved. SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice,
|
| 9 |
+
*this list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holder nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
| 23 |
+
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 24 |
+
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 25 |
+
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 27 |
+
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 28 |
+
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 29 |
+
*POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
*
|
| 31 |
+
**************************************************************************************************/
|
| 32 |
+
|
| 33 |
+
// Copyright (c) Microsoft Corporation.
|
| 34 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 35 |
+
|
| 36 |
+
// DeepSpeed Team
|
| 37 |
+
|
| 38 |
+
/*! \file
|
| 39 |
+
\brief Functor performing linear combination operations used by epilogues.
|
| 40 |
+
*/
|
| 41 |
+
|
| 42 |
+
#pragma once
|
| 43 |
+
|
| 44 |
+
#include <cuda_fp16.h>
|
| 45 |
+
|
| 46 |
+
#include "cutlass/array.h"
|
| 47 |
+
#include "cutlass/cutlass.h"
|
| 48 |
+
#include "cutlass/epilogue/thread/activation.h"
|
| 49 |
+
#include "cutlass/functional.h"
|
| 50 |
+
#include "cutlass/numeric_conversion.h"
|
| 51 |
+
#include "cutlass/numeric_types.h"
|
| 52 |
+
|
| 53 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 54 |
+
|
| 55 |
+
namespace cutlass {
|
| 56 |
+
namespace epilogue {
|
| 57 |
+
namespace thread {
|
| 58 |
+
|
| 59 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 60 |
+
|
| 61 |
+
namespace detail {
|
| 62 |
+
|
| 63 |
+
template <typename Element, int ElementsPerAccess>
|
| 64 |
+
struct ArrayExponential {
|
| 65 |
+
CUTLASS_HOST_DEVICE
|
| 66 |
+
Array<Element, ElementsPerAccess> operator()(
|
| 67 |
+
Array<Element, ElementsPerAccess> const& input) const
|
| 68 |
+
{
|
| 69 |
+
Array<Element, ElementsPerAccess> result;
|
| 70 |
+
|
| 71 |
+
CUTLASS_PRAGMA_UNROLL
|
| 72 |
+
for (int i = 0; i < ElementsPerAccess; ++i) { result[i] = expf(input[i]); }
|
| 73 |
+
|
| 74 |
+
return result;
|
| 75 |
+
}
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
template <int ElementsPerAccess>
|
| 79 |
+
struct ArrayExponential<half_t, ElementsPerAccess> {
|
| 80 |
+
CUTLASS_DEVICE
|
| 81 |
+
Array<half_t, ElementsPerAccess> operator()(Array<half_t, ElementsPerAccess> const& input) const
|
| 82 |
+
{
|
| 83 |
+
Array<half_t, ElementsPerAccess> result;
|
| 84 |
+
|
| 85 |
+
int const kVectorCount = ElementsPerAccess / 2;
|
| 86 |
+
|
| 87 |
+
__half2 const* input_ptr = reinterpret_cast<__half2 const*>(input.raw_data());
|
| 88 |
+
__half2* res_ptr = reinterpret_cast<__half2*>(result.raw_data());
|
| 89 |
+
|
| 90 |
+
CUTLASS_PRAGMA_UNROLL
|
| 91 |
+
for (int i = 0; i < kVectorCount; ++i) { res_ptr[i] = h2exp(input_ptr[i]); }
|
| 92 |
+
|
| 93 |
+
return result;
|
| 94 |
+
}
|
| 95 |
+
};
|
| 96 |
+
} // namespace detail
|
| 97 |
+
|
| 98 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 99 |
+
|
| 100 |
+
/// Applies:
|
| 101 |
+
/// output <- (input - lse).exp()
|
| 102 |
+
template <typename ElementOutput_, // output
|
| 103 |
+
typename ElementLSE_, // accumulator from LSE
|
| 104 |
+
typename ElementAccumulator_, // accumulator from matmul
|
| 105 |
+
typename ElementCompute_, // intermediate compute (and exp calculation)
|
| 106 |
+
int ElementsPerAccess>
|
| 107 |
+
class ApplyLogSumExp {
|
| 108 |
+
public:
|
| 109 |
+
using ElementOutput = ElementOutput_;
|
| 110 |
+
using ElementAccumulator = ElementAccumulator_;
|
| 111 |
+
using ElementCompute = ElementCompute_;
|
| 112 |
+
using ElementLSE = ElementLSE_;
|
| 113 |
+
|
| 114 |
+
static int const kElementsPerAccess = ElementsPerAccess;
|
| 115 |
+
static int const kCount = kElementsPerAccess;
|
| 116 |
+
static const ScaleType::Kind kScale = cutlass::epilogue::thread::ScaleType::NoBetaScaling;
|
| 117 |
+
|
| 118 |
+
using FragmentOutput = Array<ElementOutput, kCount>;
|
| 119 |
+
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
|
| 120 |
+
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
|
| 121 |
+
using FragmentLSE = Array<ElementLSE, kElementsPerAccess>;
|
| 122 |
+
using FragmentScaleBias = FragmentLSE; // Used by epilogue_smem_accumulator.h
|
| 123 |
+
|
| 124 |
+
public:
|
| 125 |
+
//
|
| 126 |
+
// Methods
|
| 127 |
+
//
|
| 128 |
+
|
| 129 |
+
CUTLASS_HOST_DEVICE
|
| 130 |
+
ApplyLogSumExp() {}
|
| 131 |
+
|
| 132 |
+
/// Returns true if source is needed
|
| 133 |
+
CUTLASS_HOST_DEVICE
|
| 134 |
+
bool is_source_needed() const { return true; }
|
| 135 |
+
|
| 136 |
+
/// Functionally required for serial reduction in the epilogue
|
| 137 |
+
CUTLASS_HOST_DEVICE
|
| 138 |
+
void set_k_partition(int k_partition, int k_partition_count) {}
|
| 139 |
+
|
| 140 |
+
CUTLASS_HOST_DEVICE
|
| 141 |
+
FragmentOutput operator()(FragmentAccumulator const& AB,
|
| 142 |
+
FragmentLSE const& scale_unused,
|
| 143 |
+
// bias used as LSE
|
| 144 |
+
FragmentLSE const& bias) const
|
| 145 |
+
{
|
| 146 |
+
FragmentCompute frag_AB =
|
| 147 |
+
NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
|
| 148 |
+
FragmentCompute frag_lse_compute =
|
| 149 |
+
NumericArrayConverter<ElementCompute, ElementLSE, kElementsPerAccess>()(bias);
|
| 150 |
+
FragmentCompute frag_compute;
|
| 151 |
+
|
| 152 |
+
minus<FragmentCompute> minus_lse;
|
| 153 |
+
detail::ArrayExponential<ElementCompute, kElementsPerAccess> apply_exp;
|
| 154 |
+
frag_compute = minus_lse(frag_AB, frag_lse_compute);
|
| 155 |
+
frag_compute = apply_exp(frag_compute);
|
| 156 |
+
|
| 157 |
+
return NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess>()(
|
| 158 |
+
frag_compute);
|
| 159 |
+
}
|
| 160 |
+
};
|
| 161 |
+
|
| 162 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 163 |
+
|
| 164 |
+
} // namespace thread
|
| 165 |
+
} // namespace epilogue
|
| 166 |
+
} // namespace cutlass
|
| 167 |
+
|
| 168 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 3 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice, this
|
| 9 |
+
* list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holdvr nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 22 |
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 23 |
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 25 |
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 26 |
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 27 |
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 28 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 29 |
+
*
|
| 30 |
+
**************************************************************************************************/
|
| 31 |
+
|
| 32 |
+
// Copyright (c) Microsoft Corporation.
|
| 33 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 34 |
+
|
| 35 |
+
// DeepSpeed Team
|
| 36 |
+
|
| 37 |
+
#pragma once
|
| 38 |
+
|
| 39 |
+
#include "custom_mma_multistage.h"
|
| 40 |
+
#include "custom_mma_pipelined.h"
|
| 41 |
+
#include "cutlass/gemm/threadblock/mma_multistage.h"
|
| 42 |
+
#include "cutlass/gemm/threadblock/mma_pipelined.h"
|
| 43 |
+
|
| 44 |
+
template <typename Mma, int kMaxK>
|
| 45 |
+
struct MakeCustomMma;
|
| 46 |
+
|
| 47 |
+
template <typename Shape,
|
| 48 |
+
typename IteratorA,
|
| 49 |
+
typename SmemIteratorA,
|
| 50 |
+
cutlass::arch::CacheOperation::Kind CacheOpA,
|
| 51 |
+
typename IteratorB,
|
| 52 |
+
typename SmemIteratorB,
|
| 53 |
+
cutlass::arch::CacheOperation::Kind CacheOpB,
|
| 54 |
+
typename ElementC,
|
| 55 |
+
typename LayoutC,
|
| 56 |
+
typename Policy,
|
| 57 |
+
int Stages,
|
| 58 |
+
cutlass::gemm::SharedMemoryClearOption SharedMemoryClear,
|
| 59 |
+
int kMaxK>
|
| 60 |
+
struct MakeCustomMma<cutlass::gemm::threadblock::MmaMultistage<Shape,
|
| 61 |
+
IteratorA,
|
| 62 |
+
SmemIteratorA,
|
| 63 |
+
CacheOpA,
|
| 64 |
+
IteratorB,
|
| 65 |
+
SmemIteratorB,
|
| 66 |
+
CacheOpB,
|
| 67 |
+
ElementC,
|
| 68 |
+
LayoutC,
|
| 69 |
+
Policy,
|
| 70 |
+
Stages,
|
| 71 |
+
SharedMemoryClear>,
|
| 72 |
+
kMaxK> {
|
| 73 |
+
// Reduce the number of stages if we don't need that many
|
| 74 |
+
static int constexpr kStages =
|
| 75 |
+
kMaxK == cutlass::platform::numeric_limits<int>::max()
|
| 76 |
+
? Stages
|
| 77 |
+
: cutlass::const_min(Stages, (kMaxK + int(Shape::kK) - 1) / int(Shape::kK));
|
| 78 |
+
using Mma = cutlass::gemm::threadblock::CustomMmaMultistage<Shape,
|
| 79 |
+
IteratorA,
|
| 80 |
+
SmemIteratorA,
|
| 81 |
+
CacheOpA,
|
| 82 |
+
IteratorB,
|
| 83 |
+
SmemIteratorB,
|
| 84 |
+
CacheOpB,
|
| 85 |
+
ElementC,
|
| 86 |
+
LayoutC,
|
| 87 |
+
Policy,
|
| 88 |
+
kStages,
|
| 89 |
+
SharedMemoryClear,
|
| 90 |
+
kMaxK>;
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
template <typename Shape,
|
| 94 |
+
typename IteratorA,
|
| 95 |
+
typename SmemIteratorA,
|
| 96 |
+
typename IteratorB,
|
| 97 |
+
typename SmemIteratorB,
|
| 98 |
+
typename ElementC,
|
| 99 |
+
typename LayoutC,
|
| 100 |
+
typename Policy,
|
| 101 |
+
int kMaxK>
|
| 102 |
+
struct MakeCustomMma<cutlass::gemm::threadblock::MmaPipelined<Shape,
|
| 103 |
+
IteratorA,
|
| 104 |
+
SmemIteratorA,
|
| 105 |
+
IteratorB,
|
| 106 |
+
SmemIteratorB,
|
| 107 |
+
ElementC,
|
| 108 |
+
LayoutC,
|
| 109 |
+
Policy>,
|
| 110 |
+
kMaxK> {
|
| 111 |
+
using Mma = cutlass::gemm::threadblock::CustomMmaPipelined<Shape,
|
| 112 |
+
IteratorA,
|
| 113 |
+
SmemIteratorA,
|
| 114 |
+
IteratorB,
|
| 115 |
+
SmemIteratorB,
|
| 116 |
+
ElementC,
|
| 117 |
+
LayoutC,
|
| 118 |
+
Policy>;
|
| 119 |
+
};
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
|
| 3 |
+
*reserved. SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice,
|
| 9 |
+
*this list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holder nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
| 23 |
+
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 24 |
+
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 25 |
+
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 27 |
+
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 28 |
+
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 29 |
+
*POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
*
|
| 31 |
+
**************************************************************************************************/
|
| 32 |
+
|
| 33 |
+
// Copyright (c) Microsoft Corporation.
|
| 34 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 35 |
+
|
| 36 |
+
// DeepSpeed Team
|
| 37 |
+
|
| 38 |
+
/*! \file
|
| 39 |
+
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
|
| 40 |
+
*/
|
| 41 |
+
|
| 42 |
+
#pragma once
|
| 43 |
+
|
| 44 |
+
#include "cutlass/aligned_buffer.h"
|
| 45 |
+
#include "cutlass/arch/memory.h"
|
| 46 |
+
#include "cutlass/array.h"
|
| 47 |
+
#include "cutlass/cutlass.h"
|
| 48 |
+
#include "cutlass/gemm/gemm.h"
|
| 49 |
+
#include "cutlass/gemm/threadblock/mma_base.h"
|
| 50 |
+
#include "cutlass/matrix_shape.h"
|
| 51 |
+
#include "cutlass/numeric_types.h"
|
| 52 |
+
|
| 53 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 54 |
+
|
| 55 |
+
namespace cutlass {
|
| 56 |
+
namespace gemm {
|
| 57 |
+
namespace threadblock {
|
| 58 |
+
|
| 59 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 60 |
+
|
| 61 |
+
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
|
| 62 |
+
/// instructions.
|
| 63 |
+
template <
|
| 64 |
+
/// Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 65 |
+
typename Shape_,
|
| 66 |
+
/// Policy describing tuning details (concept: MmaPolicy)
|
| 67 |
+
typename Policy_,
|
| 68 |
+
/// Number of stages,
|
| 69 |
+
int Stages,
|
| 70 |
+
/// Used for partial specialization
|
| 71 |
+
typename Enable = bool>
|
| 72 |
+
class CustomMmaBase {
|
| 73 |
+
public:
|
| 74 |
+
///< Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 75 |
+
using Shape = Shape_;
|
| 76 |
+
|
| 77 |
+
///< Policy describing tuning details
|
| 78 |
+
using Policy = Policy_;
|
| 79 |
+
|
| 80 |
+
//
|
| 81 |
+
// Dependent types
|
| 82 |
+
//
|
| 83 |
+
|
| 84 |
+
/// Warp-level Mma
|
| 85 |
+
using Operator = typename Policy::Operator;
|
| 86 |
+
|
| 87 |
+
/// Shape describing the overall GEMM computed from shared memory
|
| 88 |
+
/// by each warp.
|
| 89 |
+
using WarpGemm = typename Policy::Operator::Shape;
|
| 90 |
+
|
| 91 |
+
/// Shape describing the number of warps filling the CTA
|
| 92 |
+
using WarpCount =
|
| 93 |
+
GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>;
|
| 94 |
+
|
| 95 |
+
/// Number of warp-level GEMM oeprations
|
| 96 |
+
static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK);
|
| 97 |
+
|
| 98 |
+
/// Number of stages
|
| 99 |
+
static int const kStages = Stages;
|
| 100 |
+
|
| 101 |
+
//
|
| 102 |
+
// Nested structs
|
| 103 |
+
//
|
| 104 |
+
|
| 105 |
+
/// Shared storage object needed by threadblock-scoped GEMM
|
| 106 |
+
template <typename Element, typename OperandShape, typename OperandLayout>
|
| 107 |
+
struct OperandSharedStorage {
|
| 108 |
+
AlignedBuffer<Element, OperandShape::kCount> buffer;
|
| 109 |
+
using TensorRef = TensorRef<Element, OperandLayout>;
|
| 110 |
+
|
| 111 |
+
CUTLASS_DEVICE
|
| 112 |
+
static OperandLayout Layout()
|
| 113 |
+
{
|
| 114 |
+
return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn});
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
/// Returns a TensorRef to the operand
|
| 118 |
+
CUTLASS_HOST_DEVICE
|
| 119 |
+
TensorRef ref() { return TensorRef{buffer.data(), Layout()}; }
|
| 120 |
+
};
|
| 121 |
+
|
| 122 |
+
/// Shape of the A matrix operand in shared memory
|
| 123 |
+
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
|
| 124 |
+
Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
|
| 125 |
+
|
| 126 |
+
/// Shape of the B matrix operand in shared memory
|
| 127 |
+
using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
|
| 128 |
+
Shape::kN + Policy::SmemPaddingB::kColumn>;
|
| 129 |
+
|
| 130 |
+
using SharedStorageA =
|
| 131 |
+
OperandSharedStorage<typename Operator::ElementA, ShapeA, typename Operator::LayoutA>;
|
| 132 |
+
using SharedStorageB =
|
| 133 |
+
OperandSharedStorage<typename Operator::ElementB, ShapeB, typename Operator::LayoutB>;
|
| 134 |
+
using TensorRefA = typename SharedStorageA::TensorRef;
|
| 135 |
+
using TensorRefB = typename SharedStorageB::TensorRef;
|
| 136 |
+
|
| 137 |
+
struct SharedStorage {
|
| 138 |
+
/// Buffer for A operand
|
| 139 |
+
SharedStorageA operand_A;
|
| 140 |
+
|
| 141 |
+
/// Buffer for B operand
|
| 142 |
+
SharedStorageB operand_B;
|
| 143 |
+
};
|
| 144 |
+
|
| 145 |
+
protected:
|
| 146 |
+
//
|
| 147 |
+
// Data members
|
| 148 |
+
//
|
| 149 |
+
|
| 150 |
+
/// Iterator to load a warp-scoped tile of A operand from shared memory
|
| 151 |
+
typename Operator::IteratorA warp_tile_iterator_A_;
|
| 152 |
+
|
| 153 |
+
/// Iterator to load a warp-scoped tile of B operand from shared memory
|
| 154 |
+
typename Operator::IteratorB warp_tile_iterator_B_;
|
| 155 |
+
|
| 156 |
+
public:
|
| 157 |
+
/// Construct from tensor references
|
| 158 |
+
CUTLASS_DEVICE
|
| 159 |
+
CustomMmaBase(
|
| 160 |
+
///< Shared storage needed for internal use by threadblock-scoped GEMM
|
| 161 |
+
SharedStorageA& shared_storageA,
|
| 162 |
+
SharedStorageB& shared_storageB,
|
| 163 |
+
///< ID within the threadblock
|
| 164 |
+
int thread_idx,
|
| 165 |
+
///< ID of warp
|
| 166 |
+
int warp_idx,
|
| 167 |
+
///< ID of each thread within a warp
|
| 168 |
+
int lane_idx)
|
| 169 |
+
: warp_tile_iterator_A_(shared_storageA.ref(), lane_idx),
|
| 170 |
+
warp_tile_iterator_B_(shared_storageB.ref(), lane_idx)
|
| 171 |
+
{
|
| 172 |
+
}
|
| 173 |
+
};
|
| 174 |
+
|
| 175 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 176 |
+
|
| 177 |
+
} // namespace threadblock
|
| 178 |
+
} // namespace gemm
|
| 179 |
+
} // namespace cutlass
|
| 180 |
+
|
| 181 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h
ADDED
|
@@ -0,0 +1,714 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
|
| 3 |
+
*reserved. SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice,
|
| 9 |
+
*this list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holder nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
| 23 |
+
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 24 |
+
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 25 |
+
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 27 |
+
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 28 |
+
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 29 |
+
*POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
*
|
| 31 |
+
**************************************************************************************************/
|
| 32 |
+
|
| 33 |
+
// Copyright (c) Microsoft Corporation.
|
| 34 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 35 |
+
|
| 36 |
+
// DeepSpeed Team
|
| 37 |
+
|
| 38 |
+
/*! \file
|
| 39 |
+
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
|
| 40 |
+
*/
|
| 41 |
+
|
| 42 |
+
#pragma once
|
| 43 |
+
|
| 44 |
+
#include "cutlass/aligned_buffer.h"
|
| 45 |
+
#include "cutlass/arch/cache_operation.h"
|
| 46 |
+
#include "cutlass/arch/memory.h"
|
| 47 |
+
#include "cutlass/array.h"
|
| 48 |
+
#include "cutlass/cutlass.h"
|
| 49 |
+
#include "cutlass/gemm/gemm.h"
|
| 50 |
+
#include "cutlass/matrix_shape.h"
|
| 51 |
+
#include "cutlass/numeric_types.h"
|
| 52 |
+
|
| 53 |
+
#include "custom_mma_base.h"
|
| 54 |
+
|
| 55 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 56 |
+
|
| 57 |
+
namespace cutlass {
|
| 58 |
+
namespace gemm {
|
| 59 |
+
namespace threadblock {
|
| 60 |
+
|
| 61 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 62 |
+
|
| 63 |
+
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
|
| 64 |
+
/// instructions.
|
| 65 |
+
template <
|
| 66 |
+
/// Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 67 |
+
typename Shape_,
|
| 68 |
+
/// Iterates over tiles of A operand in global memory
|
| 69 |
+
// (concept: ReadableTileIterator | ForwardTileIterator |
|
| 70 |
+
// MaskedTileIterator)
|
| 71 |
+
typename IteratorA_,
|
| 72 |
+
/// Iterates over tiles of A operand in shared memory
|
| 73 |
+
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
|
| 74 |
+
typename SmemIteratorA_,
|
| 75 |
+
/// Cache operation for operand A
|
| 76 |
+
cutlass::arch::CacheOperation::Kind CacheOpA,
|
| 77 |
+
/// Iterates over tiles of B operand in global memory
|
| 78 |
+
// (concept: ReadableTileIterator | ForwardTileIterator |
|
| 79 |
+
// MaskedTileIterator)
|
| 80 |
+
typename IteratorB_,
|
| 81 |
+
/// Iterates over tiles of B operand in shared memory
|
| 82 |
+
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
|
| 83 |
+
typename SmemIteratorB_,
|
| 84 |
+
/// Cache operation for operand B
|
| 85 |
+
cutlass::arch::CacheOperation::Kind CacheOpB,
|
| 86 |
+
/// Data type of accumulator matrix
|
| 87 |
+
typename ElementC_,
|
| 88 |
+
/// Data type of accumulator matrix
|
| 89 |
+
typename LayoutC_,
|
| 90 |
+
/// Policy describing tuning details (concept: MmaPolicy)
|
| 91 |
+
typename Policy_,
|
| 92 |
+
/// Number of stages,
|
| 93 |
+
int Stages,
|
| 94 |
+
/// Use zfill or predicate for out-of-bound cp.async
|
| 95 |
+
SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
|
| 96 |
+
/// Upper boundon the K dimension
|
| 97 |
+
int kMaxK = cutlass::platform::numeric_limits<int>::max(),
|
| 98 |
+
/// Used for partial specialization
|
| 99 |
+
typename Enable = bool>
|
| 100 |
+
class CustomMmaMultistage : public CustomMmaBase<Shape_, Policy_, Stages> {
|
| 101 |
+
public:
|
| 102 |
+
///< Base class
|
| 103 |
+
using Base = CustomMmaBase<Shape_, Policy_, Stages>;
|
| 104 |
+
///< Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 105 |
+
using Shape = Shape_;
|
| 106 |
+
///< Iterates over tiles of A operand in global memory
|
| 107 |
+
using IteratorA = IteratorA_;
|
| 108 |
+
///< Iterates over tiles of B operand in global memory
|
| 109 |
+
using IteratorB = IteratorB_;
|
| 110 |
+
///< Data type of accumulator matrix
|
| 111 |
+
using ElementC = ElementC_;
|
| 112 |
+
///< Layout of accumulator matrix
|
| 113 |
+
using LayoutC = LayoutC_;
|
| 114 |
+
///< Policy describing tuning details
|
| 115 |
+
using Policy = Policy_;
|
| 116 |
+
|
| 117 |
+
using SmemIteratorA = SmemIteratorA_;
|
| 118 |
+
using SmemIteratorB = SmemIteratorB_;
|
| 119 |
+
|
| 120 |
+
static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
|
| 121 |
+
static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
|
| 122 |
+
|
| 123 |
+
//
|
| 124 |
+
// Dependent types
|
| 125 |
+
//
|
| 126 |
+
|
| 127 |
+
/// Fragment of accumulator tile
|
| 128 |
+
using FragmentC = typename Policy::Operator::FragmentC;
|
| 129 |
+
|
| 130 |
+
/// Warp-level Mma
|
| 131 |
+
using Operator = typename Policy::Operator;
|
| 132 |
+
|
| 133 |
+
/// Minimum architecture is Sm80 to support cp.async
|
| 134 |
+
using ArchTag = arch::Sm80;
|
| 135 |
+
|
| 136 |
+
/// Complex transform on A operand
|
| 137 |
+
static ComplexTransform const kTransformA = Operator::kTransformA;
|
| 138 |
+
|
| 139 |
+
/// Complex transform on B operand
|
| 140 |
+
static ComplexTransform const kTransformB = Operator::kTransformB;
|
| 141 |
+
|
| 142 |
+
/// Internal structure exposed for introspection.
|
| 143 |
+
struct Detail {
|
| 144 |
+
static_assert(Base::kWarpGemmIterations > 1,
|
| 145 |
+
"The pipelined structure requires at least two warp-level "
|
| 146 |
+
"GEMM operations.");
|
| 147 |
+
|
| 148 |
+
/// Number of cp.async instructions to load one stage of operand A
|
| 149 |
+
static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount;
|
| 150 |
+
|
| 151 |
+
/// Number of cp.async instructions to load one stage of operand B
|
| 152 |
+
static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount;
|
| 153 |
+
|
| 154 |
+
/// Number of stages
|
| 155 |
+
static int const kStages = Stages;
|
| 156 |
+
|
| 157 |
+
/// Number of cp.async instructions to load on group of operand A
|
| 158 |
+
static int const kAccessesPerGroupA =
|
| 159 |
+
(AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) /
|
| 160 |
+
Base::kWarpGemmIterations;
|
| 161 |
+
|
| 162 |
+
/// Number of cp.async instructions to load on group of operand B
|
| 163 |
+
static int const kAccessesPerGroupB =
|
| 164 |
+
(AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) /
|
| 165 |
+
Base::kWarpGemmIterations;
|
| 166 |
+
};
|
| 167 |
+
|
| 168 |
+
static bool const kSmemContainsEntireMat = kMaxK <= Shape::kK * Stages;
|
| 169 |
+
static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireMat ? Stages : Stages - 1;
|
| 170 |
+
|
| 171 |
+
private:
|
| 172 |
+
using WarpLoadedFragmentA = typename Operator::FragmentA;
|
| 173 |
+
using WarpLoadedFragmentB = typename Operator::FragmentB;
|
| 174 |
+
using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
|
| 175 |
+
using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
|
| 176 |
+
|
| 177 |
+
private:
|
| 178 |
+
//
|
| 179 |
+
// Data members
|
| 180 |
+
//
|
| 181 |
+
|
| 182 |
+
/// Iterator to write threadblock-scoped tile of A operand to shared memory
|
| 183 |
+
SmemIteratorA smem_iterator_A_;
|
| 184 |
+
|
| 185 |
+
/// Iterator to write threadblock-scoped tile of B operand to shared memory
|
| 186 |
+
SmemIteratorB smem_iterator_B_;
|
| 187 |
+
|
| 188 |
+
bool prologue_done_;
|
| 189 |
+
|
| 190 |
+
// Set to `True` to ensure the accumulator will be zero outside the GEMM
|
| 191 |
+
// footprint
|
| 192 |
+
bool zero_outside_bounds_;
|
| 193 |
+
|
| 194 |
+
public:
|
| 195 |
+
/// Construct from tensor references
|
| 196 |
+
CUTLASS_DEVICE
|
| 197 |
+
CustomMmaMultistage(
|
| 198 |
+
///< Shared storage needed for internal use by threadblock-scoped GEMM
|
| 199 |
+
typename Base::SharedStorageA& shared_storageA,
|
| 200 |
+
typename Base::SharedStorageB& shared_storageB,
|
| 201 |
+
///< ID within the threadblock
|
| 202 |
+
int thread_idx,
|
| 203 |
+
///< ID of warp
|
| 204 |
+
int warp_idx,
|
| 205 |
+
///< ID of each thread within a warp
|
| 206 |
+
int lane_idx)
|
| 207 |
+
: Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx),
|
| 208 |
+
smem_iterator_A_(shared_storageA.ref(), thread_idx),
|
| 209 |
+
smem_iterator_B_(shared_storageB.ref(), thread_idx),
|
| 210 |
+
prologue_done_(false),
|
| 211 |
+
zero_outside_bounds_(false)
|
| 212 |
+
{
|
| 213 |
+
// Compute warp location within threadblock tile by mapping the warp_id to
|
| 214 |
+
// three coordinates:
|
| 215 |
+
// _m: the warp's position within the threadblock along the M dimension
|
| 216 |
+
// _n: the warp's position within the threadblock along the N dimension
|
| 217 |
+
// _k: the warp's position within the threadblock along the K dimension
|
| 218 |
+
|
| 219 |
+
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
|
| 220 |
+
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
|
| 221 |
+
|
| 222 |
+
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
|
| 223 |
+
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
|
| 224 |
+
|
| 225 |
+
// Add per-warp offsets in units of warp-level tiles
|
| 226 |
+
this->warp_tile_iterator_A_.add_tile_offset(
|
| 227 |
+
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
|
| 228 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 229 |
+
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
|
| 230 |
+
}
|
| 231 |
+
CUTLASS_DEVICE
|
| 232 |
+
CustomMmaMultistage(
|
| 233 |
+
///< Shared storage needed for internal use by threadblock-scoped GEMM
|
| 234 |
+
typename Base::SharedStorage& st,
|
| 235 |
+
///< ID within the threadblock
|
| 236 |
+
int thread_idx,
|
| 237 |
+
///< ID of warp
|
| 238 |
+
int warp_idx,
|
| 239 |
+
///< ID of each thread within a warp
|
| 240 |
+
int lane_idx)
|
| 241 |
+
: CustomMmaMultistage(st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx)
|
| 242 |
+
{
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
CUTLASS_DEVICE
|
| 246 |
+
bool set_prologue_done(bool value)
|
| 247 |
+
{
|
| 248 |
+
prologue_done_ = value;
|
| 249 |
+
return true;
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
CUTLASS_DEVICE
|
| 253 |
+
bool set_zero_outside_bounds(bool value)
|
| 254 |
+
{
|
| 255 |
+
zero_outside_bounds_ = value;
|
| 256 |
+
return true;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
template <bool kLoadA = true, bool kLoadB = true>
|
| 260 |
+
CUTLASS_DEVICE static void prologue(typename Base::SharedStorage& shared_storage,
|
| 261 |
+
///< iterator over A operand in global memory
|
| 262 |
+
IteratorA iterator_A,
|
| 263 |
+
///< iterator over B operand in global memory
|
| 264 |
+
IteratorB iterator_B,
|
| 265 |
+
int thread_idx,
|
| 266 |
+
int problem_size_k)
|
| 267 |
+
{
|
| 268 |
+
prologue<kLoadA, kLoadB>(shared_storage.operand_A,
|
| 269 |
+
shared_storage.operand_B,
|
| 270 |
+
iterator_A,
|
| 271 |
+
iterator_B,
|
| 272 |
+
thread_idx,
|
| 273 |
+
problem_size_k);
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
template <bool kLoadA = true, bool kLoadB = true>
|
| 277 |
+
CUTLASS_DEVICE static void prologue(typename Base::SharedStorageA& shared_storageA,
|
| 278 |
+
typename Base::SharedStorageB& shared_storageB,
|
| 279 |
+
///< iterator over A operand in global memory
|
| 280 |
+
IteratorA iterator_A,
|
| 281 |
+
///< iterator over B operand in global memory
|
| 282 |
+
IteratorB iterator_B,
|
| 283 |
+
int thread_idx,
|
| 284 |
+
int problem_size_k)
|
| 285 |
+
{
|
| 286 |
+
SmemIteratorA smem_iterator_A(shared_storageA.ref(), thread_idx);
|
| 287 |
+
SmemIteratorB smem_iterator_B(shared_storageB.ref(), thread_idx);
|
| 288 |
+
int32_t iter = (problem_size_k + Base::Shape::kK - 1) / Base::Shape::kK;
|
| 289 |
+
_prologue<kLoadA, kLoadB>(iterator_A, iterator_B, iter, smem_iterator_A, smem_iterator_B);
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
CUTLASS_DEVICE
|
| 293 |
+
void copy_tiles_and_advance(IteratorA& iterator_A,
|
| 294 |
+
IteratorB& iterator_B,
|
| 295 |
+
int group_start_A = 0,
|
| 296 |
+
int group_start_B = 0)
|
| 297 |
+
{
|
| 298 |
+
iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
|
| 299 |
+
this->smem_iterator_A_.set_iteration_index(group_start_A);
|
| 300 |
+
|
| 301 |
+
// Async Copy for operand A
|
| 302 |
+
CUTLASS_PRAGMA_UNROLL
|
| 303 |
+
for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
|
| 304 |
+
if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
|
| 305 |
+
typename IteratorA::AccessType* dst_ptr =
|
| 306 |
+
reinterpret_cast<typename IteratorA::AccessType*>(this->smem_iterator_A_.get());
|
| 307 |
+
|
| 308 |
+
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
|
| 309 |
+
IteratorA::ThreadMap::kElementsPerAccess /
|
| 310 |
+
IteratorA::kAccessesPerVector / 8;
|
| 311 |
+
|
| 312 |
+
CUTLASS_PRAGMA_UNROLL
|
| 313 |
+
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
|
| 314 |
+
auto gmem_ptr = iterator_A.get();
|
| 315 |
+
|
| 316 |
+
if (zero_outside_bounds_ ||
|
| 317 |
+
SharedMemoryClear == SharedMemoryClearOption::kZfill) {
|
| 318 |
+
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
|
| 319 |
+
dst_ptr + v, gmem_ptr, iterator_A.valid());
|
| 320 |
+
} else {
|
| 321 |
+
cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
|
| 322 |
+
dst_ptr + v, gmem_ptr, iterator_A.valid());
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
++iterator_A;
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
++this->smem_iterator_A_;
|
| 329 |
+
}
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
|
| 333 |
+
this->smem_iterator_B_.set_iteration_index(group_start_B);
|
| 334 |
+
|
| 335 |
+
// Async Copy for operand B
|
| 336 |
+
CUTLASS_PRAGMA_UNROLL
|
| 337 |
+
for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
|
| 338 |
+
if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
|
| 339 |
+
typename IteratorB::AccessType* dst_ptr =
|
| 340 |
+
reinterpret_cast<typename IteratorB::AccessType*>(this->smem_iterator_B_.get());
|
| 341 |
+
|
| 342 |
+
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
|
| 343 |
+
IteratorB::ThreadMap::kElementsPerAccess /
|
| 344 |
+
IteratorB::kAccessesPerVector / 8;
|
| 345 |
+
|
| 346 |
+
CUTLASS_PRAGMA_UNROLL
|
| 347 |
+
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
|
| 348 |
+
auto gmem_ptr = iterator_B.get();
|
| 349 |
+
|
| 350 |
+
if (zero_outside_bounds_ ||
|
| 351 |
+
SharedMemoryClear == SharedMemoryClearOption::kZfill) {
|
| 352 |
+
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
|
| 353 |
+
dst_ptr + v, gmem_ptr, iterator_B.valid());
|
| 354 |
+
} else {
|
| 355 |
+
cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
|
| 356 |
+
dst_ptr + v, gmem_ptr, iterator_B.valid());
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
++iterator_B;
|
| 360 |
+
}
|
| 361 |
+
++this->smem_iterator_B_;
|
| 362 |
+
}
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
template <bool kLoadA = true, bool kLoadB = true>
|
| 367 |
+
CUTLASS_DEVICE static void _prologue(IteratorA& iterator_A,
|
| 368 |
+
IteratorB& iterator_B,
|
| 369 |
+
int32_t& gemm_k_iterations,
|
| 370 |
+
SmemIteratorA& smem_iterator_A_,
|
| 371 |
+
SmemIteratorB& smem_iterator_B_)
|
| 372 |
+
{
|
| 373 |
+
// Issue several complete stages
|
| 374 |
+
CUTLASS_PRAGMA_UNROLL
|
| 375 |
+
for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations) {
|
| 376 |
+
iterator_A.clear_mask(gemm_k_iterations == 0);
|
| 377 |
+
iterator_B.clear_mask(gemm_k_iterations == 0);
|
| 378 |
+
|
| 379 |
+
iterator_A.set_iteration_index(0);
|
| 380 |
+
smem_iterator_A_.set_iteration_index(0);
|
| 381 |
+
|
| 382 |
+
// Async Copy for operand A
|
| 383 |
+
CUTLASS_PRAGMA_UNROLL
|
| 384 |
+
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
|
| 385 |
+
typename IteratorA::AccessType* dst_ptr =
|
| 386 |
+
reinterpret_cast<typename IteratorA::AccessType*>(smem_iterator_A_.get());
|
| 387 |
+
|
| 388 |
+
CUTLASS_PRAGMA_UNROLL
|
| 389 |
+
for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
|
| 390 |
+
int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
|
| 391 |
+
IteratorA::ThreadMap::kElementsPerAccess /
|
| 392 |
+
IteratorA::kAccessesPerVector / 8;
|
| 393 |
+
|
| 394 |
+
int src_bytes = (iterator_A.valid() ? kSrcBytes : 0);
|
| 395 |
+
|
| 396 |
+
if (kLoadA) {
|
| 397 |
+
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
|
| 398 |
+
dst_ptr + v, iterator_A.get(), iterator_A.valid());
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
++iterator_A;
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
++smem_iterator_A_;
|
| 405 |
+
}
|
| 406 |
+
|
| 407 |
+
iterator_B.set_iteration_index(0);
|
| 408 |
+
smem_iterator_B_.set_iteration_index(0);
|
| 409 |
+
|
| 410 |
+
// Async Copy for operand B
|
| 411 |
+
CUTLASS_PRAGMA_UNROLL
|
| 412 |
+
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
|
| 413 |
+
typename IteratorB::AccessType* dst_ptr =
|
| 414 |
+
reinterpret_cast<typename IteratorB::AccessType*>(smem_iterator_B_.get());
|
| 415 |
+
|
| 416 |
+
CUTLASS_PRAGMA_UNROLL
|
| 417 |
+
for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
|
| 418 |
+
int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
|
| 419 |
+
IteratorB::ThreadMap::kElementsPerAccess /
|
| 420 |
+
IteratorB::kAccessesPerVector / 8;
|
| 421 |
+
|
| 422 |
+
if (kLoadB) {
|
| 423 |
+
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
|
| 424 |
+
dst_ptr + v, iterator_B.get(), iterator_B.valid());
|
| 425 |
+
}
|
| 426 |
+
|
| 427 |
+
++iterator_B;
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
++smem_iterator_B_;
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
// Move to the next stage
|
| 434 |
+
iterator_A.add_tile_offset({0, 1});
|
| 435 |
+
iterator_B.add_tile_offset({1, 0});
|
| 436 |
+
|
| 437 |
+
smem_iterator_A_.add_tile_offset({0, 1});
|
| 438 |
+
smem_iterator_B_.add_tile_offset({1, 0});
|
| 439 |
+
|
| 440 |
+
// Defines the boundary of a stage of cp.async.
|
| 441 |
+
cutlass::arch::cp_async_fence();
|
| 442 |
+
}
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
/// Perform a threadblock-scoped matrix multiply-accumulate
|
| 446 |
+
CUTLASS_DEVICE
|
| 447 |
+
void operator()(
|
| 448 |
+
///< problem size of GEMM
|
| 449 |
+
int gemm_k_iterations,
|
| 450 |
+
///< destination accumulator tile
|
| 451 |
+
FragmentC& accum,
|
| 452 |
+
///< iterator over A operand in global memory
|
| 453 |
+
IteratorA iterator_A,
|
| 454 |
+
///< iterator over B operand in global memory
|
| 455 |
+
IteratorB iterator_B,
|
| 456 |
+
///< initial value of accumulator
|
| 457 |
+
FragmentC const& src_accum)
|
| 458 |
+
{
|
| 459 |
+
//
|
| 460 |
+
// Prologue
|
| 461 |
+
//
|
| 462 |
+
|
| 463 |
+
if (!prologue_done_) {
|
| 464 |
+
_prologue<true, true>(
|
| 465 |
+
iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_);
|
| 466 |
+
} else if (!kSmemContainsEntireMat) {
|
| 467 |
+
_prologue<false, false>(
|
| 468 |
+
iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_);
|
| 469 |
+
} else {
|
| 470 |
+
gemm_k_iterations -= kNumStagesConcurrentLoad;
|
| 471 |
+
}
|
| 472 |
+
|
| 473 |
+
// Perform accumulation in the 'd' output operand
|
| 474 |
+
accum = src_accum;
|
| 475 |
+
|
| 476 |
+
//
|
| 477 |
+
// Clear the remaining tiles of SMEM. This is a functional requirement for
|
| 478 |
+
// some kernels so that all accumulator elements outside the GEMM footprint
|
| 479 |
+
// are zero.
|
| 480 |
+
//
|
| 481 |
+
|
| 482 |
+
if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
|
| 483 |
+
/// Iterator to write threadblock-scoped tile of A operand to shared
|
| 484 |
+
/// memory
|
| 485 |
+
SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
|
| 486 |
+
|
| 487 |
+
typename IteratorA::AccessType zero_A;
|
| 488 |
+
zero_A.clear();
|
| 489 |
+
|
| 490 |
+
last_smem_iterator_A.set_iteration_index(0);
|
| 491 |
+
|
| 492 |
+
// Async Copy for operand A
|
| 493 |
+
CUTLASS_PRAGMA_UNROLL
|
| 494 |
+
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
|
| 495 |
+
typename IteratorA::AccessType* dst_ptr =
|
| 496 |
+
reinterpret_cast<typename IteratorA::AccessType*>(last_smem_iterator_A.get());
|
| 497 |
+
|
| 498 |
+
*dst_ptr = zero_A;
|
| 499 |
+
|
| 500 |
+
++last_smem_iterator_A;
|
| 501 |
+
}
|
| 502 |
+
|
| 503 |
+
/// Iterator to write threadblock-scoped tile of B operand to shared
|
| 504 |
+
/// memory
|
| 505 |
+
SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_);
|
| 506 |
+
typename IteratorB::AccessType zero_B;
|
| 507 |
+
|
| 508 |
+
zero_B.clear();
|
| 509 |
+
last_smem_iterator_B.set_iteration_index(0);
|
| 510 |
+
|
| 511 |
+
// Async Copy for operand B
|
| 512 |
+
CUTLASS_PRAGMA_UNROLL
|
| 513 |
+
for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
|
| 514 |
+
typename IteratorB::AccessType* dst_ptr =
|
| 515 |
+
reinterpret_cast<typename IteratorB::AccessType*>(last_smem_iterator_B.get());
|
| 516 |
+
|
| 517 |
+
*dst_ptr = zero_B;
|
| 518 |
+
|
| 519 |
+
++last_smem_iterator_B;
|
| 520 |
+
}
|
| 521 |
+
}
|
| 522 |
+
|
| 523 |
+
// Waits until kStages-2 stages have committed.
|
| 524 |
+
cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
|
| 525 |
+
__syncthreads();
|
| 526 |
+
|
| 527 |
+
// Pair of fragments used to overlap shared memory loads and math
|
| 528 |
+
// instructions
|
| 529 |
+
WarpLoadedFragmentA warp_loaded_frag_A[2];
|
| 530 |
+
WarpLoadedFragmentB warp_loaded_frag_B[2];
|
| 531 |
+
WarpTransformedFragmentA warp_transformed_frag_A[2];
|
| 532 |
+
WarpTransformedFragmentB warp_transformed_frag_B[2];
|
| 533 |
+
|
| 534 |
+
Operator warp_mma;
|
| 535 |
+
|
| 536 |
+
this->warp_tile_iterator_A_.set_kgroup_index(0);
|
| 537 |
+
this->warp_tile_iterator_B_.set_kgroup_index(0);
|
| 538 |
+
|
| 539 |
+
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
|
| 540 |
+
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
|
| 541 |
+
|
| 542 |
+
++this->warp_tile_iterator_A_;
|
| 543 |
+
++this->warp_tile_iterator_B_;
|
| 544 |
+
|
| 545 |
+
iterator_A.clear_mask(gemm_k_iterations == 0);
|
| 546 |
+
iterator_B.clear_mask(gemm_k_iterations == 0);
|
| 547 |
+
|
| 548 |
+
int smem_write_stage_idx = Base::kStages - 1;
|
| 549 |
+
int smem_read_stage_idx = 0;
|
| 550 |
+
|
| 551 |
+
warp_mma.transform(warp_transformed_frag_A[0],
|
| 552 |
+
warp_transformed_frag_B[0],
|
| 553 |
+
warp_loaded_frag_A[0],
|
| 554 |
+
warp_loaded_frag_B[0]);
|
| 555 |
+
|
| 556 |
+
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
|
| 557 |
+
// accumulator and this temporary accumulator is added to the final
|
| 558 |
+
// accumulator once in every mainloop iteration.
|
| 559 |
+
plus<FragmentC> plus_accum;
|
| 560 |
+
|
| 561 |
+
FragmentC tmp_accum;
|
| 562 |
+
|
| 563 |
+
if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value ||
|
| 564 |
+
platform::is_same<typename Operator::MathOperator,
|
| 565 |
+
arch::OpMultiplyAddComplexFastF32>::value) {
|
| 566 |
+
tmp_accum.clear();
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
//
|
| 570 |
+
// Mainloop
|
| 571 |
+
//
|
| 572 |
+
|
| 573 |
+
CUTLASS_GEMM_LOOP
|
| 574 |
+
for (; gemm_k_iterations > (-kNumStagesConcurrentLoad);) {
|
| 575 |
+
//
|
| 576 |
+
// Loop over GEMM K dimension
|
| 577 |
+
//
|
| 578 |
+
|
| 579 |
+
// Computes a warp-level GEMM on data held in shared memory
|
| 580 |
+
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
|
| 581 |
+
CUTLASS_PRAGMA_UNROLL
|
| 582 |
+
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
|
| 583 |
+
// Load warp-level tiles from shared memory, wrapping to k offset if
|
| 584 |
+
// this is the last group as the case may be.
|
| 585 |
+
|
| 586 |
+
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) %
|
| 587 |
+
Base::kWarpGemmIterations);
|
| 588 |
+
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
|
| 589 |
+
Base::kWarpGemmIterations);
|
| 590 |
+
|
| 591 |
+
// In case of a non-circular buffer ("kSmemContainsEntireMat")
|
| 592 |
+
// make sure we don't load out of bounds data.
|
| 593 |
+
if (!kSmemContainsEntireMat || gemm_k_iterations > (-kNumStagesConcurrentLoad) ||
|
| 594 |
+
warp_mma_k < Base::kWarpGemmIterations - 1) {
|
| 595 |
+
this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
|
| 596 |
+
this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
|
| 597 |
+
}
|
| 598 |
+
|
| 599 |
+
++this->warp_tile_iterator_A_;
|
| 600 |
+
++this->warp_tile_iterator_B_;
|
| 601 |
+
|
| 602 |
+
if (warp_mma_k > 0)
|
| 603 |
+
warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
|
| 604 |
+
warp_transformed_frag_B[warp_mma_k % 2],
|
| 605 |
+
warp_loaded_frag_A[warp_mma_k % 2],
|
| 606 |
+
warp_loaded_frag_B[warp_mma_k % 2]);
|
| 607 |
+
|
| 608 |
+
if (platform::is_same<typename Operator::MathOperator,
|
| 609 |
+
arch::OpMultiplyAddFastF32>::value ||
|
| 610 |
+
platform::is_same<typename Operator::MathOperator,
|
| 611 |
+
arch::OpMultiplyAddComplexFastF32>::value) {
|
| 612 |
+
warp_mma(tmp_accum,
|
| 613 |
+
warp_transformed_frag_A[warp_mma_k % 2],
|
| 614 |
+
warp_transformed_frag_B[warp_mma_k % 2],
|
| 615 |
+
tmp_accum);
|
| 616 |
+
|
| 617 |
+
if (warp_mma_k == 0) {
|
| 618 |
+
accum = plus_accum(accum, tmp_accum);
|
| 619 |
+
tmp_accum.clear();
|
| 620 |
+
}
|
| 621 |
+
} else {
|
| 622 |
+
warp_mma(accum,
|
| 623 |
+
warp_transformed_frag_A[warp_mma_k % 2],
|
| 624 |
+
warp_transformed_frag_B[warp_mma_k % 2],
|
| 625 |
+
accum);
|
| 626 |
+
}
|
| 627 |
+
|
| 628 |
+
// Issue global->shared copies for the this stage
|
| 629 |
+
if (!kSmemContainsEntireMat && warp_mma_k < Base::kWarpGemmIterations - 1) {
|
| 630 |
+
int group_start_iteration_A, group_start_iteration_B;
|
| 631 |
+
|
| 632 |
+
group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
|
| 633 |
+
group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
|
| 634 |
+
|
| 635 |
+
copy_tiles_and_advance(
|
| 636 |
+
iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B);
|
| 637 |
+
}
|
| 638 |
+
|
| 639 |
+
if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
|
| 640 |
+
if (!kSmemContainsEntireMat) {
|
| 641 |
+
int group_start_iteration_A, group_start_iteration_B;
|
| 642 |
+
group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
|
| 643 |
+
group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
|
| 644 |
+
|
| 645 |
+
copy_tiles_and_advance(iterator_A,
|
| 646 |
+
iterator_B,
|
| 647 |
+
group_start_iteration_A,
|
| 648 |
+
group_start_iteration_B);
|
| 649 |
+
}
|
| 650 |
+
|
| 651 |
+
// Inserts a memory fence between stages of cp.async instructions.
|
| 652 |
+
cutlass::arch::cp_async_fence();
|
| 653 |
+
|
| 654 |
+
// Waits until kStages-2 stages have committed.
|
| 655 |
+
cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
|
| 656 |
+
__syncthreads();
|
| 657 |
+
|
| 658 |
+
// Move to the next stage
|
| 659 |
+
iterator_A.add_tile_offset({0, 1});
|
| 660 |
+
iterator_B.add_tile_offset({1, 0});
|
| 661 |
+
|
| 662 |
+
this->smem_iterator_A_.add_tile_offset({0, 1});
|
| 663 |
+
this->smem_iterator_B_.add_tile_offset({1, 0});
|
| 664 |
+
|
| 665 |
+
// Add negative offsets to return iterators to the 'start' of the
|
| 666 |
+
// circular buffer in shared memory
|
| 667 |
+
if (smem_write_stage_idx == (Base::kStages - 1)) {
|
| 668 |
+
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
|
| 669 |
+
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
|
| 670 |
+
smem_write_stage_idx = 0;
|
| 671 |
+
} else {
|
| 672 |
+
++smem_write_stage_idx;
|
| 673 |
+
}
|
| 674 |
+
|
| 675 |
+
if (!kSmemContainsEntireMat && smem_read_stage_idx == (Base::kStages - 1)) {
|
| 676 |
+
this->warp_tile_iterator_A_.add_tile_offset(
|
| 677 |
+
{0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
|
| 678 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 679 |
+
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
|
| 680 |
+
smem_read_stage_idx = 0;
|
| 681 |
+
} else {
|
| 682 |
+
++smem_read_stage_idx;
|
| 683 |
+
}
|
| 684 |
+
|
| 685 |
+
--gemm_k_iterations;
|
| 686 |
+
iterator_A.clear_mask(gemm_k_iterations == 0);
|
| 687 |
+
iterator_B.clear_mask(gemm_k_iterations == 0);
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
// Do any conversions feeding the first stage at the end of the loop so
|
| 691 |
+
// we can start right away on mma instructions
|
| 692 |
+
if (warp_mma_k + 1 == Base::kWarpGemmIterations)
|
| 693 |
+
warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
|
| 694 |
+
warp_transformed_frag_B[(warp_mma_k + 1) % 2],
|
| 695 |
+
warp_loaded_frag_A[(warp_mma_k + 1) % 2],
|
| 696 |
+
warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
|
| 697 |
+
}
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value ||
|
| 701 |
+
platform::is_same<typename Operator::MathOperator,
|
| 702 |
+
arch::OpMultiplyAddComplexFastF32>::value) {
|
| 703 |
+
accum = plus_accum(accum, tmp_accum);
|
| 704 |
+
}
|
| 705 |
+
}
|
| 706 |
+
};
|
| 707 |
+
|
| 708 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 709 |
+
|
| 710 |
+
} // namespace threadblock
|
| 711 |
+
} // namespace gemm
|
| 712 |
+
} // namespace cutlass
|
| 713 |
+
|
| 714 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h
ADDED
|
@@ -0,0 +1,388 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
 *reserved. SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*! \file
    \brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/

#pragma once

#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_conversion.h"

#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"

#include "custom_mma_base.h"
#include "cutlass/gemm/gemm.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace gemm {
namespace threadblock {

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
    /// Size of the Gemm problem - concept: gemm::GemmShape<>
    typename Shape_,
    /// Iterates over tiles of A operand in global memory
    // (concept: ReadableTileIterator | ForwardTileIterator |
    // MaskedTileIterator)
    typename IteratorA_,
    /// Iterates over tiles of A operand in shared memory
    /// (concept: WriteableTileIterator | RandomAccessTileIterator)
    typename SmemIteratorA_,
    /// Iterates over tiles of B operand in global memory
    // (concept: ReadableTileIterator | ForwardTileIterator |
    // MaskedTileIterator)
    typename IteratorB_,
    /// Iterates over tiles of B operand in shared memory
    /// (concept: WriteableTileIterator | RandomAccessTileIterator)
    typename SmemIteratorB_,
    /// Data type of accumulator matrix
    typename ElementC_,
    /// Data type of accumulator matrix
    typename LayoutC_,
    /// Policy describing tuning details (concept: MmaPolicy)
    typename Policy_,
    /// Transformation applied to A operand
    typename TransformA_ = NumericArrayConverter<typename SmemIteratorA_::Element,
                                                 typename IteratorA_::Element,
                                                 IteratorA_::Fragment::kElements>,
    ///
    /// Transformation applied to B operand
    typename TransformB_ = NumericArrayConverter<typename SmemIteratorB_::Element,
                                                 typename IteratorB_::Element,
                                                 IteratorB_::Fragment::kElements>,
    /// Used for partial specialization
    typename Enable = bool>
class CustomMmaPipelined : public CustomMmaBase<Shape_, Policy_, 2> {
public:
    ///< Base class
    using Base = CustomMmaBase<Shape_, Policy_, 2>;

    using Shape = Shape_;          ///< Size of the Gemm problem - concept: gemm::GemmShape<>
    using IteratorA = IteratorA_;  ///< Iterates over tiles of A operand in global memory
    using IteratorB = IteratorB_;  ///< Iterates over tiles of B operand in global memory
    using ElementC = ElementC_;    ///< Data type of accumulator matrix
    using LayoutC = LayoutC_;      ///< Layout of accumulator matrix
    using Policy = Policy_;        ///< Policy describing tuning details

    using SmemIteratorA = SmemIteratorA_;
    using SmemIteratorB = SmemIteratorB_;

    using TransformA = TransformA_;
    using TransformB = TransformB_;

    //
    // Dependent types
    //

    /// Fragment of operand A loaded from global memory
    using FragmentA = typename IteratorA::Fragment;

    /// Fragment of operand B loaded from global memory
    using FragmentB = typename IteratorB::Fragment;

    /// Fragment of accumulator tile
    using FragmentC = typename Policy::Operator::FragmentC;

    /// Warp-level Mma
    using Operator = typename Policy::Operator;

    /// Obtain the arch tag from the warp-level operator
    using ArchTag = typename Policy::Operator::ArchTag;

    /// Complex transform on A operand
    static ComplexTransform const kTransformA = Operator::kTransformA;

    /// Complex transform on B operand
    static ComplexTransform const kTransformB = Operator::kTransformB;

    // staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline)
    static_assert((Base::kStages == 2), "MmaPipelined requires kStages set to value 2");

    static bool const kSmemContainsEntireMat = false;

private:
    using WarpFragmentA = typename Operator::FragmentA;
    using WarpFragmentB = typename Operator::FragmentB;

protected:
    /// Iterator to write threadblock-scoped tile of A operand to shared memory
    SmemIteratorA smem_iterator_A_;

    /// Iterator to write threadblock-scoped tile of B operand to shared memory
    SmemIteratorB smem_iterator_B_;

public:
    /// Construct from tensor references
    CUTLASS_DEVICE
    CustomMmaPipelined(typename Base::SharedStorageA& shared_storageA,
                       typename Base::SharedStorageB& shared_storageB,
                       int thread_idx,  ///< ID within the threadblock
                       int warp_idx,    ///< ID of warp
                       int lane_idx     ///< ID of each thread within a warp
                       )
        : Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx),
          smem_iterator_A_(shared_storageA.ref(), thread_idx),
          smem_iterator_B_(shared_storageB.ref(), thread_idx)
    {
        // Compute warp location within threadblock tile by mapping the warp_id to
        // three coordinates:
        // _m: the warp's position within the threadblock along the M dimension
        // _n: the warp's position within the threadblock along the N dimension
        // _k: the warp's position within the threadblock along the K dimension

        int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
        int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);

        int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
        int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;

        // Add per-warp offsets in units of warp-level tiles
        this->warp_tile_iterator_A_.add_tile_offset(
            {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
        this->warp_tile_iterator_B_.add_tile_offset(
            {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
    }
    CUTLASS_DEVICE
    CustomMmaPipelined(
        ///< Shared storage needed for internal use by threadblock-scoped GEMM
        typename Base::SharedStorage& st,
        ///< ID within the threadblock
        int thread_idx,
        ///< ID of warp
        int warp_idx,
        ///< ID of each thread within a warp
        int lane_idx)
        : CustomMmaPipelined(st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx)
    {
    }

    CUTLASS_DEVICE
    bool set_prologue_done(bool value)
    {
        // NOT IMPLEMENTED FOR PIPELINED
    }

    CUTLASS_DEVICE
    bool set_zero_outside_bounds(bool value)
    {
        // NOT NEEDED FOR PIPELINED
        // shared memory will always be zero-filled
    }

    template <bool kLoadA = true, bool kLoadB = true>
    CUTLASS_DEVICE static void prologue(typename Base::SharedStorage& shared_storage,
                                        ///< iterator over A operand in global memory
                                        IteratorA iterator_A,
                                        ///< iterator over B operand in global memory
                                        IteratorB iterator_B,
                                        int thread_idx,
                                        int problem_size_k)
    {
        prologue<kLoadA, kLoadB>(shared_storage.operand_A,
                                 shared_storage.operand_B,
                                 iterator_A,
                                 iterator_B,
                                 thread_idx,
                                 problem_size_k);
    }

    template <bool kLoadA = true, bool kLoadB = true>
    CUTLASS_DEVICE static void prologue(typename Base::SharedStorageA& shared_storageA,
                                        typename Base::SharedStorageB& shared_storageB,
                                        ///< iterator over A operand in global memory
                                        IteratorA iterator_A,
                                        ///< iterator over B operand in global memory
                                        IteratorB iterator_B,
                                        int thread_idx,
                                        int problem_size_k)
    {
        // NOT IMPLEMENTED FOR PIPELINED
    }

    /// Perform a threadblock-scoped matrix multiply-accumulate
    CUTLASS_DEVICE
    void operator()(
        int gemm_k_iterations,                  ///< number of iterations of the mainloop
        FragmentC& accum,                       ///< destination accumulator tile
        IteratorA iterator_A,                   ///< iterator over A operand in global memory
        IteratorB iterator_B,                   ///< iterator over B operand in global memory
        FragmentC const& src_accum,             ///< source accumulator tile
        TransformA transform_A = TransformA(),  ///< transformation applied to A fragment
        TransformB transform_B = TransformB())
    {  ///< transformation applied to B fragment

        //
        // Prologue
        //

        // Perform accumulation in the 'd' output operand
        accum = src_accum;

        FragmentA tb_frag_A;
        FragmentB tb_frag_B;

        tb_frag_A.clear();
        tb_frag_B.clear();

        // The last kblock is loaded in the prolog
        iterator_A.load(tb_frag_A);
        iterator_B.load(tb_frag_B);

        ++iterator_A;
        ++iterator_B;

        this->smem_iterator_A_.store(transform_A(tb_frag_A));
        this->smem_iterator_B_.store(transform_B(tb_frag_B));

        ++this->smem_iterator_A_;
        ++this->smem_iterator_B_;

        __syncthreads();

        // Pair of fragments used to overlap shared memory loads and math
        // instructions
        WarpFragmentA warp_frag_A[2];
        WarpFragmentB warp_frag_B[2];

        this->warp_tile_iterator_A_.set_kgroup_index(0);
        this->warp_tile_iterator_B_.set_kgroup_index(0);

        this->warp_tile_iterator_A_.load(warp_frag_A[0]);
        this->warp_tile_iterator_B_.load(warp_frag_B[0]);

        ++this->warp_tile_iterator_A_;
        ++this->warp_tile_iterator_B_;

        Operator warp_mma;

        int smem_write_stage_idx = 1;

        // Avoid reading out of bounds
        iterator_A.clear_mask(gemm_k_iterations <= 1);
        iterator_B.clear_mask(gemm_k_iterations <= 1);

        // Issue loads during the first warp-level matrix multiply-add *AFTER*
        // issuing shared memory loads (which have the tightest latency requirement).

        //
        // Mainloop
        //

        // Note: The main loop does not support Base::kWarpGemmIterations == 2.
        CUTLASS_GEMM_LOOP
        for (; gemm_k_iterations > 0; --gemm_k_iterations) {
            //
            // Loop over GEMM K dimension
            //

            CUTLASS_PRAGMA_UNROLL
            for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
                // Load warp-level tiles from shared memory, wrapping to k offset if
                // this is the last group as the case may be.

                if (warp_mma_k == Base::kWarpGemmIterations - 1) {
                    // Write fragments to shared memory
                    this->smem_iterator_A_.store(transform_A(tb_frag_A));

                    this->smem_iterator_B_.store(transform_B(tb_frag_B));

                    __syncthreads();

                    ++this->smem_iterator_A_;
                    ++this->smem_iterator_B_;

                    // Add negative offsets to return iterators to the 'start' of the
                    // circular buffer in shared memory
                    if (smem_write_stage_idx == 1) {
                        this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
                        this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
                    } else {
                        this->warp_tile_iterator_A_.add_tile_offset(
                            {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
                        this->warp_tile_iterator_B_.add_tile_offset(
                            {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
                    }

                    smem_write_stage_idx ^= 1;
                }

                this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) %
                                                             Base::kWarpGemmIterations);
                this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
                                                             Base::kWarpGemmIterations);

                this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
                this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);

                ++this->warp_tile_iterator_A_;
                ++this->warp_tile_iterator_B_;

                if (warp_mma_k == 0) {
                    iterator_A.load(tb_frag_A);
                    iterator_B.load(tb_frag_B);

                    ++iterator_A;
                    ++iterator_B;

                    // Avoid reading out of bounds if this was the last loop iteration
                    iterator_A.clear_mask(gemm_k_iterations <= 2);
                    iterator_B.clear_mask(gemm_k_iterations <= 2);
                }

                warp_mma(accum, warp_frag_A[warp_mma_k % 2], warp_frag_B[warp_mma_k % 2], accum);
            }
        }
    }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace threadblock
} // namespace gemm
} // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
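The file above implements the classic two-stage (double-buffered) pipeline: while the warp-level MMA consumes fragment k % 2, the loads for fragment (k + 1) % 2 are already in flight. Below is a self-contained C++ sketch of that ping-pong pattern only; Fragment, load_fragment, and warp_mma are toy stand-ins invented for illustration and are not the CUTLASS types used in the file.

#include <cstdio>
#include <vector>

// Toy stand-ins for the warp fragments and the warp-level MMA (illustrative only).
struct Fragment {
    int value;
};
Fragment load_fragment(const std::vector<int>& src, int k) { return Fragment{src[k]}; }
int warp_mma(int accum, const Fragment& frag) { return accum + frag.value; }

int main()
{
    std::vector<int> tiles = {1, 2, 3, 4, 5};
    Fragment frag[2];
    frag[0] = load_fragment(tiles, 0);  // prologue: first fragment is preloaded
    int accum = 0;

    for (int k = 0; k < static_cast<int>(tiles.size()); ++k) {
        if (k + 1 < static_cast<int>(tiles.size())) {
            // Issue the next load into the *other* buffer while this one is consumed.
            frag[(k + 1) % 2] = load_fragment(tiles, k + 1);
        }
        accum = warp_mma(accum, frag[k % 2]);  // math overlaps the load above
    }
    std::printf("accum = %d\n", accum);
    return 0;
}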
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h
ADDED
@@ -0,0 +1,191 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holdvr nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*! \file
    \brief Cutlass provides helper template functions to figure out the right
    datastructures to instantiate to run a GEMM with various parameters (see
    `cutlass/gemm/threadblock/default_mma.h`). However, due to template
    instantiation priority rules, it will only create an MmaMultiStage with
    kStages=3 (otherwise creates an MmePipelined - which is not compatible with
    FastF32). kStages=3 uses too much shared memory and we want to use kStages=2,
    so we just copy-pasted some code from `default_mma.h` and
    `default_mma_core.h` files and wrapped this template to allow our usecase.

    This is really only for the FastF32 case - aka using TensorCores with fp32.
*/

#pragma once

#include "cutlass/gemm/threadblock/default_mma.h"
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"

namespace cutlass {
namespace gemm {
namespace threadblock {

template <
    /// Element type for A matrix operand
    typename ElementA,
    /// Layout type for A matrix operand
    typename LayoutA,
    /// Access granularity of A matrix in units of elements
    int kAlignmentA,
    /// Element type for B matrix operand
    typename ElementB,
    /// Layout type for B matrix operand
    typename LayoutB,
    /// Access granularity of B matrix in units of elements
    int kAlignmentB,
    /// Element type for internal accumulation
    typename ElementAccumulator,
    /// Layout type for C and D matrix operand
    typename LayoutC,
    /// Operator class tag
    typename OperatorClass,
    /// Tag indicating architecture to tune for
    typename ArchTag,
    /// Threadblock-level tile size (concept: GemmShape)
    typename ThreadblockShape,
    /// Warp-level tile size (concept: GemmShape)
    typename WarpShape,
    /// Instruction-level tile size (concept: GemmShape)
    typename InstructionShape,
    /// Number of stages used in the pipelined mainloop
    int Stages,
    /// Operation performed by GEMM
    typename Operator,
    typename Enable_ = void>
struct FindDefaultMma {
    static constexpr bool AccumulatorsInRowMajor = false;
    static constexpr SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone;
    using DefaultMma = cutlass::gemm::threadblock::DefaultMma<ElementA,
                                                              LayoutA,
                                                              kAlignmentA,
                                                              ElementB,
                                                              LayoutB,
                                                              kAlignmentB,
                                                              ElementAccumulator,
                                                              LayoutC,
                                                              OperatorClass,
                                                              ArchTag,
                                                              ThreadblockShape,
                                                              WarpShape,
                                                              InstructionShape,
                                                              Stages,
                                                              Operator,
                                                              AccumulatorsInRowMajor,
                                                              SharedMemoryClear>;
};

/// Specialization for sm80 / FastF32 / multistage with kStages=2
template <typename ElementA_,
          /// Layout type for A matrix operand
          typename LayoutA_,
          /// Access granularity of A matrix in units of elements
          int kAlignmentA,
          typename ElementB_,
          /// Layout type for B matrix operand
          typename LayoutB_,
          /// Access granularity of B matrix in units of elements
          int kAlignmentB,
          typename ElementAccumulator,
          /// Threadblock-level tile size (concept: GemmShape)
          typename ThreadblockShape,
          /// Warp-level tile size (concept: GemmShape)
          typename WarpShape,
          /// Instruction-level tile size (concept: GemmShape)
          typename InstructionShape,
          int kStages,
          typename Operator>
struct FindDefaultMma<ElementA_,
                      LayoutA_,
                      kAlignmentA,
                      ElementB_,
                      LayoutB_,
                      kAlignmentB,
                      ElementAccumulator,
                      layout::RowMajor,
                      arch::OpClassTensorOp,
                      arch::Sm80,
                      ThreadblockShape,
                      WarpShape,
                      InstructionShape,
                      kStages,
                      Operator,
                      typename cutlass::platform::enable_if<(kAlignmentA > 1)>::type> {
    using LayoutC = layout::RowMajor;
    using OperatorClass = arch::OpClassTensorOp;
    using ArchTag = arch::Sm80;

    using DefaultMma_ = cutlass::gemm::threadblock::DefaultMma<ElementA_,
                                                               LayoutA_,
                                                               kAlignmentA,
                                                               ElementB_,
                                                               LayoutB_,
                                                               kAlignmentB,
                                                               ElementAccumulator,
                                                               LayoutC,
                                                               OperatorClass,
                                                               ArchTag,
                                                               ThreadblockShape,
                                                               WarpShape,
                                                               InstructionShape,
                                                               3,
                                                               Operator>;
    struct DefaultMma : DefaultMma_ {
        using MmaCore_ = typename DefaultMma_::MmaCore;
        // Define the threadblock-scoped multistage matrix multiply
        using ThreadblockMma =
            cutlass::gemm::threadblock::MmaMultistage<typename MmaCore_::Shape,
                                                      typename DefaultMma_::IteratorA,
                                                      typename MmaCore_::SmemIteratorA,
                                                      MmaCore_::kCacheOpA,
                                                      typename DefaultMma_::IteratorB,
                                                      typename MmaCore_::SmemIteratorB,
                                                      MmaCore_::kCacheOpB,
                                                      ElementAccumulator,
                                                      LayoutC,
                                                      typename MmaCore_::MmaPolicy,
                                                      kStages>;
    };
};

} // namespace threadblock
} // namespace gemm
} // namespace cutlass
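The wrapper above exists only to swap the number of mainloop stages: the primary FindDefaultMma forwards to CUTLASS's DefaultMma, while the Sm80/TensorOp specialization rebuilds ThreadblockMma as an MmaMultistage with kStages = 2 to cut the shared-memory footprint. The same partial-specialization trick in miniature is sketched below with made-up names (DefaultConfig, FindConfig) rather than the CUTLASS API, purely as an assumption-labeled illustration.

#include <cstdio>
#include <type_traits>

// Generic "default" configuration, loosely analogous to CUTLASS's DefaultMma.
template <typename Element, int Stages>
struct DefaultConfig {
    static constexpr int kStages = Stages;  // whatever the caller asked for
};

// Wrapper that normally just forwards to the default...
template <typename Element, int Stages, typename Enable = void>
struct FindConfig {
    using Config = DefaultConfig<Element, Stages>;
};

// ...but a partial specialization overrides the float path to force 2 stages,
// mirroring how FindDefaultMma rebuilds ThreadblockMma with kStages = 2.
template <int Stages>
struct FindConfig<float, Stages, std::enable_if_t<(Stages < 3)>> {
    struct Config : DefaultConfig<float, 3> {
        static constexpr int kStages = 2;  // smaller shared-memory footprint
    };
};

int main()
{
    std::printf("%d %d\n",
                FindConfig<int, 3>::Config::kStages,      // 3: untouched default
                FindConfig<float, 2>::Config::kStages);   // 2: overridden path
    return 0;
}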
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h
ADDED
@@ -0,0 +1,347 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holdvr nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

#include "cutlass/functional.h"
#include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/matrix_shape.h"

/*
TensorCores have different accumulator layouts.
This file provides a class to easily map the accumulator
i-th element with the corresponding matrix row/col.
*/

template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm80 {
    static_assert(cutlass::platform::is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
                  "only RowMajor is supported");

    using Policy = typename T::Policy;
    using InstructionShape = typename T::InstructionShape;
    using OpDelta = typename T::OpDelta;
    using Shape = typename T::Shape;
    static int const kElementsPerAccess = InstructionShape::kN / 4;
    static int const kRowsPerTile = 8;
    static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;

    static cutlass::MatrixCoord CUTLASS_DEVICE
    get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset)
    {
        int quad = (lane_id >> 2);
        int lane_in_quad = (lane_id & 3);
        return cutlass::MatrixCoord(
            quad + tile_offset.row() * Shape::kRow,
            lane_in_quad * kElementsPerAccess + tile_offset.column() * Shape::kColumn);
    }

    template <typename FA, typename FB, typename FC>
    CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset,
                                           FA beginRow,
                                           FB op,
                                           FC endRow)
    {
        // See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h
        CUTLASS_PRAGMA_UNROLL
        for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
            CUTLASS_PRAGMA_UNROLL
            for (int row = 0; row < kAccumulatorRows; ++row) {
                int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile +
                              lane_offset.row();
                beginRow(accum_m);

                CUTLASS_PRAGMA_UNROLL
                for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
                    int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
                                          (mma_n * Policy::MmaIterations::kRow + mma_m);
                    CUTLASS_PRAGMA_UNROLL
                    for (int col = 0; col < kElementsPerAccess; ++col) {
                        int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col +
                                      lane_offset.column();
                        int idx = mma_accum_start + row * kElementsPerAccess + col;
                        op(accum_m, accum_n, idx);
                    }
                }

                endRow(accum_m);
            }
        }
    }

    template <typename DT, typename F>
    CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn)
    {
        // In each warp, 4 threads will work on the same row
        // - the ones with the same `quad`
        auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1);
        myValue = fn(myValue, otherV);
        otherV = __shfl_xor_sync(0xffffffff, myValue, 2);
        myValue = fn(myValue, otherV);
        int lane_in_quad = (lane_id & 3);
        return lane_in_quad == 0;
    }
};

template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm70 {
    static_assert(cutlass::platform::is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
                  "only RowMajor is supported");

    using Policy = typename T::Policy;
    using InstructionShape = typename T::InstructionShape;
    using OpDelta = typename T::OpDelta;
    using Shape = typename T::Shape;
    using Element = accum_t;

    static int const kElementsPerPartial = 4;
    using EleShapePerPatial =
        typename cutlass::platform::conditional<cutlass::platform::is_same<Element, float>::value,
                                                cutlass::MatrixShape<2, 2>,
                                                cutlass::MatrixShape<1, 4>>::type;
    static int const kElementsPerMma = 8;
    static int const kAccumulatorPatials = 2;
    using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;

    static cutlass::MatrixCoord CUTLASS_DEVICE
    get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset)
    {
        int quad = (lane_id >> 2);
        int lane_in_quad = (lane_id & 3);
        int accum_m, accum_n;

        if (cutlass::platform::is_same<Element, float>::value) {
            // (quad[2],quad[0])+lane_in_quad[0]
            accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
            // (quad[1])+lane_in_quad[1]
            accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
                      (lane_in_quad & 2);
        } else {
            accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad;  // (quad[2],quad[0])
            accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
        }
        return cutlass::MatrixCoord(accum_m + tile_offset.row() * Shape::kRow,
                                    accum_n + tile_offset.column() * Shape::kColumn);
    }

    template <typename DT, typename F>
    CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn)
    {
        static_assert(cutlass::platform::is_same<Element, float>::value,
                      "update to support non-float accum");
        // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16
        // T0 & T2 share same line within a quad
        auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1);
        myValue = fn(myValue, otherV);
        // quad 0 and quad 2 are on the same lines
        otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3);
        myValue = fn(myValue, otherV);
        return (lane_id & ((1 << 1) | (1 << 3))) == 0;
    }

    template <typename FA, typename FB, typename FC>
    CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset,
                                           FA beginRow,
                                           FB op,
                                           FC endRow)
    {
        CUTLASS_PRAGMA_UNROLL
        for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
            CUTLASS_PRAGMA_UNROLL
            for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
                CUTLASS_PRAGMA_UNROLL
                for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
                    int accum_m = tile_m * Policy::InterleavedTile::kRow +
                                  mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row();
                    beginRow(accum_m);

                    CUTLASS_PRAGMA_UNROLL
                    for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
                        CUTLASS_PRAGMA_UNROLL
                        for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
                            CUTLASS_PRAGMA_UNROLL
                            for (int p = 0; p < kAccumulatorPatials; ++p) {
                                CUTLASS_PRAGMA_UNROLL
                                for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
                                    int mma_accum_start =
                                        (((tile_n * Policy::TileIterations::kRow + tile_m) *
                                              Policy::MmaIterations::kColumn +
                                          mma_n) *
                                             Policy::MmaIterations::kRow +
                                         mma_m) *
                                        kElementsPerMma;
                                    int accum_n = tile_n * Policy::InterleavedTile::kColumn +
                                                  mma_n * QuadShapePerPatialMma::kColumn +
                                                  p * Policy::InterleavedTile::kColumn / 2 + n +
                                                  lane_offset.column();
                                    int idx = mma_accum_start + p * kElementsPerPartial +
                                              m * EleShapePerPatial::kColumn + n;
                                    op(accum_m, accum_n, idx);
                                }
                            }
                        }
                    }
                    endRow(accum_m);
                }
            }
        }
    }
};

template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSimt {
    using Policy = typename T::Policy;
    using Iterations = typename T::Iterations;
    using Element = typename T::Element;
    using Delta = typename T::Delta;
    using Shape = typename T::Shape;
    static_assert(cutlass::platform::is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
                  "only RowMajor is supported");

    template <typename DT, typename F>
    CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn)
    {
        CUTLASS_PRAGMA_UNROLL
        for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) {
            auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit);
            myValue = fn(myValue, otherV);
        }
        return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0;
    }

    template <typename FA, typename FB, typename FC>
    CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset,
                                           FA beginRow,
                                           FB op,
                                           FC endRow)
    {
        CUTLASS_PRAGMA_UNROLL
        for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
            CUTLASS_PRAGMA_UNROLL
            for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
                int accum_m = mma_m * Delta::kRow + m + lane_offset.row();
                beginRow(accum_m);

                CUTLASS_PRAGMA_UNROLL
                for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
                    int accum_n = mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN +
                                  lane_offset.column();
                    CUTLASS_PRAGMA_UNROLL
                    for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
                        int idx = n + Policy::LaneMmaShape::kN *
                                          (mma_n + Iterations::kColumn *
                                                       (m + mma_m * Policy::LaneMmaShape::kM));
                        op(accum_m, accum_n + n, idx);
                    }
                }
                endRow(accum_m);
            }
        }
    }

    static cutlass::MatrixCoord CUTLASS_DEVICE
    get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset)
    {
        static_assert(cutlass::platform::is_same<typename Policy::LaneLayout,
                                                 cutlass::layout::RowMajorInterleaved<1>>::value,
                      "");
        typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();

        cutlass::MatrixCoord lane_offset =
            lane_layout.inverse(lane_id) *
            cutlass::MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);
        return lane_offset + tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn);
    }
};

template <typename T, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator;

// Simt
template <typename S, typename P, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
    cutlass::gemm::warp::MmaSimtTileIterator<S,
                                             cutlass::gemm::Operand::kC,
                                             accum_t,
                                             cutlass::layout::RowMajor,
                                             P,
                                             1,
                                             1>,
    accum_t,
    kWarpSize> {
    using WarpIterator =
        typename cutlass::gemm::warp::MmaSimtTileIterator<S,
                                                          cutlass::gemm::Operand::kC,
                                                          accum_t,
                                                          cutlass::layout::RowMajor,
                                                          P,
                                                          1,
                                                          1>;
    using Iterator = AccumLambdaIteratorSimt<WarpIterator, accum_t, kWarpSize>;
};

// TensorOp - Volta
template <typename S1, typename S2, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
    cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<S1,
                                                                 accum_t,
                                                                 cutlass::layout::RowMajor,
                                                                 S2,
                                                                 cutlass::MatrixShape<1, 1>>,
    accum_t,
    kWarpSize> {
    using WarpIterator = typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
        S1,
        accum_t,
        cutlass::layout::RowMajor,
        S2,
        cutlass::MatrixShape<1, 1>>;
    using Iterator = AccumLambdaIteratorSm70<WarpIterator, accum_t, kWarpSize>;
};

// TensorOp - Sm75+
template <typename S1, typename S2, typename S3, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
    cutlass::gemm::warp::
        MmaTensorOpAccumulatorTileIterator<S1, accum_t, cutlass::layout::RowMajor, S2, S3>,
    accum_t,
    kWarpSize> {
    using WarpIterator = typename cutlass::gemm::warp::
        MmaTensorOpAccumulatorTileIterator<S1, accum_t, cutlass::layout::RowMajor, S2, S3>;
    using Iterator = AccumLambdaIteratorSm80<WarpIterator, accum_t, kWarpSize>;
};
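The three iterators above all expose the same contract: get_lane_offset places a lane inside the logical tile, iterateRows walks the flat accumulator fragment and reports each element's (row, column, index), and reduceSameRow folds the values held by lanes that share a row. Below is a small CPU-side sketch of just the iterateRows callback pattern (begin-row / per-element / end-row); the iterate_rows helper and the row-max example are illustrative assumptions, not part of the file.

#include <algorithm>
#include <cstdio>
#include <vector>

// Walk a flat accumulator buffer as a logical rows x cols tile, invoking
// callbacks in the same begin-row / element / end-row order as iterateRows.
template <typename FBegin, typename FOp, typename FEnd>
void iterate_rows(int rows, int cols, FBegin begin_row, FOp op, FEnd end_row)
{
    for (int r = 0; r < rows; ++r) {
        begin_row(r);
        for (int c = 0; c < cols; ++c) { op(r, c, r * cols + c); }
        end_row(r);
    }
}

int main()
{
    const int rows = 2, cols = 4;
    std::vector<float> accum = {1, 5, 2, 0, 7, 3, 3, 9};  // flat fragment
    std::vector<float> row_max(rows);

    iterate_rows(
        rows, cols,
        [&](int r) { row_max[r] = accum[r * cols]; },  // begin row: seed the running max
        [&](int r, int /*c*/, int idx) { row_max[r] = std::max(row_max[r], accum[idx]); },
        [&](int r) { std::printf("row %d max = %g\n", r, row_max[r]); });  // end row
    return 0;
}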
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h
ADDED
@@ -0,0 +1,1939 @@
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
|
| 3 |
+
*reserved. SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice,
|
| 9 |
+
*this list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holder nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
| 23 |
+
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 24 |
+
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 25 |
+
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 27 |
+
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 28 |
+
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 29 |
+
*POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
*
|
| 31 |
+
**************************************************************************************************/
|
| 32 |
+
|
| 33 |
+
// Copyright (c) Microsoft Corporation.
|
| 34 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 35 |
+
|
| 36 |
+
// DeepSpeed Team
|
| 37 |
+
|
| 38 |
+
/*! \file
|
| 39 |
+
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
|
| 40 |
+
*/
|
| 41 |
+
|
| 42 |
+
#pragma once
|
| 43 |
+
|
| 44 |
+
#include "cutlass/aligned_buffer.h"
|
| 45 |
+
#include "cutlass/arch/memory.h"
|
| 46 |
+
#include "cutlass/array.h"
|
| 47 |
+
#include "cutlass/cutlass.h"
|
| 48 |
+
#include "cutlass/epilogue/thread/linear_combination.h"
|
| 49 |
+
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
|
| 50 |
+
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
|
| 51 |
+
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
|
| 52 |
+
#include "cutlass/functional.h"
|
| 53 |
+
#include "cutlass/gemm/gemm.h"
|
| 54 |
+
#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
|
| 55 |
+
#include "cutlass/matrix_shape.h"
|
| 56 |
+
#include "cutlass/numeric_conversion.h"
|
| 57 |
+
#include "cutlass/numeric_types.h"
|
| 58 |
+
#include "cutlass/platform/platform.h"
|
| 59 |
+
#include "cutlass/transform/threadblock/vector_iterator.h"
|
| 60 |
+
|
| 61 |
+
#include "../epilogue/epilogue_thread_apply_logsumexp.h"
|
| 62 |
+
#include "../gemm/mma_accum_lambda_iterator.h"
|
| 63 |
+
#include "../gemm_kernel_utils.h"
|
| 64 |
+
#include "../iterators/make_residual_last.h"
|
| 65 |
+
#include "../iterators/transpose_warp_iterator.h"
|
| 66 |
+
#include "../iterators/warp_iterator_from_smem.h"
|
| 67 |
+
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
|
| 68 |
+
#include "cutlass/gemm/threadblock/mma_base.h"
|
| 69 |
+
#include "cutlass/gemm/threadblock/mma_multistage.h"
|
| 70 |
+
#include "cutlass/gemm/threadblock/mma_pipelined.h"
|
| 71 |
+
#include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h"
|
| 72 |
+
|
| 73 |
+
namespace cutlass {
|
| 74 |
+
namespace gemm {
|
| 75 |
+
namespace threadblock {
|
| 76 |
+
|
| 77 |
+
/// Shared storage object needed by accumulator
|
| 78 |
+
/// From 13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h
|
| 79 |
+
template <typename Shape_, typename Element_, typename Layout_, typename Padding_>
|
| 80 |
+
class AccumulatorSharedStorage {
|
| 81 |
+
public:
|
| 82 |
+
//
|
| 83 |
+
// Type definitions
|
| 84 |
+
//
|
| 85 |
+
using Shape = Shape_;
|
| 86 |
+
using Element = Element_;
|
| 87 |
+
using Layout = Layout_;
|
| 88 |
+
using Padding = Padding_;
|
| 89 |
+
|
| 90 |
+
/// Tensor reference to the accumulator
|
| 91 |
+
using TensorRefAccum = cutlass::TensorRef<Element, Layout>;
|
| 92 |
+
|
| 93 |
+
/// Shape of the accumulator matrix in shared memory
|
| 94 |
+
using ShapeAccum =
|
| 95 |
+
cutlass::MatrixShape<Shape::kM + Padding::kRow, Shape::kN + Padding::kColumn>;
|
| 96 |
+
|
| 97 |
+
public:
|
| 98 |
+
//
|
| 99 |
+
// Data members
|
| 100 |
+
//
|
| 101 |
+
|
| 102 |
+
/// Buffer for accumulator
|
| 103 |
+
cutlass::AlignedBuffer<Element, ShapeAccum::kCount> accum;
|
| 104 |
+
|
| 105 |
+
public:
|
| 106 |
+
//
|
| 107 |
+
// Methods
|
| 108 |
+
//
|
| 109 |
+
|
| 110 |
+
/// Returns a layout object for the Accum matrix
|
| 111 |
+
CUTLASS_DEVICE
|
| 112 |
+
static Layout LayoutAccum() { return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn}); }
|
| 113 |
+
|
| 114 |
+
/// Returns a TensorRef to the Accumulator
|
| 115 |
+
CUTLASS_HOST_DEVICE
|
| 116 |
+
TensorRefAccum accum_ref() { return TensorRefAccum{accum.data(), LayoutAccum()}; }
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 120 |
+
// Taken from
|
| 121 |
+
// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h
|
| 122 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 123 |
+
|
| 124 |
+
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
|
| 125 |
+
/// instructions.
|
| 126 |
+
template <
|
| 127 |
+
/// Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 128 |
+
typename Shape_,
|
| 129 |
+
// Maximum value for K
|
| 130 |
+
int kMaxK,
|
| 131 |
+
/// Policy describing tuning details (concept: MmaPolicy)
|
| 132 |
+
typename Policy_,
|
| 133 |
+
/// Number of stages,
|
| 134 |
+
int Stages,
|
| 135 |
+
/// Used for partial specialization
|
| 136 |
+
typename Enable = bool>
|
| 137 |
+
class MmaBaseFromSharedMemory {
|
| 138 |
+
public:
|
| 139 |
+
///< Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 140 |
+
using Shape = Shape_;
|
| 141 |
+
|
| 142 |
+
///< Policy describing tuning details
|
| 143 |
+
using Policy = Policy_;
|
| 144 |
+
|
| 145 |
+
//
|
| 146 |
+
// Dependent types
|
| 147 |
+
//
|
| 148 |
+
|
| 149 |
+
/// Warp-level Mma
|
| 150 |
+
using Operator = typename Policy::Operator;
|
| 151 |
+
|
| 152 |
+
/// Shape describing the overall GEMM computed from shared memory
|
| 153 |
+
/// by each warp.
|
| 154 |
+
using WarpGemm = typename Policy::Operator::Shape;
|
| 155 |
+
|
| 156 |
+
/// Shape describing the number of warps filling the CTA
|
| 157 |
+
using WarpCount =
|
| 158 |
+
GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>;
|
| 159 |
+
using WarpCount1 = WarpCount;
|
| 160 |
+
|
| 161 |
+
/// Number of warp-level GEMM oeprations
|
| 162 |
+
static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK);
|
| 163 |
+
static int const kWarpGemmIterations1 = kWarpGemmIterations;
|
| 164 |
+
|
| 165 |
+
/// Number of stages
|
| 166 |
+
static int const kStages = Stages;
|
| 167 |
+
|
| 168 |
+
/// If this is true, we fill the entire shmem buffer at start
|
| 169 |
+
/// and don't need to iterate through it in a circular fashion
|
| 170 |
+
static bool const kSmemContainsEntireB = kMaxK <= Shape::kK * kStages;
|
| 171 |
+
|
| 172 |
+
/// Tensor reference to the A operand
|
| 173 |
+
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
|
| 174 |
+
|
| 175 |
+
/// Tensor reference to the B operand
|
| 176 |
+
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
|
| 177 |
+
|
| 178 |
+
//
|
| 179 |
+
// Nested structs
|
| 180 |
+
//
|
| 181 |
+
|
| 182 |
+
/// Shared storage object needed by threadblock-scoped GEMM
|
| 183 |
+
class SharedStorage {
|
| 184 |
+
public:
|
| 185 |
+
//
|
| 186 |
+
// Type definitions
|
| 187 |
+
//
|
| 188 |
+
|
| 189 |
+
/// Shape of the B matrix operand in shared memory
|
| 190 |
+
using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
|
| 191 |
+
Shape::kN + Policy::SmemPaddingB::kColumn>;
|
| 192 |
+
|
| 193 |
+
public:
|
| 194 |
+
//
|
| 195 |
+
// Data members
|
| 196 |
+
//
|
| 197 |
+
|
| 198 |
+
/// Buffer for B operand
|
| 199 |
+
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
|
| 200 |
+
|
| 201 |
+
public:
|
| 202 |
+
//
|
| 203 |
+
// Methods
|
| 204 |
+
//
|
| 205 |
+
|
| 206 |
+
/// Returns a layout object for the B matrix
|
| 207 |
+
CUTLASS_HOST_DEVICE
|
| 208 |
+
static typename Operator::LayoutB LayoutB()
|
| 209 |
+
{
|
| 210 |
+
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
/// Returns a TensorRef to the B operand
|
| 214 |
+
CUTLASS_HOST_DEVICE
|
| 215 |
+
TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; }
|
| 216 |
+
};
|
| 217 |
+
|
| 218 |
+
protected:
|
| 219 |
+
//
|
| 220 |
+
// Data members
|
| 221 |
+
//
|
| 222 |
+
|
| 223 |
+
// /// Iterator to load a warp-scoped tile of A operand from shared memory
|
| 224 |
+
// typename Operator::IteratorA warp_tile_iterator_A_;
|
| 225 |
+
|
| 226 |
+
/// Iterator to load a warp-scoped tile of B operand from shared memory
|
| 227 |
+
typename Operator::IteratorB warp_tile_iterator_B_;
|
| 228 |
+
|
| 229 |
+
public:
|
| 230 |
+
/// Construct from tensor references
|
| 231 |
+
CUTLASS_DEVICE
|
| 232 |
+
MmaBaseFromSharedMemory(
|
| 233 |
+
///< Shared storage needed for internal use by threadblock-scoped GEMM
|
| 234 |
+
SharedStorage& shared_storage,
|
| 235 |
+
///< ID within the threadblock
|
| 236 |
+
int thread_idx,
|
| 237 |
+
///< ID of warp
|
| 238 |
+
int warp_idx,
|
| 239 |
+
///< ID of each thread within a warp
|
| 240 |
+
int lane_idx)
|
| 241 |
+
: warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx)
|
| 242 |
+
{
|
| 243 |
+
}
|
| 244 |
+
};
|
| 245 |
+
|
| 246 |
+
namespace {
|
| 247 |
+
|
| 248 |
+
// has necessary trait compliance with WarpIteratorFromSmem but doesn't do
|
| 249 |
+
// anything, can be default initialized, and uses fragment that takes up
|
| 250 |
+
// (almost) no space. this warp iterator is selected at compile time when
|
| 251 |
+
// elementwise on-the-fly scaling for operand A is disabled, in which case
|
| 252 |
+
// operations related to loading scale factors for operand A get wiped out by
|
| 253 |
+
// the compiler.
|
| 254 |
+
template <typename TensorRef>
|
| 255 |
+
class NoOpWarpIteratorScale {
|
| 256 |
+
public:
|
| 257 |
+
// in pipelined+multistage MMA implementations we keep an array of fragments.
|
| 258 |
+
// if we aren't using scaling we don't want to waste registers on fragments
|
| 259 |
+
// of scale elements, so ideally this would be sized 0.
|
| 260 |
+
// Since arrays of zero-sized objects are not allowed, using size as 1.
|
| 261 |
+
// The compiler will most likely wipe it out anyways.
|
| 262 |
+
using Fragment = cutlass::Array<char, 1>;
|
| 263 |
+
|
| 264 |
+
CUTLASS_HOST_DEVICE
|
| 265 |
+
NoOpWarpIteratorScale() {}
|
| 266 |
+
|
| 267 |
+
CUTLASS_HOST_DEVICE
|
| 268 |
+
NoOpWarpIteratorScale(TensorRef const&, int) {}
|
| 269 |
+
|
| 270 |
+
CUTLASS_HOST_DEVICE
|
| 271 |
+
NoOpWarpIteratorScale& add_tile_offset(typename TensorRef::TensorCoord const&) { return *this; }
|
| 272 |
+
|
| 273 |
+
CUTLASS_HOST_DEVICE
|
| 274 |
+
NoOpWarpIteratorScale& operator++() { return *this; }
|
| 275 |
+
|
| 276 |
+
CUTLASS_DEVICE
|
| 277 |
+
void load(Fragment&) const {}
|
| 278 |
+
};
|
| 279 |
+
|
| 280 |
+
// if scaling is enabled, performs fragment elementwise multiplication between
|
| 281 |
+
// fragment and its scaling factor.
|
| 282 |
+
template <typename Fragment, typename FragmentScale, bool ScalingEnabled>
|
| 283 |
+
class FragmentElementwiseScaler;
|
| 284 |
+
|
| 285 |
+
// specialization for scaling being enabled.
|
| 286 |
+
template <typename Fragment, typename FragmentScale>
|
| 287 |
+
class FragmentElementwiseScaler<Fragment, FragmentScale, true> {
|
| 288 |
+
public:
|
| 289 |
+
// cast scale_frag to correct type then apply elementwise to fragment
|
| 290 |
+
CUTLASS_DEVICE
|
| 291 |
+
static Fragment apply(Fragment frag, FragmentScale const& scale_frag)
|
| 292 |
+
{
|
| 293 |
+
Fragment converted_scale_frag =
|
| 294 |
+
cutlass::NumericArrayConverter<typename Fragment::Element,
|
| 295 |
+
typename FragmentScale::Element,
|
| 296 |
+
FragmentScale::kElements>()(scale_frag);
|
| 297 |
+
return cutlass::multiplies<Fragment>()(frag, converted_scale_frag);
|
| 298 |
+
}
|
| 299 |
+
};
|
| 300 |
+
|
| 301 |
+
// specialization for scaling being disabled. doesn't do anything and should
|
| 302 |
+
// just get wiped out by the compiler.
|
| 303 |
+
template <typename Fragment, typename FragmentScale>
|
| 304 |
+
class FragmentElementwiseScaler<Fragment, FragmentScale, false> {
|
| 305 |
+
public:
|
| 306 |
+
CUTLASS_DEVICE
|
| 307 |
+
static Fragment apply(Fragment frag, FragmentScale const&) { return frag; }
|
| 308 |
+
};
|
| 309 |
+
} // namespace
|
| 310 |
+
|
| 311 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 312 |
+
// Taken from
|
| 313 |
+
// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h
|
| 314 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 315 |
+
|
| 316 |
+
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
|
| 317 |
+
/// instructions.
|
| 318 |
+
template <
|
| 319 |
+
/// Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 320 |
+
typename Shape_,
|
| 321 |
+
// BEGIN smem
|
| 322 |
+
/// Iterates over the intermediate accumulator tile in shared memory
|
| 323 |
+
typename WarpIteratorA,
|
| 324 |
+
/// whether or not to perform elementwise multiplication of A
|
| 325 |
+
// by another matrix (A_scale) that is also kept in shared memory prior
|
| 326 |
+
// to matmul A @ B
|
| 327 |
+
bool ScaleOperandA_,
|
| 328 |
+
// Accumulator type
|
| 329 |
+
typename AccumulatorSharedStorage,
|
| 330 |
+
// END smem
|
| 331 |
+
/// Iterates over tiles of B operand in global memory
|
| 332 |
+
// (concept: ReadableTileIterator | ForwardTileIterator |
|
| 333 |
+
// MaskedTileIterator)
|
| 334 |
+
typename IteratorB_,
|
| 335 |
+
/// Iterates over tiles of B operand in shared memory
|
| 336 |
+
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
|
| 337 |
+
typename SmemIteratorB_,
|
| 338 |
+
/// Data type of accumulator matrix
|
| 339 |
+
typename ElementC_,
|
| 340 |
+
/// Data type of accumulator matrix
|
| 341 |
+
typename LayoutC_,
|
| 342 |
+
/// Policy describing tuning details (concept: MmaPolicy)
|
| 343 |
+
typename Policy_,
|
| 344 |
+
/// Transformation applied to B operand
|
| 345 |
+
typename TransformB_ = NumericArrayConverter<typename SmemIteratorB_::Element,
|
| 346 |
+
typename IteratorB_::Element,
|
| 347 |
+
IteratorB_::Fragment::kElements>,
|
| 348 |
+
/// Used for partial specialization
|
| 349 |
+
typename Enable = bool>
|
| 350 |
+
class MmaPipelinedFromSharedMemory
|
| 351 |
+
: public MmaBaseFromSharedMemory<Shape_, AccumulatorSharedStorage::Shape::kN, Policy_, 2> {
|
| 352 |
+
public:
|
| 353 |
+
///< Base class
|
| 354 |
+
using Base = MmaBaseFromSharedMemory<Shape_, AccumulatorSharedStorage::Shape::kN, Policy_, 2>;
|
| 355 |
+
|
| 356 |
+
using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 357 |
+
static constexpr bool ScaleOperandA = ScaleOperandA_;
|
| 358 |
+
|
| 359 |
+
///< loads fragments of A_scale from shared memory if operand A scaling is
|
| 360 |
+
///< enabled. otherwise no-op.
|
| 361 |
+
using WarpIteratorAScale = typename cutlass::platform::conditional<
|
| 362 |
+
ScaleOperandA,
|
| 363 |
+
WarpIteratorA,
|
| 364 |
+
NoOpWarpIteratorScale<typename WarpIteratorA::TensorRef>>::type;
|
| 365 |
+
|
| 366 |
+
using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
|
| 367 |
+
using ElementC = ElementC_; ///< Data type of accumulator matrix
|
| 368 |
+
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
|
| 369 |
+
using Policy = Policy_; ///< Policy describing tuning details
|
| 370 |
+
|
| 371 |
+
using SmemIteratorB = SmemIteratorB_;
|
| 372 |
+
|
| 373 |
+
using TransformB = TransformB_;
|
| 374 |
+
|
| 375 |
+
//
|
| 376 |
+
// Dependent types
|
| 377 |
+
//
|
| 378 |
+
|
| 379 |
+
/// Fragment of operand B loaded from global memory
|
| 380 |
+
using FragmentB = typename IteratorB::Fragment;
|
| 381 |
+
|
| 382 |
+
/// Fragment of accumulator tile
|
| 383 |
+
using FragmentC = typename Policy::Operator::FragmentC;
|
| 384 |
+
|
| 385 |
+
/// Warp-level Mma
|
| 386 |
+
using Operator = typename Policy::Operator;
|
| 387 |
+
|
| 388 |
+
/// Obtain the arch tag from the warp-level operator
|
| 389 |
+
using ArchTag = typename Policy::Operator::ArchTag;
|
| 390 |
+
|
| 391 |
+
/// Complex transform on B operand
|
| 392 |
+
static ComplexTransform const kTransformB = Operator::kTransformB;
|
| 393 |
+
|
| 394 |
+
// staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline)
|
| 395 |
+
static_assert((Base::kStages == 2), "MmaPipelined requires kStages set to value 2");
|
| 396 |
+
|
| 397 |
+
private:
|
| 398 |
+
using WarpFragmentA = typename Operator::FragmentA;
|
| 399 |
+
|
| 400 |
+
/// fragment type of OperandA elementwise scaling matrix. (almost) empty
|
| 401 |
+
/// if operand A scaling is disabled.
|
| 402 |
+
using WarpFragmentAScale = typename WarpIteratorAScale::Fragment;
|
| 403 |
+
|
| 404 |
+
using WarpFragmentB = typename Operator::FragmentB;
|
| 405 |
+
|
| 406 |
+
/// applies scaling factor to operand A fragment if operand A scaling is
|
| 407 |
+
/// enabled. otherwise no-op.
|
| 408 |
+
using FragmentAScaler =
|
| 409 |
+
FragmentElementwiseScaler<WarpFragmentA, WarpFragmentAScale, ScaleOperandA>;
|
| 410 |
+
|
| 411 |
+
protected:
|
| 412 |
+
// /// Iterator to write threadblock-scoped tile of A operand to shared memory
|
| 413 |
+
// SmemIteratorA smem_iterator_A_;
|
| 414 |
+
|
| 415 |
+
/// Iterator to write threadblock-scoped tile of B operand to shared memory
|
| 416 |
+
SmemIteratorB smem_iterator_B_;
|
| 417 |
+
|
| 418 |
+
/// Iterator to load a warp-scoped tile of A operand from intermediate
|
| 419 |
+
/// accumulator tile
|
| 420 |
+
WarpIteratorA warp_tile_iterator_A_;
|
| 421 |
+
|
| 422 |
+
/// Iterator to load a warp-scoped tile of A_scale from intermediate
|
| 423 |
+
/// accumulator tile (only used if ScaleOperandA_ is true)
|
| 424 |
+
WarpIteratorAScale warp_tile_iterator_A_scale_;
|
| 425 |
+
|
| 426 |
+
public:
|
| 427 |
+
/// constructor for MMA with operand A scaling enabled.
|
| 428 |
+
CUTLASS_DEVICE
|
| 429 |
+
MmaPipelinedFromSharedMemory(
|
| 430 |
+
// shared storage needed for internal use by threadblock-scoped GEMM
|
| 431 |
+
typename Base::SharedStorage& shared_storage,
|
| 432 |
+
// warp iterator over A tile held in shared memory
|
| 433 |
+
WarpIteratorA warp_iter_a,
|
| 434 |
+
// warp iterator over A_scale tile held in shared memory
|
| 435 |
+
WarpIteratorAScale warp_iter_a_scale,
|
| 436 |
+
int thread_idx,
|
| 437 |
+
int warp_idx,
|
| 438 |
+
int lane_idx)
|
| 439 |
+
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
|
| 440 |
+
warp_tile_iterator_A_(warp_iter_a),
|
| 441 |
+
warp_tile_iterator_A_scale_(warp_iter_a_scale),
|
| 442 |
+
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
|
| 443 |
+
{
|
| 444 |
+
// Compute warp location within threadblock tile by mapping the warp_id to
|
| 445 |
+
// three coordinates:
|
| 446 |
+
// _m: the warp's position within the threadblock along the M dimension
|
| 447 |
+
// _n: the warp's position within the threadblock along the N dimension
|
| 448 |
+
// _k: the warp's position within the threadblock along the K dimension
|
| 449 |
+
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
|
| 450 |
+
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
|
| 451 |
+
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
|
| 452 |
+
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
|
| 453 |
+
|
| 454 |
+
// Add per-warp offsets in units of warp-level tiles
|
| 455 |
+
this->warp_tile_iterator_A_.add_tile_offset(
|
| 456 |
+
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
|
| 457 |
+
this->warp_tile_iterator_A_scale_.add_tile_offset(
|
| 458 |
+
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
|
| 459 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 460 |
+
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
/// Construct from tensor references
|
| 464 |
+
CUTLASS_DEVICE
|
| 465 |
+
MmaPipelinedFromSharedMemory(
|
| 466 |
+
typename Base::SharedStorage& shared_storage, ///< Shared storage needed for internal use
|
| 467 |
+
///< by threadblock-scoped GEMM
|
| 468 |
+
AccumulatorSharedStorage& accumulator_shared_storage,
|
| 469 |
+
int thread_idx, ///< ID within the threadblock
|
| 470 |
+
int warp_idx, ///< ID of warp
|
| 471 |
+
int lane_idx, ///< ID of each thread within a warp
|
| 472 |
+
int problem_size_0_n)
|
| 473 |
+
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
|
| 474 |
+
warp_tile_iterator_A_(accumulator_shared_storage.accum_ref(), lane_idx),
|
| 475 |
+
smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
|
| 476 |
+
{
|
| 477 |
+
// Compute warp location within threadblock tile by mapping the warp_id to
|
| 478 |
+
// three coordinates:
|
| 479 |
+
// _m: the warp's position within the threadblock along the M dimension
|
| 480 |
+
// _n: the warp's position within the threadblock along the N dimension
|
| 481 |
+
// _k: the warp's position within the threadblock along the K dimension
|
| 482 |
+
|
| 483 |
+
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
|
| 484 |
+
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
|
| 485 |
+
|
| 486 |
+
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
|
| 487 |
+
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
|
| 488 |
+
|
| 489 |
+
// Add per-warp offsets in units of warp-level tiles
|
| 490 |
+
this->warp_tile_iterator_A_.add_tile_offset(
|
| 491 |
+
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
|
| 492 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 493 |
+
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
// For API compatibility with MmaMultistageFromSharedMemory
|
| 497 |
+
// but not supported as it worsens perf: older gpus < sm80 don't
|
| 498 |
+
// support async transfers and have to waste registers
|
| 499 |
+
CUTLASS_DEVICE
|
| 500 |
+
void set_prologue_done(bool value) {}
|
| 501 |
+
CUTLASS_DEVICE
|
| 502 |
+
static void prologue(typename Base::SharedStorage& shared_storage,
|
| 503 |
+
IteratorB iterator_B1,
|
| 504 |
+
int thread_idx,
|
| 505 |
+
int problem_size_0_n)
|
| 506 |
+
{
|
| 507 |
+
}
|
| 508 |
+
|
| 509 |
+
CUTLASS_DEVICE
|
| 510 |
+
static void drain_cp_asyncs() {}
|
| 511 |
+
|
| 512 |
+
/// Perform a threadblock-scoped matrix multiply-accumulate
|
| 513 |
+
CUTLASS_DEVICE
|
| 514 |
+
void operator()(int gemm_k_iterations, ///< number of iterations of the mainloop
|
| 515 |
+
FragmentC& accum, ///< destination accumulator tile
|
| 516 |
+
// IteratorA iterator_A, ///< iterator over A
|
| 517 |
+
// operand in global memory
|
| 518 |
+
IteratorB iterator_B, ///< iterator over B operand in global memory
|
| 519 |
+
FragmentC const& src_accum, ///< source accumulator tile
|
| 520 |
+
// TransformA transform_A = TransformA(), ///< transformation
|
| 521 |
+
// applied to A fragment
|
| 522 |
+
TransformB transform_B = TransformB())
|
| 523 |
+
{ ///< transformation applied to B fragment
|
| 524 |
+
|
| 525 |
+
//
|
| 526 |
+
// Prologue
|
| 527 |
+
//
|
| 528 |
+
|
| 529 |
+
// Perform accumulation in the 'd' output operand
|
| 530 |
+
accum = src_accum;
|
| 531 |
+
|
| 532 |
+
FragmentB tb_frag_B;
|
| 533 |
+
|
| 534 |
+
tb_frag_B.clear();
|
| 535 |
+
|
| 536 |
+
// The last kblock is loaded in the prolog
|
| 537 |
+
iterator_B.set_residual_tile(gemm_k_iterations == 1);
|
| 538 |
+
iterator_B.load(tb_frag_B);
|
| 539 |
+
|
| 540 |
+
++iterator_B;
|
| 541 |
+
|
| 542 |
+
this->smem_iterator_B_.store(transform_B(tb_frag_B));
|
| 543 |
+
|
| 544 |
+
++this->smem_iterator_B_;
|
| 545 |
+
|
| 546 |
+
__syncthreads();
|
| 547 |
+
|
| 548 |
+
// remember that WarpFragmentAScale and WarpIteratorAScale are empty/no-op
|
| 549 |
+
// if scaling is disabled.
|
| 550 |
+
|
| 551 |
+
// Pair of fragments used to overlap shared memory loads and math
|
| 552 |
+
// instructions
|
| 553 |
+
WarpFragmentA warp_frag_A[2];
|
| 554 |
+
WarpFragmentAScale warp_frag_A_scale[2];
|
| 555 |
+
WarpFragmentB warp_frag_B[2];
|
| 556 |
+
warp_frag_A[0].clear();
|
| 557 |
+
warp_frag_A_scale[0].clear();
|
| 558 |
+
warp_frag_B[0].clear();
|
| 559 |
+
|
| 560 |
+
this->warp_tile_iterator_B_.set_kgroup_index(0);
|
| 561 |
+
|
| 562 |
+
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
|
| 563 |
+
this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[0]);
|
| 564 |
+
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
|
| 565 |
+
|
| 566 |
+
++this->warp_tile_iterator_A_;
|
| 567 |
+
++this->warp_tile_iterator_A_scale_;
|
| 568 |
+
++this->warp_tile_iterator_B_;
|
| 569 |
+
|
| 570 |
+
Operator warp_mma;
|
| 571 |
+
|
| 572 |
+
int smem_write_stage_idx = 1;
|
| 573 |
+
|
| 574 |
+
// Avoid reading out of bounds
|
| 575 |
+
iterator_B.set_residual_tile(gemm_k_iterations == 2);
|
| 576 |
+
iterator_B.clear_mask(gemm_k_iterations <= 1);
|
| 577 |
+
|
| 578 |
+
// Issue loads during the first warp-level matrix multiply-add *AFTER*
|
| 579 |
+
// issuing shared memory loads (which have the tightest latency requirement).
|
| 580 |
+
|
| 581 |
+
//
|
| 582 |
+
// Mainloop
|
| 583 |
+
//
|
| 584 |
+
|
| 585 |
+
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
|
| 586 |
+
CUTLASS_GEMM_LOOP
|
| 587 |
+
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
|
| 588 |
+
//
|
| 589 |
+
// Loop over GEMM K dimension
|
| 590 |
+
//
|
| 591 |
+
|
| 592 |
+
CUTLASS_PRAGMA_UNROLL
|
| 593 |
+
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
|
| 594 |
+
// Load warp-level tiles from shared memory, wrapping to k offset if
|
| 595 |
+
// this is the last group as the case may be.
|
| 596 |
+
bool hasNext = true;
|
| 597 |
+
|
| 598 |
+
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
|
| 599 |
+
// Write fragments to shared memory
|
| 600 |
+
this->smem_iterator_B_.store(transform_B(tb_frag_B));
|
| 601 |
+
|
| 602 |
+
__syncthreads();
|
| 603 |
+
|
| 604 |
+
++this->smem_iterator_B_;
|
| 605 |
+
|
| 606 |
+
// Add negative offsets to return iterators to the 'start' of the
|
| 607 |
+
// circular buffer in shared memory SMEM: Don't reset iterator A, as
|
| 608 |
+
// we are continuing our iteration at this point
|
| 609 |
+
if (smem_write_stage_idx == 1) {
|
| 610 |
+
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
|
| 611 |
+
} else {
|
| 612 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 613 |
+
{-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
smem_write_stage_idx ^= 1;
|
| 617 |
+
hasNext = gemm_k_iterations > 1;
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
// Only read the next if we need to
|
| 621 |
+
if (hasNext) {
|
| 622 |
+
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
|
| 623 |
+
Base::kWarpGemmIterations);
|
| 624 |
+
|
| 625 |
+
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
|
| 626 |
+
this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[(warp_mma_k + 1) % 2]);
|
| 627 |
+
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
|
| 628 |
+
|
| 629 |
+
++this->warp_tile_iterator_A_;
|
| 630 |
+
++this->warp_tile_iterator_A_scale_;
|
| 631 |
+
++this->warp_tile_iterator_B_;
|
| 632 |
+
|
| 633 |
+
if (warp_mma_k == 0) {
|
| 634 |
+
iterator_B.load(tb_frag_B);
|
| 635 |
+
|
| 636 |
+
++iterator_B;
|
| 637 |
+
|
| 638 |
+
// Avoid reading out of bounds if this was the last loop iteration
|
| 639 |
+
iterator_B.set_residual_tile(gemm_k_iterations == 3);
|
| 640 |
+
iterator_B.clear_mask(gemm_k_iterations <= 2);
|
| 641 |
+
}
|
| 642 |
+
}
|
| 643 |
+
|
| 644 |
+
warp_mma(accum,
|
| 645 |
+
FragmentAScaler::apply(warp_frag_A[warp_mma_k % 2],
|
| 646 |
+
warp_frag_A_scale[warp_mma_k % 2]),
|
| 647 |
+
warp_frag_B[warp_mma_k % 2],
|
| 648 |
+
accum);
|
| 649 |
+
}
|
| 650 |
+
}
|
| 651 |
+
}
|
| 652 |
+
};
|
| 653 |
+
|
| 654 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 655 |
+
// Taken from
|
| 656 |
+
// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h
|
| 657 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 658 |
+
|
| 659 |
+
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
|
| 660 |
+
/// instructions.
|
| 661 |
+
template <
|
| 662 |
+
/// Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 663 |
+
typename Shape1_,
|
| 664 |
+
/// Iterates over the intermediate accumulator tile in shared memory
|
| 665 |
+
typename WarpIteratorA1_,
|
| 666 |
+
/// whether or not to perform elementwise multiplication of A
|
| 667 |
+
// by another matrix (A_scale) that is also kept in shared memory prior
|
| 668 |
+
// to matmul A @ B
|
| 669 |
+
bool ScaleOperandA_,
|
| 670 |
+
// Accumulator type
|
| 671 |
+
typename AccumulatorSharedStorage,
|
| 672 |
+
/// Iterates over tiles of B operand in global memory
|
| 673 |
+
// (concept: ReadableTileIterator | ForwardTileIterator |
|
| 674 |
+
// MaskedTileIterator)
|
| 675 |
+
typename IteratorB1_,
|
| 676 |
+
/// Iterates over tiles of B operand in shared memory
|
| 677 |
+
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
|
| 678 |
+
typename SmemIteratorB1_,
|
| 679 |
+
/// Cache operation for operand B
|
| 680 |
+
cutlass::arch::CacheOperation::Kind CacheOpB1,
|
| 681 |
+
/// Data type of accumulator matrix
|
| 682 |
+
typename ElementC_,
|
| 683 |
+
/// Data type of accumulator matrix
|
| 684 |
+
typename LayoutC_,
|
| 685 |
+
/// Policy describing tuning details (concept: MmaPolicy)
|
| 686 |
+
typename Policy1_,
|
| 687 |
+
/// Number of stages,
|
| 688 |
+
int Stages_,
|
| 689 |
+
int kMaxK_,
|
| 690 |
+
/// Used for partial specialization
|
| 691 |
+
typename Enable = bool>
|
| 692 |
+
class MmaMultistageFromSharedMemory
|
| 693 |
+
: public MmaBaseFromSharedMemory<Shape1_, kMaxK_, Policy1_, Stages_> {
|
| 694 |
+
public:
|
| 695 |
+
///< Base class
|
| 696 |
+
using Base = MmaBaseFromSharedMemory<Shape1_, kMaxK_, Policy1_, Stages_>;
|
| 697 |
+
|
| 698 |
+
///< Size of the Gemm problem - concept: gemm::GemmShape<>
|
| 699 |
+
using Shape1 = Shape1_;
|
| 700 |
+
///< Iterates over tiles of B operand in global memory
|
| 701 |
+
using IteratorB1 = IteratorB1_;
|
| 702 |
+
using IteratorB = IteratorB1;
|
| 703 |
+
///< Policy describing tuning details
|
| 704 |
+
using Policy1 = Policy1_;
|
| 705 |
+
|
| 706 |
+
using SmemIteratorB1 = SmemIteratorB1_;
|
| 707 |
+
using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate
|
| 708 |
+
///< accumulator tile in shared memory
|
| 709 |
+
static constexpr bool ScaleOperandA = ScaleOperandA_;
|
| 710 |
+
|
| 711 |
+
///< warp level iterator over A_scale matrix tile kept in shared memory.
|
| 712 |
+
///< if elementwise A scaling is disabled then everything this does is no-op.
|
| 713 |
+
using WarpIteratorAScale = typename cutlass::platform::conditional<
|
| 714 |
+
ScaleOperandA,
|
| 715 |
+
WarpIteratorA1,
|
| 716 |
+
NoOpWarpIteratorScale<typename WarpIteratorA1::TensorRef>>::type;
|
| 717 |
+
///< Data type of accumulator matrix
|
| 718 |
+
using ElementC = ElementC_;
|
| 719 |
+
///< Layout of accumulator matrix
|
| 720 |
+
using LayoutC = LayoutC_;
|
| 721 |
+
|
| 722 |
+
static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1;
|
| 723 |
+
static constexpr bool kSmemContainsEntireB = Base::kSmemContainsEntireB;
|
| 724 |
+
|
| 725 |
+
//
|
| 726 |
+
// Dependent types
|
| 727 |
+
//
|
| 728 |
+
|
| 729 |
+
/// Fragment of accumulator tile
|
| 730 |
+
using FragmentC1 = typename Policy1::Operator::FragmentC;
|
| 731 |
+
using FragmentC = FragmentC1;
|
| 732 |
+
|
| 733 |
+
/// Warp-level Mma
|
| 734 |
+
using Operator1 = typename Policy1::Operator;
|
| 735 |
+
|
| 736 |
+
/// Minimum architecture is Sm80 to support cp.async
|
| 737 |
+
using ArchTag = arch::Sm80;
|
| 738 |
+
|
| 739 |
+
/// Complex transform on B operand
|
| 740 |
+
static ComplexTransform const kTransformB1 = Operator1::kTransformB;
|
| 741 |
+
|
| 742 |
+
/// Internal structure exposed for introspection.
|
| 743 |
+
struct Detail {
|
| 744 |
+
static_assert(Base::kWarpGemmIterations1 > 1,
|
| 745 |
+
"The pipelined structure requires at least two warp-level "
|
| 746 |
+
"GEMM operations.");
|
| 747 |
+
|
| 748 |
+
/// Number of cp.async instructions to load one stage of operand B
|
| 749 |
+
static int const TBLoadIterationsB1 = IteratorB1::ThreadMap::Iterations::kCount;
|
| 750 |
+
|
| 751 |
+
/// Number of cp.async instructions to load on group of operand B
|
| 752 |
+
static int const kAccessesPerGroupB1 =
|
| 753 |
+
(TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
|
| 754 |
+
};
|
| 755 |
+
|
| 756 |
+
static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireB ? Base::kStages
|
| 757 |
+
: Base::kStages - 1;
|
| 758 |
+
|
| 759 |
+
private:
|
| 760 |
+
using WarpLoadedFragmentA1 = typename Operator1::FragmentA;
|
| 761 |
+
/// fragment of OperandA scale matrix. if operand A scaling is disabled this
|
| 762 |
+
/// is (almost) empty.
|
| 763 |
+
using WarpLoadedFragmentA1Scale = typename WarpIteratorAScale::Fragment;
|
| 764 |
+
using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
|
| 765 |
+
using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA;
|
| 766 |
+
using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
|
| 767 |
+
|
| 768 |
+
/// applies elementwise scaling to fragment of A. if operand A scaling is
|
| 769 |
+
/// disabled this is a no-op.
|
| 770 |
+
using FragmentAScaler =
|
| 771 |
+
FragmentElementwiseScaler<WarpLoadedFragmentA1, WarpLoadedFragmentA1Scale, ScaleOperandA>;
|
| 772 |
+
|
| 773 |
+
private:
|
| 774 |
+
//
|
| 775 |
+
// Data members
|
| 776 |
+
//
|
| 777 |
+
|
| 778 |
+
/// Iterator to load a warp-scoped tile of A1 operand from intermediate
|
| 779 |
+
/// accumulator tile
|
| 780 |
+
WarpIteratorA1 warp_tile_iterator_A1_;
|
| 781 |
+
|
| 782 |
+
/// Iterator to load a warp-scoped tile of A1_scale operand from shared memory
|
| 783 |
+
/// if operand A scaling is disabled everything this does is a no-op.
|
| 784 |
+
WarpIteratorAScale warp_tile_iterator_A1_scale_;
|
| 785 |
+
|
| 786 |
+
/// Iterator to write threadblock-scoped tile of B operand to shared memory
|
| 787 |
+
SmemIteratorB1 smem_iterator_B1_;
|
| 788 |
+
|
| 789 |
+
bool prologue_done_;
|
| 790 |
+
|
| 791 |
+
public:
|
| 792 |
+
/// constructor for MMA with operand A scaling enabled.
|
| 793 |
+
CUTLASS_DEVICE
|
| 794 |
+
MmaMultistageFromSharedMemory(
|
| 795 |
+
// shared storage needed for internal use by threadblock-scoped GEMM
|
| 796 |
+
typename Base::SharedStorage& shared_storage,
|
| 797 |
+
// warp level iterator over operand A tile kept in shared memory
|
| 798 |
+
WarpIteratorA1 warp_tile_iterator_A1,
|
| 799 |
+
// warp level iterator over operand A elementwise scale tile kept in
|
| 800 |
+
// shared memory.
|
| 801 |
+
WarpIteratorAScale warp_tile_iterator_A1_scale,
|
| 802 |
+
int thread_idx,
|
| 803 |
+
int warp_idx,
|
| 804 |
+
int lane_idx)
|
| 805 |
+
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
|
| 806 |
+
warp_tile_iterator_A1_(warp_tile_iterator_A1),
|
| 807 |
+
warp_tile_iterator_A1_scale_(warp_tile_iterator_A1_scale),
|
| 808 |
+
smem_iterator_B1_(shared_storage.operand_B_ref(), thread_idx),
|
| 809 |
+
prologue_done_(false)
|
| 810 |
+
{
|
| 811 |
+
// Compute warp location within threadblock tile by mapping the warp_id to
|
| 812 |
+
// three coordinates:
|
| 813 |
+
// _m: the warp's position within the threadblock along the M dimension
|
| 814 |
+
// _n: the warp's position within the threadblock along the N dimension
|
| 815 |
+
// _k: the warp's position within the threadblock along the K dimension
|
| 816 |
+
int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
|
| 817 |
+
int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
|
| 818 |
+
int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
|
| 819 |
+
int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
|
| 820 |
+
|
| 821 |
+
// Add per-warp offsets in units of warp-level tiles
|
| 822 |
+
warp_tile_iterator_A1_.add_tile_offset(
|
| 823 |
+
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
|
| 824 |
+
warp_tile_iterator_A1_scale_.add_tile_offset(
|
| 825 |
+
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
|
| 826 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 827 |
+
{Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
|
| 828 |
+
}
|
| 829 |
+
|
| 830 |
+
/// Construct from tensor references
|
| 831 |
+
CUTLASS_DEVICE
|
| 832 |
+
MmaMultistageFromSharedMemory(
|
| 833 |
+
typename Base::SharedStorage& shared_storage, ///< Shared storage needed for internal use
|
| 834 |
+
///< by threadblock-scoped GEMM
|
| 835 |
+
AccumulatorSharedStorage& accumulator_shared_storage,
|
| 836 |
+
///< ID within the threadblock
|
| 837 |
+
int thread_idx,
|
| 838 |
+
///< ID of warp
|
| 839 |
+
int warp_idx,
|
| 840 |
+
///< ID of each thread within a warp
|
| 841 |
+
int lane_idx,
|
| 842 |
+
///< GEMM0 N is used for accumulator extent
|
| 843 |
+
int problem_size_0_n)
|
| 844 |
+
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
|
| 845 |
+
warp_tile_iterator_A1_(accumulator_shared_storage.accum_ref(), lane_idx),
|
| 846 |
+
smem_iterator_B1_(shared_storage.operand_B_ref(), thread_idx),
|
| 847 |
+
prologue_done_(false)
|
| 848 |
+
{
|
| 849 |
+
// Compute warp location within threadblock tile by mapping the warp_id to
|
| 850 |
+
// three coordinates:
|
| 851 |
+
// _m: the warp's position within the threadblock along the M dimension
|
| 852 |
+
// _n: the warp's position within the threadblock along the N dimension
|
| 853 |
+
// _k: the warp's position within the threadblock along the K dimension
|
| 854 |
+
|
| 855 |
+
int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
|
| 856 |
+
int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
|
| 857 |
+
|
| 858 |
+
int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
|
| 859 |
+
int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
|
| 860 |
+
|
| 861 |
+
// Add per-warp offsets in units of warp-level tiles
|
| 862 |
+
warp_tile_iterator_A1_.add_tile_offset(
|
| 863 |
+
{warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
|
| 864 |
+
this->warp_tile_iterator_B_.add_tile_offset(
|
| 865 |
+
{Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
|
| 866 |
+
}
|
| 867 |
+
|
| 868 |
+
CUTLASS_DEVICE
|
| 869 |
+
void set_prologue_done(bool value) { prologue_done_ = value; }
|
| 870 |
+
|
| 871 |
+
CUTLASS_DEVICE
|
| 872 |
+
static void prologue(typename Base::SharedStorage& shared_storage,
|
| 873 |
+
IteratorB iterator_B1,
|
| 874 |
+
int thread_idx,
|
| 875 |
+
int problem_size_0_n)
|
| 876 |
+
{
|
| 877 |
+
SmemIteratorB1 smem_iterator_B1(shared_storage.operand_B_ref(), thread_idx);
|
| 878 |
+
_prologue(iterator_B1,
|
| 879 |
+
(problem_size_0_n + Base::Shape::kK - 1) / Base::Shape::kK,
|
| 880 |
+
smem_iterator_B1);
|
| 881 |
+
}
|
| 882 |
+
|
| 883 |
+
CUTLASS_DEVICE
|
| 884 |
+
static void drain_cp_asyncs()
|
| 885 |
+
{
|
| 886 |
+
// commit and drain all pending and predicated cp.async pnz from the GEMM
|
| 887 |
+
// mainloop
|
| 888 |
+
cutlass::arch::cp_async_fence();
|
| 889 |
+
cutlass::arch::cp_async_wait<0>();
|
| 890 |
+
__syncthreads();
|
| 891 |
+
}
|
| 892 |
+
|
| 893 |
+
CUTLASS_DEVICE
|
| 894 |
+
void copy_tiles_and_advance_1(IteratorB1& iterator_B1, int group_start_B1 = 0)
|
| 895 |
+
{
|
| 896 |
+
iterator_B1.set_iteration_index(group_start_B1 * IteratorB1::kAccessesPerVector);
|
| 897 |
+
this->smem_iterator_B1_.set_iteration_index(group_start_B1);
|
| 898 |
+
|
| 899 |
+
// Load for operand B
|
| 900 |
+
CUTLASS_PRAGMA_UNROLL
|
| 901 |
+
for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) {
|
| 902 |
+
if (group_start_B1 + j < Detail::TBLoadIterationsB1) {
|
| 903 |
+
typename IteratorB1::AccessType* dst_ptr =
|
| 904 |
+
reinterpret_cast<typename IteratorB1::AccessType*>(
|
| 905 |
+
this->smem_iterator_B1_.get());
|
| 906 |
+
|
| 907 |
+
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
|
| 908 |
+
IteratorB1::ThreadMap::kElementsPerAccess /
|
| 909 |
+
IteratorB1::kAccessesPerVector / 8;
|
| 910 |
+
|
| 911 |
+
CUTLASS_PRAGMA_UNROLL
|
| 912 |
+
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
|
| 913 |
+
auto gmem_ptr = iterator_B1.get();
|
| 914 |
+
|
| 915 |
+
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
|
| 916 |
+
dst_ptr + v, gmem_ptr, iterator_B1.valid());
|
| 917 |
+
|
| 918 |
+
++iterator_B1;
|
| 919 |
+
}
|
| 920 |
+
++this->smem_iterator_B1_;
|
| 921 |
+
}
|
| 922 |
+
}
|
| 923 |
+
}
|
| 924 |
+
|
| 925 |
+
CUTLASS_DEVICE
|
| 926 |
+
static void _prologue(IteratorB& iterator_B1,
|
| 927 |
+
int32_t gemm_k_iterations_1,
|
| 928 |
+
SmemIteratorB1& smem_iterator_B1_)
|
| 929 |
+
{
|
| 930 |
+
// Issue several complete stages
|
| 931 |
+
CUTLASS_PRAGMA_UNROLL
|
| 932 |
+
for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) {
|
| 933 |
+
iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1);
|
| 934 |
+
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
|
| 935 |
+
|
| 936 |
+
iterator_B1.set_iteration_index(0);
|
| 937 |
+
smem_iterator_B1_.set_iteration_index(0);
|
| 938 |
+
|
| 939 |
+
// Load for operand B
|
| 940 |
+
CUTLASS_PRAGMA_UNROLL
|
| 941 |
+
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
|
| 942 |
+
typename IteratorB1::AccessType* dst_ptr =
|
| 943 |
+
reinterpret_cast<typename IteratorB1::AccessType*>(smem_iterator_B1_.get());
|
| 944 |
+
|
| 945 |
+
CUTLASS_PRAGMA_UNROLL
|
| 946 |
+
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
|
| 947 |
+
int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
|
| 948 |
+
IteratorB1::ThreadMap::kElementsPerAccess /
|
| 949 |
+
IteratorB1::kAccessesPerVector / 8;
|
| 950 |
+
|
| 951 |
+
cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
|
| 952 |
+
dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
|
| 953 |
+
|
| 954 |
+
++iterator_B1;
|
| 955 |
+
}
|
| 956 |
+
|
| 957 |
+
++smem_iterator_B1_;
|
| 958 |
+
}
|
| 959 |
+
|
| 960 |
+
// Move to the next stage
|
| 961 |
+
iterator_B1.add_tile_offset({1, 0});
|
| 962 |
+
|
| 963 |
+
smem_iterator_B1_.add_tile_offset({1, 0});
|
| 964 |
+
|
| 965 |
+
// Defines the boundary of a stage of cp.async.
|
| 966 |
+
cutlass::arch::cp_async_fence();
|
| 967 |
+
}
|
| 968 |
+
iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1);
|
| 969 |
+
iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
|
| 970 |
+
}
|
| 971 |
+
|
| 972 |
+
/// Perform a threadblock-scoped matrix multiply-accumulate
|
| 973 |
+
CUTLASS_DEVICE
|
| 974 |
+
void operator()(
|
| 975 |
+
///< problem size of GEMM
|
| 976 |
+
int gemm_k_iterations_1_,
|
| 977 |
+
///< destination accumulator tile
|
| 978 |
+
FragmentC1& accum,
|
| 979 |
+
///< iterator over B1 operand in global memory
|
| 980 |
+
IteratorB1 iterator_B1,
|
| 981 |
+
///< initial value of accumulator
|
| 982 |
+
FragmentC1 const& src_accum)
|
| 983 |
+
{
|
| 984 |
+
// 2nd Gemm
|
| 985 |
+
|
| 986 |
+
//
|
| 987 |
+
// Prologue
|
| 988 |
+
//
|
| 989 |
+
// Perform accumulation in the 'd' output operand
|
| 990 |
+
accum = src_accum;
|
| 991 |
+
|
| 992 |
+
if (!prologue_done_) {
|
| 993 |
+
_prologue(iterator_B1, gemm_k_iterations_1_, smem_iterator_B1_);
|
| 994 |
+
} else if (!kSmemContainsEntireB) {
|
| 995 |
+
// Restore the iterators increments
|
| 996 |
+
|
| 997 |
+
int gemm_k_iterations_1 = gemm_k_iterations_1_;
|
| 998 |
+
// Issue several complete stages
|
| 999 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1000 |
+
for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) {
|
| 1001 |
+
iterator_B1.set_iteration_index(0);
|
| 1002 |
+
this->smem_iterator_B1_.set_iteration_index(0);
|
| 1003 |
+
|
| 1004 |
+
// Load for operand B
|
| 1005 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1006 |
+
for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
|
| 1007 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1008 |
+
for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { ++iterator_B1; }
|
| 1009 |
+
++this->smem_iterator_B1_;
|
| 1010 |
+
}
|
| 1011 |
+
iterator_B1.add_tile_offset({1, 0});
|
| 1012 |
+
this->smem_iterator_B1_.add_tile_offset({1, 0});
|
| 1013 |
+
}
|
| 1014 |
+
iterator_B1.set_residual_tile(gemm_k_iterations_1 <= 1);
|
| 1015 |
+
iterator_B1.clear_mask(gemm_k_iterations_1 <= 0);
|
| 1016 |
+
}
|
| 1017 |
+
|
| 1018 |
+
// DEPBAR+SYNC
|
| 1019 |
+
cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
|
| 1020 |
+
__syncthreads();
|
| 1021 |
+
|
| 1022 |
+
// remember that WarpFragmentAScale and WarpIteratorAScale are no-op/empty
|
| 1023 |
+
// if scaling is disabled.
|
| 1024 |
+
|
| 1025 |
+
// Pair of fragments used to overlap shared memory loads and math
|
| 1026 |
+
// instructions
|
| 1027 |
+
WarpLoadedFragmentA1 warp_loaded_frag_A1[2];
|
| 1028 |
+
WarpLoadedFragmentA1Scale warp_loaded_frag_A1_scale[2];
|
| 1029 |
+
WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
|
| 1030 |
+
WarpTransformedFragmentA1 warp_transformed_frag_A1[2];
|
| 1031 |
+
WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
|
| 1032 |
+
|
| 1033 |
+
Operator1 warp_mma1;
|
| 1034 |
+
|
| 1035 |
+
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0]);
|
| 1036 |
+
++warp_tile_iterator_A1_;
|
| 1037 |
+
|
| 1038 |
+
warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]);
|
| 1039 |
+
++warp_tile_iterator_A1_scale_;
|
| 1040 |
+
|
| 1041 |
+
this->warp_tile_iterator_B_.set_kgroup_index(0);
|
| 1042 |
+
this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[0]);
|
| 1043 |
+
++this->warp_tile_iterator_B_;
|
| 1044 |
+
|
| 1045 |
+
int smem_write_stage_idx = Base::kStages - 1;
|
| 1046 |
+
int smem_read_stage_idx = 0;
|
| 1047 |
+
|
| 1048 |
+
warp_mma1.transform(
|
| 1049 |
+
warp_transformed_frag_A1[0],
|
| 1050 |
+
warp_transformed_frag_B1[0],
|
| 1051 |
+
FragmentAScaler::apply(warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0]),
|
| 1052 |
+
warp_loaded_frag_B1[0]);
|
| 1053 |
+
|
| 1054 |
+
// tf32x3 kernels use staging accumulation. warp_mma uses a temporary
|
| 1055 |
+
// accumulator and this temporary accumulator is added to the final
|
| 1056 |
+
// accumulator once in every mainloop iteration.
|
| 1057 |
+
plus<FragmentC1> plus_accum;
|
| 1058 |
+
|
| 1059 |
+
FragmentC1 tmp_accum;
|
| 1060 |
+
|
| 1061 |
+
if (platform::is_same<typename Operator1::MathOperator,
|
| 1062 |
+
arch::OpMultiplyAddFastF32>::value ||
|
| 1063 |
+
platform::is_same<typename Operator1::MathOperator,
|
| 1064 |
+
arch::OpMultiplyAddComplexFastF32>::value) {
|
| 1065 |
+
tmp_accum.clear();
|
| 1066 |
+
}
|
| 1067 |
+
|
| 1068 |
+
//
|
| 1069 |
+
// Mainloop
|
| 1070 |
+
//
|
| 1071 |
+
|
| 1072 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1073 |
+
for (int gemm_k_iterations_1 = gemm_k_iterations_1_ - (Base::kStages - 1);
|
| 1074 |
+
gemm_k_iterations_1 > (-Base::kStages + 1);
|
| 1075 |
+
gemm_k_iterations_1--) {
|
| 1076 |
+
//
|
| 1077 |
+
// Loop over GEMM K dimension
|
| 1078 |
+
//
|
| 1079 |
+
|
| 1080 |
+
// Computes a warp-level GEMM on data held in shared memory
|
| 1081 |
+
// Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
|
| 1082 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1083 |
+
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
|
| 1084 |
+
// Load warp-level tile from accumulator fragment (A)
|
| 1085 |
+
// or shared memory (operand B)
|
| 1086 |
+
this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
|
| 1087 |
+
Base::kWarpGemmIterations1);
|
| 1088 |
+
// skip warp tile loading for the last kgroup (we are out of the buf)
|
| 1089 |
+
if (gemm_k_iterations_1 > (-Base::kStages + 2) ||
|
| 1090 |
+
warp_mma_k < Base::kWarpGemmIterations1 - 1) {
|
| 1091 |
+
warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2]);
|
| 1092 |
+
warp_tile_iterator_A1_scale_.load(
|
| 1093 |
+
warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]);
|
| 1094 |
+
this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
|
| 1095 |
+
}
|
| 1096 |
+
++warp_tile_iterator_A1_;
|
| 1097 |
+
++warp_tile_iterator_A1_scale_;
|
| 1098 |
+
++this->warp_tile_iterator_B_;
|
| 1099 |
+
|
| 1100 |
+
if (warp_mma_k > 0)
|
| 1101 |
+
warp_mma1.transform(
|
| 1102 |
+
warp_transformed_frag_A1[warp_mma_k % 2],
|
| 1103 |
+
warp_transformed_frag_B1[warp_mma_k % 2],
|
| 1104 |
+
FragmentAScaler::apply(warp_loaded_frag_A1[warp_mma_k % 2],
|
| 1105 |
+
warp_loaded_frag_A1_scale[warp_mma_k % 2]),
|
| 1106 |
+
warp_loaded_frag_B1[warp_mma_k % 2]);
|
| 1107 |
+
|
| 1108 |
+
if (platform::is_same<typename Operator1::MathOperator,
|
| 1109 |
+
arch::OpMultiplyAddFastF32>::value ||
|
| 1110 |
+
platform::is_same<typename Operator1::MathOperator,
|
| 1111 |
+
arch::OpMultiplyAddComplexFastF32>::value) {
|
| 1112 |
+
warp_mma1(tmp_accum,
|
| 1113 |
+
warp_transformed_frag_A1[warp_mma_k % 2],
|
| 1114 |
+
warp_transformed_frag_B1[warp_mma_k % 2],
|
| 1115 |
+
tmp_accum);
|
| 1116 |
+
|
| 1117 |
+
if (warp_mma_k == 0) {
|
| 1118 |
+
accum = plus_accum(accum, tmp_accum);
|
| 1119 |
+
tmp_accum.clear();
|
| 1120 |
+
}
|
| 1121 |
+
} else {
|
| 1122 |
+
warp_mma1(accum,
|
| 1123 |
+
warp_transformed_frag_A1[warp_mma_k % 2],
|
| 1124 |
+
warp_transformed_frag_B1[warp_mma_k % 2],
|
| 1125 |
+
accum);
|
| 1126 |
+
}
|
| 1127 |
+
|
| 1128 |
+
// Issue global->shared copies for the this stage
|
| 1129 |
+
if (warp_mma_k < Base::kWarpGemmIterations1 - 1) {
|
| 1130 |
+
int group_start_iteration_B1;
|
| 1131 |
+
|
| 1132 |
+
group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1;
|
| 1133 |
+
|
| 1134 |
+
if (!kSmemContainsEntireB) {
|
| 1135 |
+
copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
|
| 1136 |
+
}
|
| 1137 |
+
}
|
| 1138 |
+
|
| 1139 |
+
if (warp_mma_k + 2 == Base::kWarpGemmIterations1) {
|
| 1140 |
+
int group_start_iteration_B1;
                    group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1;

                    if (!kSmemContainsEntireB) {
                        copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
                    }

                    // Inserts a memory fence between stages of cp.async instructions.
                    cutlass::arch::cp_async_fence();

                    // Waits until kStages-2 stages have committed.
                    arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
                    __syncthreads();

                    // Move to the next stage
                    iterator_B1.add_tile_offset({1, 0});

                    this->smem_iterator_B1_.add_tile_offset({1, 0});

                    // Add negative offsets to return iterators to the 'start' of the
                    // circular buffer in shared memory
                    if (!kSmemContainsEntireB) {
                        if (smem_write_stage_idx == (Base::kStages - 1)) {
                            this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
                            smem_write_stage_idx = 0;
                        } else {
                            ++smem_write_stage_idx;
                        }

                        if (smem_read_stage_idx == (Base::kStages - 1)) {
                            this->warp_tile_iterator_B_.add_tile_offset(
                                {-Base::kStages * Policy1::kPartitionsK *
                                     Base::kWarpGemmIterations1,
                                 0});
                            smem_read_stage_idx = 0;
                        } else {
                            ++smem_read_stage_idx;
                        }
                    }

                    iterator_B1.set_residual_tile(gemm_k_iterations_1 == 2);
                    iterator_B1.clear_mask(gemm_k_iterations_1 == 1);
                }

                // Do any conversions feeding the first stage at the end of the loop so
                // we can start right away on mma instructions
                if (warp_mma_k + 1 == Base::kWarpGemmIterations1)
                    warp_mma1.transform(
                        warp_transformed_frag_A1[(warp_mma_k + 1) % 2],
                        warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
                        FragmentAScaler::apply(warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
                                               warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]),
                        warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
            }
        }

        if (platform::is_same<typename Operator1::MathOperator,
                              arch::OpMultiplyAddFastF32>::value ||
            platform::is_same<typename Operator1::MathOperator,
                              arch::OpMultiplyAddComplexFastF32>::value) {
            accum = plus_accum(accum, tmp_accum);
        }
    }
};

template <typename WarpShape,
          typename InstructionShape,
          typename RegularWarpIterator,
          typename Policy,
          typename Enable = void>
struct DefaultWarpIteratorAFromSharedMemory {};

// TensorOp - Ampere half
template <typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
    cutlass::gemm::GemmShape<32, 32, 32>,
    cutlass::gemm::GemmShape<16, 8, 8>,
    RegularWarpIterator,
    Policy,
    typename platform::enable_if<(sizeof_bits<typename RegularWarpIterator::Element>::value == 16 &&
                                  Policy::Operator::Policy::OpDelta::kRow == 1)>::type> {
    static constexpr auto kWarpSize = 32;
    using OpDelta = typename Policy::Operator::Policy::OpDelta;
    using WarpShape = cutlass::MatrixShape<32, 32>;

    using WarpIterator =
        cutlass::gemm::warp::WarpIteratorFromSmem<cutlass::gemm::Operand::kA,
                                                  typename RegularWarpIterator::Element>;
};

// TensorOp - Ampere f32
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
    WarpShape,
    cutlass::gemm::GemmShape<16, 8, 8>,
    RegularWarpIterator,
    Policy,
    typename platform::enable_if<(sizeof_bits<typename RegularWarpIterator::Element>::value != 16 ||
                                  Policy::Operator::Policy::OpDelta::kRow != 1)>::type> {
    using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
    static constexpr auto kWarpSize = 32;
    using OpDelta = typename Policy::Operator::Policy::OpDelta;

    using WarpIterator = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
        cutlass::MatrixShape<WarpShape::kM, WarpShape::kK>,
        cutlass::gemm::Operand::kA,
        typename RegularWarpIterator::Element,
        cutlass::layout::RowMajor,
        cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>,
        OpDelta::kRow,
        kWarpSize>;
};

// TensorOp - Volta
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<WarpShape,
                                            cutlass::gemm::GemmShape<16, 16, 4>,
                                            RegularWarpIterator,
                                            Policy> {
    using InstructionShape = cutlass::gemm::GemmShape<16, 16, 4>;
    static constexpr auto kWarpSize = 32;
    using OpDelta = typename Policy::Operator::Policy::OpDelta;

    using WarpIterator = cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator<
        cutlass::MatrixShape<32, 32>,  // MatrixShape<WarpShape::kM,
                                       // WarpShape::kK>,
        cutlass::gemm::Operand::kA,
        typename RegularWarpIterator::Element,
        cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>,
        cutlass::MatrixShape<16, 4>,
        OpDelta::kRow,
        kWarpSize>;
};

// Simt
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<WarpShape,
                                            cutlass::gemm::GemmShape<1, 1, 1>,
                                            RegularWarpIterator,
                                            Policy> {
    using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
    static constexpr auto kWarpSize = 32;

    // We just use the same iterator, as we reproduced the same shared-memory
    // schema. Just modify it to handle non-complete tiles.
    using WarpIterator = RegularWarpIterator;
};

// Converts a "regular" Mma into their counterpart from shared memory
template <typename Mma_,
          typename AccumulatorSharedStorage,
          /// whether or not to apply elementwise multiplication of operand A by
          /// another matrix in shared memory before usage in A @ B
          bool kScaleOperandA,
          bool kTransposeA = false>
struct DefaultMmaFromSharedMemory;

// Mma pipelined
template <
    /// Size of the Gemm problem - concept: gemm::GemmShape<>
    typename Shape_,
    /// Iterates over tiles of A operand in global memory
    // (concept: ReadableTileIterator | ForwardTileIterator |
    // MaskedTileIterator)
    typename IteratorA_,
    /// Iterates over tiles of A operand in shared memory
    /// (concept: WriteableTileIterator | RandomAccessTileIterator)
    typename SmemIteratorA_,
    /// Iterates over tiles of B operand in global memory
    // (concept: ReadableTileIterator | ForwardTileIterator |
    // MaskedTileIterator)
    typename IteratorB_,
    /// Iterates over tiles of B operand in shared memory
    /// (concept: WriteableTileIterator | RandomAccessTileIterator)
    typename SmemIteratorB_,
    /// Data type of accumulator matrix
    typename ElementC_,
    /// Data type of accumulator matrix
    typename LayoutC_,
    /// Policy describing tuning details (concept: MmaPolicy)
    typename Policy_,
    /// Transformation applied to A operand
    typename TransformA_,
    /// Transformation applied to B operand
    typename TransformB_,
    typename AccumulatorSharedStorage_,
    /// whether or not to apply elementwise multiplication of operand A by
    /// another matrix in shared memory before usage in A @ B
    bool kScaleOperandA,
    bool kTransposeA>
struct DefaultMmaFromSharedMemory<MmaPipelined<Shape_,
                                               IteratorA_,
                                               SmemIteratorA_,
                                               IteratorB_,
                                               SmemIteratorB_,
                                               ElementC_,
                                               LayoutC_,
                                               Policy_,
                                               TransformA_,
                                               TransformB_>,
                                  AccumulatorSharedStorage_,
                                  kScaleOperandA,
                                  kTransposeA> {
    static constexpr int kWarpSize = 32;
    using SmemAccumulatorLayout = cutlass::layout::RowMajor;

    using RegularMma = MmaPipelined<Shape_,
                                    IteratorA_,
                                    SmemIteratorA_,
                                    IteratorB_,
                                    SmemIteratorB_,
                                    ElementC_,
                                    LayoutC_,
                                    Policy_,
                                    TransformA_,
                                    TransformB_>;

    using WarpShape = typename Policy_::Operator::Shape;
    using InstructionShape = typename Policy_::Operator::InstructionShape;
    using ArchMmaOperator = typename Policy_::Operator;

    static constexpr bool kIsTransposedA = false;
    using WarpIteratorA =
        typename DefaultWarpIteratorAFromSharedMemory<WarpShape,
                                                      InstructionShape,
                                                      typename RegularMma::Operator::IteratorA,
                                                      Policy_>::WarpIterator;
    using IteratorB =
        typename cutlass::transform::threadblock::MakeIteratorResidualLast<IteratorB_>::Iterator;

    using Mma =
        typename cutlass::gemm::threadblock::MmaPipelinedFromSharedMemory<Shape_,
                                                                          WarpIteratorA,
                                                                          kScaleOperandA,
                                                                          AccumulatorSharedStorage_,
                                                                          IteratorB,
                                                                          SmemIteratorB_,
                                                                          ElementC_,
                                                                          LayoutC_,
                                                                          Policy_>;
};

template <
    /// Size of the Gemm problem - concept: gemm::GemmShape<>
    typename Shape_,
    /// Iterates over tiles of A operand in global memory
    // (concept: ReadableTileIterator | ForwardTileIterator |
    // MaskedTileIterator)
    typename IteratorA_,
    /// Iterates over tiles of A operand in shared memory
    /// (concept: WriteableTileIterator | RandomAccessTileIterator)
    typename SmemIteratorA_,
    /// Cache operation for operand A
    cutlass::arch::CacheOperation::Kind CacheOpA,
    /// Iterates over tiles of B operand in global memory
    // (concept: ReadableTileIterator | ForwardTileIterator |
    // MaskedTileIterator)
    typename IteratorB_,
    /// Iterates over tiles of B operand in shared memory
    /// (concept: WriteableTileIterator | RandomAccessTileIterator)
    typename SmemIteratorB_,
    /// Cache operation for operand B
    cutlass::arch::CacheOperation::Kind CacheOpB,
    /// Data type of accumulator matrix
    typename ElementC_,
    /// Data type of accumulator matrix
    typename LayoutC_,
    /// Policy describing tuning details (concept: MmaPolicy)
    typename Policy_,
    /// Number of stages,
    int Stages,
    /// Use zfill or predicate for out-of-bound cp.async
    SharedMemoryClearOption SharedMemoryClear,
    typename AccumulatorSharedStorage_,
    /// whether or not to apply elementwise multiplication of operand A by
    /// another matrix in shared memory before usage in A @ B
    bool kScaleOperandA,
    bool kTransposeA>
struct DefaultMmaFromSharedMemory<MmaMultistage<Shape_,
                                                IteratorA_,
                                                SmemIteratorA_,
                                                CacheOpA,
                                                IteratorB_,
                                                SmemIteratorB_,
                                                CacheOpB,
                                                ElementC_,
                                                LayoutC_,
                                                Policy_,
                                                Stages,
                                                SharedMemoryClear>,
                                  AccumulatorSharedStorage_,
                                  kScaleOperandA,
                                  kTransposeA> {
    static constexpr int kWarpSize = 32;

    using RegularMma = MmaMultistage<Shape_,
                                     IteratorA_,
                                     SmemIteratorA_,
                                     CacheOpA,
                                     IteratorB_,
                                     SmemIteratorB_,
                                     CacheOpB,
                                     ElementC_,
                                     LayoutC_,
                                     Policy_,
                                     Stages,
                                     SharedMemoryClear>;

    using WarpShape = typename Policy_::Operator::Shape;
    using InstructionShape = typename Policy_::Operator::InstructionShape;
    using WarpIteratorA_ =
        typename DefaultWarpIteratorAFromSharedMemory<WarpShape,
                                                      InstructionShape,
                                                      typename RegularMma::Operator::IteratorA,
                                                      Policy_>::WarpIterator;
    using WarpIteratorTranspose = TransposeWarpIterator<WarpIteratorA_>;
    static constexpr bool kIsTransposedA = WarpIteratorTranspose::kSupportsTranspose && kTransposeA;
    using WarpIteratorA = typename platform::
        conditional<kIsTransposedA, typename WarpIteratorTranspose::Iterator, WarpIteratorA_>::type;

    static int constexpr kMaxK = kIsTransposedA ? AccumulatorSharedStorage_::Shape::kM
                                                : AccumulatorSharedStorage_::Shape::kN;
    // Reduce the number of stages if we don't need that many
    static int constexpr kStagesMax = (kMaxK + int(Shape_::kK) - 1) / int(Shape_::kK);
    static int constexpr kStages = cutlass::const_min(Stages, kStagesMax);

    using IteratorB =
        typename cutlass::transform::threadblock::MakeIteratorResidualLast<IteratorB_>::Iterator;
    using Mma = typename cutlass::gemm::threadblock::MmaMultistageFromSharedMemory<
        Shape_,
        WarpIteratorA,
        kScaleOperandA,
        AccumulatorSharedStorage_,
        IteratorB,
        SmemIteratorB_,
        RegularMma::kCacheOpB,
        ElementC_,
        LayoutC_,
        Policy_,
        kStages,
        kMaxK>;
};

/////////////////////////////////////////////////////////////////////////////////////////////////

template <typename IteratorC,
          typename Operator,
          typename scalar_t,
          typename WarpShape_,
          typename ThreadblockShape_>
struct B2bGemm;

// Tensor Cores >= Sm75 specialization (Ampere ...)
template < /// Size of the matrix to load (concept: MatrixShape)
    typename Shape_,
    /// Element type
    typename Element_,
    /// Layout of operand in memory
    typename Layout_,
    /// Shape of one matrix product operation (concept: MatrixShape)
    typename InstructionShape_,
    /// Interval between adjacent *MMA instructions (in units of MMA
    /// instructions, concept: MatrixShape)
    typename OpDelta_,
    typename Operator,
    typename scalar_t,
    typename WarpShape_,
    typename ThreadblockShape_>
struct B2bGemm<
    cutlass::gemm::warp::
        MmaTensorOpAccumulatorTileIterator<Shape_, Element_, Layout_, InstructionShape_, OpDelta_>,
    Operator,
    scalar_t,
    WarpShape_,
    ThreadblockShape_> {
    using IteratorC = typename cutlass::gemm::warp::
        MmaTensorOpAccumulatorTileIterator<Shape_, Element_, Layout_, InstructionShape_, OpDelta_>;
    using FragmentC = typename IteratorC::Fragment;
    using InstructionShape = InstructionShape_;
    using WarpShape = WarpShape_;
    using ThreadblockShape = ThreadblockShape_;
    using accum_t = Element_;
    using lse_scalar_t = float;

    using SmemAccumulatorLayout = cutlass::layout::RowMajor;

    // Iterator to load accumulators (results of matmul in registers)
    using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
        WarpShape,
        InstructionShape,
        accum_t,
        typename Operator::Policy::Operator::FragmentC,
        cutlass::layout::RowMajor>;

    // Iterator to store to shared-memory
    using SmemIteratorD0 =
        typename cutlass::epilogue::warp::TileIteratorTensorOp<WarpShape,
                                                               InstructionShape,
                                                               scalar_t,  // accum_t,
                                                               SmemAccumulatorLayout>;
    using AccumulatorSharedStorage =
        cutlass::gemm::threadblock::AccumulatorSharedStorage<ThreadblockShape,
                                                             typename SmemIteratorD0::Element,
                                                             typename SmemIteratorD0::TensorLayout,
                                                             typename SmemIteratorD0::Padding>;
    // We need to provide an operation for the epilogue. Let's create an
    // operation that does nothing (ScaleType::Nothing), just converts
    // from accum_t (float) -> scalar_t (can be half)
    using OutputOpNoOp = cutlass::epilogue::thread::LinearCombination<
        typename SmemIteratorD0::Element,  // ElementOutput
        FragmentIteratorAccumulator::Fragment::kElements,
        accum_t,                           // ElementAccumulator
        typename SmemIteratorD0::Element,  // ElementCompute
        cutlass::epilogue::thread::ScaleType::Nothing>;
    using Epilogue = cutlass::epilogue::threadblock::EpilogueSmemAccumulator<
        SmemIteratorD0,
        FragmentIteratorAccumulator,
        SmemIteratorD0,  // ScaleBiasIterator
                         // - not used
        OutputOpNoOp>;

    // Epilogue 2: with LSE (for backwards pass)
    static int const kElementsPerAccess = 2;  // TODO: Why 2?
    using IteratorAccumulatorLSE = cutlass::transform::threadblock::VectorIterator<
        cutlass::transform::threadblock::PredicatedVectorAccessIterator<
            // Shape
            cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kN>,
            // WarpShape
            cutlass::MatrixShape<WarpShape::kM, WarpShape::kN>,
            lse_scalar_t,
            cutlass::layout::RowMajor,
            kElementsPerAccess>>;
    using EpilogueOpApplyLSE = cutlass::epilogue::thread::ApplyLogSumExp<
        scalar_t,      // ElementOutput_
        lse_scalar_t,  // ElementLSE_
        accum_t,       // ElementAccumulator_
        accum_t,       // ElementCompute_
        128 / cutlass::sizeof_bits<scalar_t>::value
        // FragmentIteratorAccumulator::Fragment::kElements
        // InstructionShape::kM * InstructionShape::kN / 32
        >;
    using EpilogueWithLSE =
        cutlass::epilogue::threadblock::EpilogueSmemAccumulator<SmemIteratorD0,
                                                                FragmentIteratorAccumulator,
                                                                IteratorAccumulatorLSE,
                                                                EpilogueOpApplyLSE>;

    static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage,
                                           FragmentC const& accum,
                                           int lane_id,
                                           cutlass::MatrixCoord const& tile_coords)
    {
        SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id);
        smem_iterator_attn.add_tile_offset(
            tile_coords * cutlass::MatrixCoord{SmemIteratorD0::TileIterations::kRow,
                                               SmemIteratorD0::TileIterations::kColumn});
        Epilogue epilogue;
        epilogue(OutputOpNoOp({}), smem_iterator_attn, accum);
    }

    static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage,
                                                   FragmentC& accum,
                                                   lse_scalar_t const* lse,
                                                   int32_t lse_extents,
                                                   int thread_id,
                                                   int warp_id,
                                                   int lane_id,
                                                   cutlass::MatrixCoord const& tile_coords)
    {
        constexpr int32_t kAlignLSE = 32;
        IteratorAccumulatorLSE iterator_lse(
            lse,
            {(int32_t)0, (int32_t)ceil_div(lse_extents, kAlignLSE) * kAlignLSE},
            thread_id,
            warp_id,
            cutlass::MatrixCoord{0, 0}  // offset
        );

        SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id);
        smem_iterator_attn.add_tile_offset(
            tile_coords * cutlass::MatrixCoord{SmemIteratorD0::TileIterations::kRow,
                                               SmemIteratorD0::TileIterations::kColumn});
        EpilogueWithLSE epilogue;
        EpilogueOpApplyLSE minus_lse_exp({});
        epilogue(minus_lse_exp,
                 smem_iterator_attn,
                 accum,
                 // scale - unused
                 iterator_lse,
                 // bias
                 iterator_lse);
    }
};

// Volta Specialization
// only supported for f16
template <typename Operator, typename WarpShape_, typename ThreadblockShape_>
struct B2bGemm<cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
                   cutlass::MatrixShape<32, 32>,
                   float,
                   cutlass::layout::RowMajor,
                   cutlass::gemm::GemmShape<16, 16, 4>,
                   cutlass::MatrixShape<1, 1>>,
               Operator,
               cutlass::half_t,
               WarpShape_,
               ThreadblockShape_> {
    using IteratorC = cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
        cutlass::MatrixShape<32, 32>,
        float,
        cutlass::layout::RowMajor,
        cutlass::gemm::GemmShape<16, 16, 4>,
        cutlass::MatrixShape<1, 1>>;
    using scalar_t = cutlass::half_t;
    using accum_t = IteratorC::Element;
    using WarpShape = WarpShape_;
    using ThreadblockShape = ThreadblockShape_;
    using FragmentC = IteratorC::Fragment;
    using lse_scalar_t = float;

    using SmemAccumulatorLayout = cutlass::layout::RowMajor;
    using SmemIteratorD0 =
        cutlass::epilogue::warp::TileIteratorVoltaTensorOp<WarpShape,
                                                           cutlass::gemm::GemmShape<32, 32, 4>,
                                                           scalar_t,
                                                           SmemAccumulatorLayout>;

    // // Storage in shared-memory for Q.Kt
    using AccumulatorSharedStorage = cutlass::gemm::threadblock::AccumulatorSharedStorage<
        ThreadblockShape,
        scalar_t,
        cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
            16,
            32>,                    // typename SmemIteratorD0::TensorLayout,
        cutlass::MatrixShape<0, 0>  // Padding
        >;

    using OutputLayout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>;
    using TensorRef = cutlass::TensorRef<scalar_t, OutputLayout>;
    using Policy = typename IteratorC::Policy;
    using Element = accum_t;
    // Those are MmaVoltaTensorOpAccumulatorTileIterator private fields
    // Let's copy their values
    static int const kElementsPerPartial = 4;
    using EleShapePerPatial =
        typename cutlass::platform::conditional<cutlass::platform::is_same<Element, float>::value,
                                                cutlass::MatrixShape<2, 2>,
                                                cutlass::MatrixShape<1, 4>>::type;
    static int const kElementsPerMma = 8;
    static int const kAccumulatorPatials = 2;
    using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;

    static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage,
                                           FragmentC const& accum,
                                           int lane_id,
                                           cutlass::MatrixCoord const& tile_coords)
    {
        // ctor - from MmaVoltaTensorOpAccumulatorTileIterator
        TensorRef ref_(shared_storage.accum_ref());
        int quad = (lane_id >> 2);
        int lane_in_quad = (lane_id & 3);
        int accum_m, accum_n;

        if (cutlass::platform::is_same<Element, float>::value) {
            // (quad[2],quad[0])+lane_in_quad[0]
            accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
            // (quad[1])+lane_in_quad[1]
            accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
                      (lane_in_quad & 2);
        } else {
            accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad;  // (quad[2],quad[0])
            accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
        }
        cutlass::MatrixCoord lane_offset(accum_m, accum_n);

        // Tile offset
        ref_.add_coord_offset(tile_coords * cutlass::MatrixCoord({IteratorC::Shape::kRow,
                                                                  IteratorC::Shape::kColumn}));

        using AccessType = cutlass::Array<scalar_t, EleShapePerPatial::kColumn>;

        // store - from MmaVoltaTensorOpAccumulatorTileIterator
        CUTLASS_PRAGMA_UNROLL
        for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
            CUTLASS_PRAGMA_UNROLL
            for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
                CUTLASS_PRAGMA_UNROLL
                for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
                    CUTLASS_PRAGMA_UNROLL
                    for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
                        int mma_accum_start = (((tile_n * Policy::TileIterations::kRow + tile_m) *
                                                    Policy::MmaIterations::kColumn +
                                                mma_n) *
                                                   Policy::MmaIterations::kRow +
                                               mma_m) *
                                              kElementsPerMma;

                        CUTLASS_PRAGMA_UNROLL
                        for (int p = 0; p < kAccumulatorPatials; ++p) {
                            CUTLASS_PRAGMA_UNROLL
                            for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
                                int accum_m = tile_m * Policy::InterleavedTile::kRow +
                                              mma_m * QuadShapePerPatialMma::kRow + m * 2;
                                int accum_n = tile_n * Policy::InterleavedTile::kColumn +
                                              mma_n * QuadShapePerPatialMma::kColumn +
                                              p * Policy::InterleavedTile::kColumn / 2;
                                int r = (accum_m + lane_offset.row());
                                AccessType to_store;
                                CUTLASS_PRAGMA_UNROLL
                                for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
                                    int idx = mma_accum_start + p * kElementsPerPartial +
                                              m * EleShapePerPatial::kColumn + n;
                                    int c = (accum_n + n + lane_offset.column());
                                    to_store[n] = scalar_t(accum[idx]);
                                }
                                int c = (accum_n + lane_offset.column());
                                assert(r < 32);
                                assert(c < 32);
                                *reinterpret_cast<AccessType*>(ref_.data() + ref_.offset({r, c})) =
                                    to_store;
                            }
                        }
                    }
                }
            }
        }
    }

    static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage,
                                                   typename IteratorC::Fragment& accum,
                                                   lse_scalar_t const* lse,
                                                   int lse_extent,
                                                   int thread_id,
                                                   int warp_id,
                                                   int lane_id,
                                                   cutlass::MatrixCoord const& tile_coords)
    {
        // Non-optimized way to apply LSE to registers
        // NOTE: accum is attn.T
        // TODO: Optimize for each architecture
        static constexpr int WarpSize = 32;
        using AccumLambdaIterator =
            typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>::Iterator;
        auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords);

        cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched;
        lse_prefetched.clear();
        int rowIdx = 0;
        int colIdx = 0;
        AccumLambdaIterator::iterateRows(
            lane_offset,
            [&](int accum_m) {
                ++rowIdx;
                colIdx = 0;
            },
            [&](int accum_m, int accum_n, int idx) {
                if (rowIdx == 1) {
                    lse_prefetched[colIdx] = accum_n < lse_extent
                                                 ? lse[accum_n]
                                                 : platform::numeric_limits<accum_t>::infinity();
                }
                accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]);
                ++colIdx;
            },
            [&](int accum_m) {});
        accumToSmem(shared_storage, accum, lane_id, tile_coords);
    }
};

// Simt Specialization
// for f32 on Sm70-Sm75 and f16/f32 below

template <typename Operator,
          typename OperatorPolicy,
          typename scalar_t,
          typename WarpShape_,
          typename ThreadblockShape_>
struct B2bGemm<cutlass::gemm::warp::MmaSimtTileIterator<cutlass::MatrixShape<32, 32>,
                                                        cutlass::gemm::Operand::kC,
                                                        float,
                                                        cutlass::layout::RowMajor,
                                                        OperatorPolicy,
                                                        1,
                                                        1>,
               Operator,
               scalar_t,
               WarpShape_,
               ThreadblockShape_> {
    using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator<cutlass::MatrixShape<32, 32>,
                                                               cutlass::gemm::Operand::kC,
                                                               float,
                                                               cutlass::layout::RowMajor,
                                                               OperatorPolicy,
                                                               1,
                                                               1>;
    using accum_t = typename IteratorC::Element;
    using WarpShape = WarpShape_;
    using ThreadblockShape = ThreadblockShape_;
    using FragmentC = typename IteratorC::Fragment;
    using lse_scalar_t = float;

    // Storage in shared-memory for Q.Kt
    using AccumulatorSharedStorage =
        cutlass::gemm::threadblock::AccumulatorSharedStorage<ThreadblockShape,
                                                             scalar_t,
                                                             cutlass::layout::ColumnMajor,
                                                             cutlass::MatrixShape<0, 0>  // Padding
                                                             >;

    static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage,
                                           FragmentC const& accum,
                                           int lane_id,
                                           cutlass::MatrixCoord const& tile_coords)
    {
        using Policy = typename IteratorC::Policy;
        using Element = typename IteratorC::Element;
        using Iterations = typename IteratorC::Iterations;
        using Delta = typename IteratorC::Delta;

        auto ref_ = shared_storage.accum_ref();
        // ctor - MmaSimtTileIterator
        // compute offset based on thread ID and lane layout
        typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();

        MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
                                  MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);

        ref_.add_coord_offset(lane_offset);

        // Tile offset
        ref_.add_coord_offset(tile_coords * cutlass::MatrixCoord({IteratorC::Shape::kRow,
                                                                  IteratorC::Shape::kColumn}));

        // store - MmaSimtTileIterator
        CUTLASS_PRAGMA_UNROLL
        for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
            CUTLASS_PRAGMA_UNROLL
            for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
                CUTLASS_PRAGMA_UNROLL
                for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
                    CUTLASS_PRAGMA_UNROLL
                    for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
                        int r = Policy::LaneMmaShape::kM * (mma_m * Policy::WarpShape::kRow) + m;
                        int c = mma_n * Delta::kColumn + n;
                        int idx = n + Policy::LaneMmaShape::kN *
                                          (mma_n + Iterations::kColumn *
                                                       (m + mma_m * Policy::LaneMmaShape::kM));
                        ref_.at({r, c}) = scalar_t(accum[idx]);
                    }
                }
            }
        }
    }

    static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage,
                                                   typename IteratorC::Fragment& accum,
                                                   lse_scalar_t const* lse,
                                                   int lse_extent,
                                                   int thread_id,
                                                   int warp_id,
                                                   int lane_id,
                                                   cutlass::MatrixCoord const& tile_coords)
    {
        // Non-optimized way to apply LSE to registers
        // NOTE: accum is attn.T
        // TODO: Optimize for each architecture
        static constexpr int WarpSize = 32;
        using AccumLambdaIterator =
            typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>::Iterator;
        auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords);

        cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched;
        lse_prefetched.clear();
        int rowIdx = 0;
        int colIdx = 0;
        AccumLambdaIterator::iterateRows(
            lane_offset,
            [&](int accum_m) {
                ++rowIdx;
                colIdx = 0;
            },
            [&](int accum_m, int accum_n, int idx) {
                if (rowIdx == 1) {
                    lse_prefetched[colIdx] = accum_n < lse_extent
                                                 ? lse[accum_n]
                                                 : platform::numeric_limits<accum_t>::infinity();
                }
                accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]);
                ++colIdx;
            },
            [&](int accum_m) {});
        accumToSmem(shared_storage, accum, lane_id, tile_coords);
    }
};

}  // namespace threadblock
}  // namespace gemm
}  // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
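Note on the LSE epilogues in the file above: every accumApplyLSEToSmem variant applies the same element-wise rewrite, accum[idx] = expf(accum[idx] - lse[col]), i.e. it turns a raw attention logit into a softmax probability using a row-wise log-sum-exp computed earlier in the pass. The following is a minimal host-side C++ sketch of that arithmetic only; the row values are made up for illustration and none of this code belongs to the kernels above.

    // Standalone illustration of the exp(x - lse) rescaling used by accumApplyLSEToSmem.
    // Hypothetical values; host-only, no CUTLASS dependency.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> logits = {1.0f, 2.0f, 3.0f};  // one attention row
        float m = 3.0f;                                  // row max (for numerical stability)
        float sum = 0.0f;
        for (float x : logits) sum += std::exp(x - m);
        float lse = m + std::log(sum);                   // log-sum-exp of the row

        float total = 0.0f;
        for (float x : logits) {
            float p = std::exp(x - lse);                 // what the epilogue stores to smem
            total += p;
            std::printf("%f ", p);
        }
        std::printf("\nrow sum = %f\n", total);          // ~1.0, i.e. a softmax row
        return 0;
    }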
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm_kernel_utils.h
ADDED
@@ -0,0 +1,254 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holdvr nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

#include <type_traits>
#include "cutlass/arch/mma.h"

template <typename arch, typename scalar_t>
struct CheckArch {
    static constexpr bool isPreVolta = arch::kMinComputeCapability < 70;
    static constexpr bool isPreAmpere =
        arch::kMinComputeCapability < 80 && arch::kMinComputeCapability >= 70;
    static constexpr bool isAmpere = arch::kMinComputeCapability >= 80;
#if defined(__CUDA_ARCH__)
    static constexpr bool compiler_cc = arch::kMinComputeCapability * 10 <= __CUDA_ARCH__;
#else
    static constexpr bool compiler_cc = true;
#endif
    static constexpr bool value = (isPreVolta && std::is_same_v<scalar_t, float>) ||
                                  (isPreAmpere && !std::is_same_v<scalar_t, cutlass::bfloat16_t>) ||
                                  isAmpere && compiler_cc;
};

#define DISPATCH_ARCHTAG(CC, func)                                                      \
    {                                                                                   \
        if constexpr (GPU_ARCH >= 80) {                                                 \
            if (CC >= 80) {                                                             \
                using ArchTag = cutlass::arch::Sm80;                                    \
                func;                                                                   \
            } else {                                                                    \
                EVOFORMER_CHECK(false, "Compile flag error. Unexpected GPU");           \
            }                                                                           \
        } else if constexpr (GPU_ARCH >= 75) {                                          \
            if (CC >= 75) {                                                             \
                using ArchTag = cutlass::arch::Sm75;                                    \
                func;                                                                   \
            } else {                                                                    \
                EVOFORMER_CHECK(false, "Compile flag error. Unexpected GPU");           \
            }                                                                           \
        } else if constexpr (GPU_ARCH >= 70) {                                          \
            if (CC >= 70) {                                                             \
                using ArchTag = cutlass::arch::Sm70;                                    \
                func;                                                                   \
            } else {                                                                    \
                EVOFORMER_CHECK(false, "Compile flag error. Unexpected GPU");           \
            }                                                                           \
        } else {                                                                        \
            EVOFORMER_CHECK(false, "Only GPUs with Tensor Core are supported for now"); \
        }                                                                               \
    }

#define DISPATCH_TYPES(tensor, func)                                              \
    {                                                                             \
        if (tensor.scalar_type() == at::ScalarType::Half) {                       \
            using scalar_t = cutlass::half_t;                                     \
            using torch_scalar_t = at::Half;                                      \
            func;                                                                 \
        } else if (tensor.scalar_type() == at::ScalarType::BFloat16) {            \
            using scalar_t = cutlass::bfloat16_t;                                 \
            using torch_scalar_t = at::BFloat16;                                  \
            func;                                                                 \
        } else {                                                                  \
            EVOFORMER_CHECK(false, "Only fp16 and bf16 supported at the moment"); \
        }                                                                         \
    }

#define DISPATCH_BOOL(BOOL_V, BOOL_NAME, F)       \
    {                                             \
        if (BOOL_V) {                             \
            constexpr bool BOOL_NAME = true;      \
            F();                                  \
        } else {                                  \
            constexpr bool BOOL_NAME = false;     \
            F();                                  \
        }                                         \
    }

#ifdef TORCH_CHECK
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
    EVOFORMER_CHECK(uint64_t(PTR) % ALIGNMENT == 0, #PTR " is not correctly aligned")
#define EVOFORMER_CHECK TORCH_CHECK
#elif defined(__CUDACC_RTC__)
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
    if (!(uint64_t(PTR) % ALIGNMENT == 0)) { return false; }
#define EVOFORMER_CHECK(COND, ERR) \
    if (!(COND)) { return false; }
#else
#include <iostream>
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT)                    \
    if (!(uint64_t(PTR) % ALIGNMENT == 0)) {                 \
        std::cerr << #PTR " is not correctly aligned\n";     \
        return false;                                        \
    }
#define EVOFORMER_CHECK(COND, ERR)                           \
    if (!(COND)) {                                           \
        std::cerr << "[Evoformer Attention]"                 \
                  << "'" #COND "' failed: " << ERR << "\n";  \
        return false;                                        \
    }
#endif

namespace gemm_kernel_utils {

template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer ceil_div(integer n, integer m)
{
    return (n + m - 1) / m;
}

template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer align_up(integer n, integer m)
{
    return ((n + m - 1) / m) * m;
}

////////////////////////////////////////////////////////////////////////////////
// Determine the type of GEMM we do (TensorCores or not, Shapes ...)
// TODO: Maybe we could rely on Cutlass's DefaultGemm templates
////////////////////////////////////////////////////////////////////////////////

// Fallback to Simt (FMA on cuda cores) if not in a special case below
template <typename ArchTag, typename scalar_t_, typename Enable = void>
struct DefaultGemmType {
    static constexpr int ThreadK = 8;
    static constexpr int WarpK = 8;
    static constexpr int kMinimumAlignment = 1;
    using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
    using OpClass = cutlass::arch::OpClassSimt;
    using Operator = cutlass::arch::OpMultiplyAdd;
};

// Specialization for tensorcores with f32
template <typename ArchTag>
struct DefaultGemmType<
    ArchTag,
    float,
    typename cutlass::platform::enable_if<ArchTag::kMinComputeCapability >= 80>::type> {
    static constexpr int ThreadK = 32;
    static constexpr int WarpK = 32;
    static constexpr int kMinimumAlignment = 4;
    using OpClass = cutlass::arch::OpClassTensorOp;
    using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
    using Operator = cutlass::arch::OpMultiplyAddFastF32;
};

// Specialization for tensorcores with f16/bf16 - Sm75+
template <typename ArchTag, typename scalar_t>
struct DefaultGemmType<
    ArchTag,
    scalar_t,
    typename cutlass::platform::enable_if<ArchTag::kMinComputeCapability >= 75 &&
                                          cutlass::sizeof_bits<scalar_t>::value == 16>::type> {
    static constexpr int ThreadK = 32;
    static constexpr int WarpK = 32;
    static constexpr int kMinimumAlignment = 4;
    using OpClass = cutlass::arch::OpClassTensorOp;
    using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
    using Operator = cutlass::arch::OpMultiplyAdd;
};

// Specialization for tensorcores with f16 - Volta
template <>
struct DefaultGemmType<cutlass::arch::Sm70, cutlass::half_t, void> {
    static constexpr int ThreadK = 32;
    static constexpr int WarpK = 32;
    static constexpr int kMinimumAlignment = 2;
    using OpClass = cutlass::arch::OpClassTensorOp;
    using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
    using Operator = cutlass::arch::OpMultiplyAdd;
};

// Enables to do
// `auto x = kCondition ? fa(arg) : fb(arg)`
// when `fa` and `fb` have different types
template <bool kVal, typename TA, typename TB>
struct call_conditional;

template <typename TA, typename TB>
struct call_conditional<true, TA, TB> {
    template <typename Arg>
    static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg) -> decltype(ta(arg))
    {
        return ta(arg);
    }
};

template <typename TA, typename TB>
struct call_conditional<false, TA, TB> {
    template <typename Arg>
    static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg) -> decltype(tb(arg))
    {
        return tb(arg);
    }
};

////////////////////////////////////////////////////////////////////////////////
// Mark a variable as warp-uniform - enables some compiler optimizations
// The cheapest way to do it is just to broadcast it from lane 0
////////////////////////////////////////////////////////////////////////////////

CUTLASS_DEVICE int32_t warp_uniform(int32_t value)
{
    return (int32_t)__shfl_sync(0xffffffff, (unsigned)value, 0);
}

template <typename T>
CUTLASS_DEVICE T* warp_uniform(T* ptr)
{
    struct {
        union {
            T* ptr;
            uint32_t asInt[2];
        };
    } p;
    p.ptr = ptr;
    p.asInt[0] = warp_uniform(p.asInt[0]);
    p.asInt[1] = warp_uniform(p.asInt[1]);
    return p.ptr;
}
}  // namespace gemm_kernel_utils
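The integer helpers at the top of gemm_kernel_utils.h are the arithmetic reused elsewhere in these headers, e.g. the kStagesMax = (kMaxK + Shape::kK - 1) / Shape::kK stage count in DefaultMmaFromSharedMemory and the ceil_div(lse_extents, kAlignLSE) * kAlignLSE padding of the LSE extent. A small standalone sketch of the same arithmetic follows; ceil_div and align_up are re-stated here as plain constexpr functions so it compiles without CUTLASS, and the numbers are made up for illustration.

    // Standalone restatement of ceil_div/align_up from gemm_kernel_utils.h
    // (the originals are CUTLASS_HOST_DEVICE templates; this sketch is host-only).
    #include <cstdio>

    constexpr int ceil_div(int n, int m) { return (n + m - 1) / m; }
    constexpr int align_up(int n, int m) { return ((n + m - 1) / m) * m; }

    int main() {
        // Example: an accumulator extent of 96 columns split into K-tiles of 32
        // needs ceil_div(96, 32) = 3 stages; 100 columns would need 4.
        std::printf("ceil_div(96, 32)  = %d\n", ceil_div(96, 32));   // 3
        std::printf("ceil_div(100, 32) = %d\n", ceil_div(100, 32));  // 4
        std::printf("align_up(100, 32) = %d\n", align_up(100, 32));  // 128
        return 0;
    }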
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h
ADDED
@@ -0,0 +1,691 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
 *reserved. SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*! \file
  \brief Epilogue iterator that supports prefetching

  Mostly copied from "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
*/

#pragma once

#include "cutlass/arch/arch.h"
#include "cutlass/arch/memory.h"
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/pitch_linear_thread_map.h"

////////////////////////////////////////////////////////////////////////////////

namespace cutlass {

////////////////////////////////////////////////////////////////////////////////

namespace epilogue {
namespace threadblock {

////////////////////////////////////////////////////////////////////////////////

/// Tile iterator used to load and store output tile from global memory in
/// epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator |
/// ForwardTileIterator
///
template <typename ThreadMap_,        ///< Thread map (conept: OutputTileThreadMap)
          typename Element_,          ///< Element data type
          bool ScatterD = false,      ///< Scatter D operand or not
          bool UseCUDAStore = false>
class PredicatedTileIteratorPrefetch {
public:
    using ThreadMap = ThreadMap_;
    using Shape = typename ThreadMap::Shape;

    using Element = Element_;

    using Layout = layout::RowMajor;
    using TensorRef = TensorRef<Element, Layout>;
    using ConstTensorRef = typename TensorRef::ConstTensorRef;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;
    using TensorCoord = MatrixCoord;

    static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
    static int const kThreads = ThreadMap::kThreads;
    static int const kIterations = ThreadMap::Count::kTile;

    static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
    static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
    static_assert(ThreadMap::Iterations::kCluster > 0,
                  "ThreadMap::Iterations::kCluster must be > 0");
    static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");

    /// Fragment object
    using Fragment = Array<Element,
                           ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
                               ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
                               ThreadMap::kElementsPerAccess>;

    /// Memory access size
    using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;

    //
    // Parameters struct
    //

    /// Uses a non-template class
    struct Params : PredicatedTileIteratorParams {
        using Base = PredicatedTileIteratorParams;

        CUTLASS_HOST_DEVICE
        Params() {}

        CUTLASS_HOST_DEVICE
        Params(Layout const& layout)
            : PredicatedTileIteratorParams(
                  layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
                  make_OutputTileThreadMapDesc<ThreadMap>())
        {
        }

        CUTLASS_HOST_DEVICE
        Params(Base const& base) : Base(base) {}
    };

    /// Mask object
    struct Mask {
        static int const kCount = ThreadMap::Iterations::kColumn;

        /// Predicate state
        bool predicates[kCount];

        //
        // Mask
        //
        CUTLASS_HOST_DEVICE
        Mask() { enable(); }

        ///< Efficiently disables all accesses guarded by mask
        CUTLASS_HOST_DEVICE void clear()
        {
            CUTLASS_PRAGMA_UNROLL
            for (int i = 0; i < kCount; ++i) { predicates[i] = false; }
        }

        ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask
        CUTLASS_DEVICE void enable()
        {
            CUTLASS_PRAGMA_UNROLL
            for (int i = 0; i < kCount; ++i) { predicates[i] = true; }
        }
    };

private:
    //
    // Data members
    //

    /// Parameters structure containing reference and precomputed state.
    PredicatedTileIteratorParams params_;

    /// Byte-level pointer
    uint8_t* byte_pointer_;

    /// Array of boolean values to contain steady-state predicates
    Mask mask_;

    /// Extent of the matrix tile in rows
    Index extent_row_;

    /// Extent of the matrix tile in rows
    Index extent_column_;

    /// A thread's starting row position (assuming steady-state predicates have
    /// been computed)
    Index thread_start_row_;

    /// A thread's starting column
    Index thread_start_column_;

    /// Internal state counter
    int state_[3];

    /// Scatter indices
    int const* indices_;

    //
    // Static asserts about internal strides
    //

    static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
    static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
    static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");

private:
    //
    // Methods
    //

public:
    //
    // Methods
    //

    /// Constructor
    CUTLASS_DEVICE
    PredicatedTileIteratorPrefetch(PredicatedTileIteratorParams const& params,
                                   Element* pointer,
                                   TensorCoord extent,
                                   int thread_idx,
                                   TensorCoord threadblock_offset = TensorCoord(),
                                   int const* indices = nullptr)
        : params_(params), indices_(indices)
    {
        TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;

        extent_row_ = extent.row();
        extent_column_ = extent.column();

        thread_start_row_ = thread_offset.row();
        thread_start_column_ = thread_offset.column();

        // Initialize predicates
        CUTLASS_PRAGMA_UNROLL
        for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
            mask_.predicates[c] =
                ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
        }

        // Null pointer performs no accesses
        if (!pointer) { mask_.clear(); }

        if (ScatterD && !indices) { mask_.clear(); }

        // Initialize pointer
        byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                        LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
                        LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;

        if (ScatterD) {
            byte_pointer_ =
                reinterpret_cast<uint8_t*>(pointer) +
                LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
        }

        // Initialize internal state counter
        state_[0] = state_[1] = state_[2] = 0;
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset)
    {
        byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
    }

    CUTLASS_DEVICE
    void prefetch_all()
    {
        CUTLASS_PRAGMA_UNROLL
        for (int iter = 0; iter < kIterations; ++iter) {
            prefetch();
            ++(*this);
        }
    }

    CUTLASS_DEVICE
    void prefetch()
    {
        uint8_t* byte_pointer = byte_pointer_;

        CUTLASS_PRAGMA_UNROLL
        for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 285 |
+
CUTLASS_PRAGMA_UNROLL
|
| 286 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 287 |
+
CUTLASS_PRAGMA_UNROLL
|
| 288 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 289 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 290 |
+
group * ThreadMap::Delta::kGroup +
|
| 291 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 292 |
+
|
| 293 |
+
AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer);
|
| 294 |
+
|
| 295 |
+
CUTLASS_PRAGMA_UNROLL
|
| 296 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 297 |
+
// on windows using unsigned long here gives the error
|
| 298 |
+
// error: asm operand type size(4) does not match
|
| 299 |
+
// type/size implied by constraint 'l'
|
| 300 |
+
uint64_t addr =
|
| 301 |
+
(uint64_t)((void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
|
| 302 |
+
kElementsPerAccess]);
|
| 303 |
+
asm volatile("prefetch.global.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
if (row + 1 < ThreadMap::Iterations::kRow) {
|
| 307 |
+
if (!ScatterD) { byte_pointer += params_.increment_row; }
|
| 308 |
+
}
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
if (group + 1 < ThreadMap::Iterations::kGroup) {
|
| 312 |
+
byte_pointer += params_.increment_group;
|
| 313 |
+
}
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
|
| 317 |
+
byte_pointer += params_.increment_cluster;
|
| 318 |
+
}
|
| 319 |
+
}
|
| 320 |
+
}
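A minimal usage sketch for the prefetch path (illustrative only; the epilogue that drives this iterator is not shown in this diff, and the variable names below are assumptions). Because prefetch_all() advances the iterator through operator++, a caller would typically dedicate one iterator instance to prefetching and keep a second instance for the actual loads:

// Hedged sketch of a call site, not taken from this file:
PredicatedTileIteratorPrefetch prefetch_iter(params, ptr, extent, thread_idx, tb_offset);
PredicatedTileIteratorPrefetch load_iter(params, ptr, extent, thread_idx, tb_offset);
prefetch_iter.prefetch_all();   // issues prefetch.global.L1 for every tile position
for (int it = 0; it < PredicatedTileIteratorPrefetch::kIterations; ++it) {
    PredicatedTileIteratorPrefetch::Fragment frag;
    load_iter.load(frag);       // predicated global loads into a register fragment
    ++load_iter;                // advance to the next position in the output tile
}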
|
| 321 |
+
|
| 322 |
+
/// Loads a fragment from memory
|
| 323 |
+
CUTLASS_DEVICE
|
| 324 |
+
void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const
|
| 325 |
+
{
|
| 326 |
+
uint8_t* byte_pointer = byte_pointer_;
|
| 327 |
+
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
|
| 328 |
+
|
| 329 |
+
CUTLASS_PRAGMA_UNROLL
|
| 330 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 331 |
+
CUTLASS_PRAGMA_UNROLL
|
| 332 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 333 |
+
CUTLASS_PRAGMA_UNROLL
|
| 334 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 335 |
+
int frag_row_idx =
|
| 336 |
+
(row + ThreadMap::Iterations::kRow *
|
| 337 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 338 |
+
|
| 339 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 340 |
+
group * ThreadMap::Delta::kGroup +
|
| 341 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 342 |
+
|
| 343 |
+
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
|
| 344 |
+
|
| 345 |
+
AccessType* memory_pointer =
|
| 346 |
+
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
|
| 347 |
+
|
| 348 |
+
if (ScatterD && row_guard) {
|
| 349 |
+
assert(indices_);
|
| 350 |
+
|
| 351 |
+
memory_pointer = reinterpret_cast<AccessType*>(
|
| 352 |
+
byte_pointer + byte_offset +
|
| 353 |
+
LongIndex(indices_[row_offset + thread_start_row_]) *
|
| 354 |
+
LongIndex(params_.stride));
|
| 355 |
+
}
|
| 356 |
+
|
| 357 |
+
CUTLASS_PRAGMA_UNROLL
|
| 358 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 359 |
+
bool guard = row_guard && mask_.predicates[column];
|
| 360 |
+
|
| 361 |
+
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
|
| 362 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
|
| 363 |
+
(void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
|
| 364 |
+
kElementsPerAccess],
|
| 365 |
+
guard);
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
if (row + 1 < ThreadMap::Iterations::kRow) {
|
| 369 |
+
if (!ScatterD) { byte_pointer += params_.increment_row; }
|
| 370 |
+
}
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
if (group + 1 < ThreadMap::Iterations::kGroup) {
|
| 374 |
+
byte_pointer += params_.increment_group;
|
| 375 |
+
}
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
|
| 379 |
+
byte_pointer += params_.increment_cluster;
|
| 380 |
+
}
|
| 381 |
+
}
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
/// Loads a fragment from memory
|
| 385 |
+
CUTLASS_DEVICE
|
| 386 |
+
void load(Fragment& frag) const { load_with_byte_offset(frag, 0); }
|
| 387 |
+
|
| 388 |
+
/// Stores a fragment to memory
|
| 389 |
+
CUTLASS_DEVICE
|
| 390 |
+
void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const
|
| 391 |
+
{
|
| 392 |
+
uint8_t* byte_pointer = byte_pointer_;
|
| 393 |
+
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
|
| 394 |
+
|
| 395 |
+
CUTLASS_PRAGMA_UNROLL
|
| 396 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 397 |
+
CUTLASS_PRAGMA_UNROLL
|
| 398 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 399 |
+
CUTLASS_PRAGMA_UNROLL
|
| 400 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 401 |
+
int frag_row_idx =
|
| 402 |
+
(row + ThreadMap::Iterations::kRow *
|
| 403 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 404 |
+
|
| 405 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 406 |
+
group * ThreadMap::Delta::kGroup +
|
| 407 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 408 |
+
|
| 409 |
+
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
|
| 410 |
+
|
| 411 |
+
AccessType* memory_pointer =
|
| 412 |
+
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
|
| 413 |
+
|
| 414 |
+
if (ScatterD && row_guard) {
|
| 415 |
+
assert(indices_);
|
| 416 |
+
|
| 417 |
+
memory_pointer = reinterpret_cast<AccessType*>(
|
| 418 |
+
byte_pointer + byte_offset +
|
| 419 |
+
LongIndex(indices_[row_offset + thread_start_row_]) *
|
| 420 |
+
LongIndex(params_.stride));
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
CUTLASS_PRAGMA_UNROLL
|
| 424 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 425 |
+
bool guard = row_guard && mask_.predicates[column];
|
| 426 |
+
|
| 427 |
+
if (UseCUDAStore) {
|
| 428 |
+
if (guard) {
|
| 429 |
+
memory_pointer[column * ThreadMap::Delta::kColumn /
|
| 430 |
+
kElementsPerAccess] =
|
| 431 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
|
| 432 |
+
column];
|
| 433 |
+
}
|
| 434 |
+
} else {
|
| 435 |
+
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
|
| 436 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
|
| 437 |
+
(void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
|
| 438 |
+
kElementsPerAccess],
|
| 439 |
+
guard);
|
| 440 |
+
}
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
if (row + 1 < ThreadMap::Iterations::kRow) {
|
| 444 |
+
if (!ScatterD) { byte_pointer += params_.increment_row; }
|
| 445 |
+
}
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
if (group + 1 < ThreadMap::Iterations::kGroup) {
|
| 449 |
+
byte_pointer += params_.increment_group;
|
| 450 |
+
}
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
|
| 454 |
+
byte_pointer += params_.increment_cluster;
|
| 455 |
+
}
|
| 456 |
+
}
|
| 457 |
+
}
|
| 458 |
+
|
| 459 |
+
/// Stores a fragment to memory
|
| 460 |
+
CUTLASS_DEVICE
|
| 461 |
+
void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); }
|
| 462 |
+
|
| 463 |
+
/// Loads a fragment from memory
|
| 464 |
+
CUTLASS_DEVICE
|
| 465 |
+
void downsample_load_with_byte_offset(Fragment& frag,
|
| 466 |
+
int64_t byte_offset,
|
| 467 |
+
int convolution_P,
|
| 468 |
+
int convolution_Q,
|
| 469 |
+
int add_P,
|
| 470 |
+
int add_Q,
|
| 471 |
+
int problem_N) const
|
| 472 |
+
{
|
| 473 |
+
uint8_t* byte_pointer = byte_pointer_;
|
| 474 |
+
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
|
| 475 |
+
|
| 476 |
+
CUTLASS_PRAGMA_UNROLL
|
| 477 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 478 |
+
CUTLASS_PRAGMA_UNROLL
|
| 479 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 480 |
+
CUTLASS_PRAGMA_UNROLL
|
| 481 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 482 |
+
int frag_row_idx =
|
| 483 |
+
(row + ThreadMap::Iterations::kRow *
|
| 484 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 485 |
+
|
| 486 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 487 |
+
group * ThreadMap::Delta::kGroup +
|
| 488 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 489 |
+
|
| 490 |
+
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
|
| 491 |
+
|
| 492 |
+
int output_row = row_offset + thread_start_row_;
|
| 493 |
+
int output_N = output_row / (convolution_P * convolution_Q);
|
| 494 |
+
int output_PQ = output_row % (convolution_P * convolution_Q);
|
| 495 |
+
int output_P = output_PQ / convolution_Q;
|
| 496 |
+
int output_Q = output_PQ % convolution_Q;
|
| 497 |
+
|
| 498 |
+
int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
|
| 499 |
+
(2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q +
|
| 500 |
+
add_Q;
|
| 501 |
+
|
| 502 |
+
int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);
|
| 503 |
+
|
| 504 |
+
AccessType* memory_pointer =
|
| 505 |
+
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
|
| 506 |
+
|
| 507 |
+
CUTLASS_PRAGMA_UNROLL
|
| 508 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 509 |
+
bool guard = row_guard && mask_.predicates[column];
|
| 510 |
+
|
| 511 |
+
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
|
| 512 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
|
| 513 |
+
(void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
|
| 514 |
+
kElementsPerAccess],
|
| 515 |
+
guard);
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
if (row + 1 < ThreadMap::Iterations::kRow) {
|
| 519 |
+
byte_pointer += params_.increment_row;
|
| 520 |
+
}
|
| 521 |
+
}
|
| 522 |
+
|
| 523 |
+
if (group + 1 < ThreadMap::Iterations::kGroup) {
|
| 524 |
+
byte_pointer += params_.increment_group;
|
| 525 |
+
}
|
| 526 |
+
}
|
| 527 |
+
|
| 528 |
+
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
|
| 529 |
+
byte_pointer += params_.increment_cluster;
|
| 530 |
+
}
|
| 531 |
+
}
|
| 532 |
+
}
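A small worked example of the downsample row mapping above (numbers are purely illustrative): with convolution_P = convolution_Q = 2, add_P = add_Q = 0 and output_row = 5, the code yields output_N = 1, output_P = 0, output_Q = 1, hence

    input_row   = 1 * (2*2) * (2*2) + (2*0 + 0) * (2*2) + 2*1 + 0 = 18
    byte_offset = (18 - 5) * problem_N * sizeof(float)

so the load is redirected into an input feature map whose spatial extent is (2*P) x (2*Q), sampling every other row and column.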
|
| 533 |
+
|
| 534 |
+
/// Loads a fragment from memory
|
| 535 |
+
CUTLASS_DEVICE
|
| 536 |
+
void upsample_load_with_byte_offset(Fragment& frag,
|
| 537 |
+
int64_t byte_offset,
|
| 538 |
+
int convolution_P,
|
| 539 |
+
int convolution_Q,
|
| 540 |
+
int add_P,
|
| 541 |
+
int add_Q,
|
| 542 |
+
int problem_N) const
|
| 543 |
+
{
|
| 544 |
+
uint8_t* byte_pointer = byte_pointer_;
|
| 545 |
+
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
|
| 546 |
+
|
| 547 |
+
CUTLASS_PRAGMA_UNROLL
|
| 548 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 549 |
+
CUTLASS_PRAGMA_UNROLL
|
| 550 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 551 |
+
CUTLASS_PRAGMA_UNROLL
|
| 552 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 553 |
+
int frag_row_idx =
|
| 554 |
+
(row + ThreadMap::Iterations::kRow *
|
| 555 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 556 |
+
|
| 557 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 558 |
+
group * ThreadMap::Delta::kGroup +
|
| 559 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 560 |
+
|
| 561 |
+
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
|
| 562 |
+
|
| 563 |
+
int output_row = row_offset + thread_start_row_;
|
| 564 |
+
int output_N = output_row / (convolution_P * convolution_Q);
|
| 565 |
+
int output_PQ = output_row % (convolution_P * convolution_Q);
|
| 566 |
+
int output_P = output_PQ / convolution_Q;
|
| 567 |
+
int output_Q = output_PQ % convolution_Q;
|
| 568 |
+
int row_add_P = add_P;
|
| 569 |
+
int row_add_Q = add_Q;
|
| 570 |
+
if (output_P > convolution_P - 2) row_add_P = 0;
|
| 571 |
+
if (output_Q > convolution_Q - 2) row_add_Q = 0;
|
| 572 |
+
|
| 573 |
+
int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) +
|
| 574 |
+
((output_P + row_add_P) / 2) * (convolution_Q / 2) +
|
| 575 |
+
(output_Q + row_add_Q) / 2;
|
| 576 |
+
|
| 577 |
+
int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);
|
| 578 |
+
|
| 579 |
+
AccessType* memory_pointer =
|
| 580 |
+
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
|
| 581 |
+
|
| 582 |
+
CUTLASS_PRAGMA_UNROLL
|
| 583 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 584 |
+
bool guard = row_guard && mask_.predicates[column];
|
| 585 |
+
|
| 586 |
+
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
|
| 587 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
|
| 588 |
+
(void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
|
| 589 |
+
kElementsPerAccess],
|
| 590 |
+
guard);
|
| 591 |
+
}
|
| 592 |
+
|
| 593 |
+
if (row + 1 < ThreadMap::Iterations::kRow) {
|
| 594 |
+
byte_pointer += params_.increment_row;
|
| 595 |
+
}
|
| 596 |
+
}
|
| 597 |
+
|
| 598 |
+
if (group + 1 < ThreadMap::Iterations::kGroup) {
|
| 599 |
+
byte_pointer += params_.increment_group;
|
| 600 |
+
}
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
|
| 604 |
+
byte_pointer += params_.increment_cluster;
|
| 605 |
+
}
|
| 606 |
+
}
|
| 607 |
+
}
|
| 608 |
+
|
| 609 |
+
CUTLASS_DEVICE
|
| 610 |
+
MatrixCoord thread_start() const
|
| 611 |
+
{
|
| 612 |
+
return MatrixCoord(thread_start_row_, thread_start_column_);
|
| 613 |
+
}
|
| 614 |
+
|
| 615 |
+
/// Need to get the thread start row from the tile iterator
|
| 616 |
+
CUTLASS_DEVICE
|
| 617 |
+
int32_t thread_start_row() const { return thread_start_row_; }
|
| 618 |
+
|
| 619 |
+
/// Need to get the thread start row from the tile iterator
|
| 620 |
+
CUTLASS_DEVICE
|
| 621 |
+
int32_t thread_start_column() const { return thread_start_column_; }
|
| 622 |
+
|
| 623 |
+
/// Extent of the matrix in rows
|
| 624 |
+
CUTLASS_DEVICE
|
| 625 |
+
Index extent_row() const { return extent_row_; }
|
| 626 |
+
|
| 627 |
+
/// Extent of the matrix in columns
|
| 628 |
+
CUTLASS_DEVICE
|
| 629 |
+
Index extent_column() const { return extent_column_; }
|
| 630 |
+
|
| 631 |
+
/// Advances to the next position to load or store
|
| 632 |
+
CUTLASS_HOST_DEVICE
|
| 633 |
+
PredicatedTileIteratorPrefetch& operator++()
|
| 634 |
+
{
|
| 635 |
+
++state_[0];
|
| 636 |
+
|
| 637 |
+
if (!ScatterD) { byte_pointer_ += params_.advance_row; }
|
| 638 |
+
|
| 639 |
+
thread_start_row_ += ThreadMap::Shape::kRow;
|
| 640 |
+
|
| 641 |
+
if (state_[0] == ThreadMap::Count::kRow) {
|
| 642 |
+
state_[0] = 0;
|
| 643 |
+
++state_[1];
|
| 644 |
+
byte_pointer_ += params_.advance_group;
|
| 645 |
+
|
| 646 |
+
thread_start_row_ +=
|
| 647 |
+
(ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
|
| 648 |
+
|
| 649 |
+
if (state_[1] == ThreadMap::Count::kGroup) {
|
| 650 |
+
state_[1] = 0;
|
| 651 |
+
++state_[2];
|
| 652 |
+
byte_pointer_ += params_.advance_cluster;
|
| 653 |
+
|
| 654 |
+
thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
|
| 655 |
+
ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
|
| 656 |
+
|
| 657 |
+
if (state_[2] == ThreadMap::Count::kCluster) {
|
| 658 |
+
state_[2] = 0;
|
| 659 |
+
byte_pointer_ += params_.advance_tile;
|
| 660 |
+
}
|
| 661 |
+
}
|
| 662 |
+
}
|
| 663 |
+
|
| 664 |
+
return *this;
|
| 665 |
+
}
|
| 666 |
+
|
| 667 |
+
///< Efficiently disables all accesses guarded by mask
|
| 668 |
+
CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
|
| 669 |
+
|
| 670 |
+
///< Efficiently enables all accesses guarded by mask
|
| 671 |
+
CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
|
| 672 |
+
|
| 673 |
+
///< Gets the mask
|
| 674 |
+
CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
|
| 675 |
+
|
| 676 |
+
///< Sets the mask
|
| 677 |
+
CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
|
| 678 |
+
};
|
| 679 |
+
|
| 680 |
+
template <typename IT>
|
| 681 |
+
struct MakePrefetchableIterator {
|
| 682 |
+
using Iterator = PredicatedTileIteratorPrefetch<typename IT::ThreadMap, typename IT::Element>;
|
| 683 |
+
};
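As a hedged illustration of how this trait could be applied (DefaultIterator is an assumed name, not defined in this diff), wrapping an existing epilogue output iterator gives it the prefetch()/prefetch_all() entry points while preserving its ThreadMap and Element:

// Sketch only:
using PrefetchIterator =
    cutlass::epilogue::threadblock::MakePrefetchableIterator<DefaultIterator>::Iterator;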
|
| 684 |
+
|
| 685 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 686 |
+
|
| 687 |
+
} // namespace threadblock
|
| 688 |
+
} // namespace epilogue
|
| 689 |
+
} // namespace cutlass
|
| 690 |
+
|
| 691 |
+
////////////////////////////////////////////////////////////////////////////////
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h
ADDED
|
@@ -0,0 +1,91 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

#include "predicated_tile_access_iterator_residual_last.h"
#include "predicated_tile_iterator_residual_last.h"

namespace cutlass {
namespace transform {
namespace threadblock {

template <typename BaseIterator>
struct MakeIteratorResidualLast;

template <typename Shape,
          typename Element,
          typename Layout,
          int AdvanceRank,
          typename ThreadMap,
          int AccessSize,
          bool Gather>
struct MakeIteratorResidualLast<
    PredicatedTileIterator<Shape, Element, Layout, AdvanceRank, ThreadMap, AccessSize, Gather>> {
    using Iterator = PredicatedTileIteratorResidualLast<Shape,
                                                        Element,
                                                        Layout,
                                                        AdvanceRank,
                                                        ThreadMap,
                                                        AccessSize,
                                                        Gather>;
};

template <typename Shape,
          typename Element,
          typename Layout,
          int AdvanceRank,
          typename ThreadMap,
          typename AccessType,
          bool Gather>
struct MakeIteratorResidualLast<PredicatedTileAccessIterator<Shape,
                                                             Element,
                                                             Layout,
                                                             AdvanceRank,
                                                             ThreadMap,
                                                             AccessType,
                                                             Gather>> {
    using Iterator = PredicatedTileAccessIteratorResidualLast<Shape,
                                                              Element,
                                                              Layout,
                                                              AdvanceRank,
                                                              ThreadMap,
                                                              AccessType,
                                                              Gather>;
};
} // namespace threadblock
} // namespace transform
} // namespace cutlass
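A similarly hedged sketch for this trait (Mma is an assumed kernel configuration type): a kernel can swap its operand iterators for the residual-last variants without touching the rest of its template parameters:

// Sketch only:
using IteratorA =
    cutlass::transform::threadblock::MakeIteratorResidualLast<Mma::IteratorA>::Iterator;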
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h
ADDED
|
@@ -0,0 +1,1964 @@
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
|
| 3 |
+
*reserved. SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice,
|
| 9 |
+
*this list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holder nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 22 |
+
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
| 23 |
+
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 24 |
+
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 25 |
+
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 26 |
+
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 27 |
+
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 28 |
+
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 29 |
+
*POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
*
|
| 31 |
+
**************************************************************************************************/
|
| 32 |
+
|
| 33 |
+
// Copyright (c) Microsoft Corporation.
|
| 34 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 35 |
+
|
| 36 |
+
// DeepSpeed Team
|
| 37 |
+
|
| 38 |
+
/*! \file
|
| 39 |
+
\brief Templates calculating the address and predicates to the load of tiles
|
| 40 |
+
from pitch-linear rank=2 tensors.
|
| 41 |
+
|
| 42 |
+
This iterator uses masks to guard out-of-bounds accesses. The first tile
|
| 43 |
+
this iterator visits may be partial, then the remaining tiles are complete.
|
| 44 |
+
So, we only need to compute the predicates twice, once before the first tile
|
| 45 |
+
and once for the remaining full tiles which can share the same predicates.
|
| 46 |
+
|
| 47 |
+
A precomputed "Params" object minimizes the amount of state that must be
|
| 48 |
+
stored in registers, and integer addition is used to advance the pointer
|
| 49 |
+
through memory.
|
| 50 |
+
*/
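To make the "compute the predicates twice" idea above concrete, here is a toy, self-contained illustration (not taken from this header) of a steady-state mask versus a residual mask for 128-element access strips over a row of 300 elements:

// Toy example: tiles 0 and 1 are full; the residual tile covers elements 256..299,
// so only 44 of its 128 predicates are true.
bool full_mask[128], residual_mask[128];
for (int i = 0; i < 128; ++i) {
    full_mask[i]     = true;              // steady-state tiles are entirely in bounds
    residual_mask[i] = (256 + i) < 300;   // residual tile guards the ragged tail
}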
|
| 51 |
+
|
| 52 |
+
#pragma once
|
| 53 |
+
|
| 54 |
+
#include "cutlass/array.h"
|
| 55 |
+
#include "cutlass/coord.h"
|
| 56 |
+
#include "cutlass/cutlass.h"
|
| 57 |
+
#include "cutlass/layout/matrix.h"
|
| 58 |
+
#include "cutlass/layout/pitch_linear.h"
|
| 59 |
+
#include "cutlass/matrix_shape.h"
|
| 60 |
+
#include "cutlass/predicate_vector.h"
|
| 61 |
+
#include "cutlass/tensor_ref.h"
|
| 62 |
+
#include "cutlass/tensor_view.h"
|
| 63 |
+
#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
|
| 64 |
+
|
| 65 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 66 |
+
|
| 67 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 68 |
+
|
| 69 |
+
namespace cutlass {
|
| 70 |
+
namespace transform {
|
| 71 |
+
namespace threadblock {
|
| 72 |
+
|
| 73 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 74 |
+
|
| 75 |
+
/// PredicatedTileAccessIteratorResidualLast
|
| 76 |
+
///
|
| 77 |
+
template <typename Shape,
|
| 78 |
+
typename Element,
|
| 79 |
+
typename Layout,
|
| 80 |
+
int AdvanceRank,
|
| 81 |
+
typename ThreadMap,
|
| 82 |
+
typename AccessType,
|
| 83 |
+
bool Gather = false>
|
| 84 |
+
class PredicatedTileAccessIteratorResidualLast;
|
| 85 |
+
|
| 86 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 87 |
+
|
| 88 |
+
/// Specialization of PredicatedTileAccessIteratorResidualLast for pitch-linear
|
| 89 |
+
/// data.
|
| 90 |
+
///
|
| 91 |
+
template <typename Shape_,
|
| 92 |
+
typename Element_,
|
| 93 |
+
int AdvanceRank,
|
| 94 |
+
typename ThreadMap_,
|
| 95 |
+
typename AccessType_,
|
| 96 |
+
bool Gather>
|
| 97 |
+
class PredicatedTileAccessIteratorResidualLast<Shape_,
|
| 98 |
+
Element_,
|
| 99 |
+
layout::PitchLinear,
|
| 100 |
+
AdvanceRank,
|
| 101 |
+
ThreadMap_,
|
| 102 |
+
AccessType_,
|
| 103 |
+
Gather> {
|
| 104 |
+
public:
|
| 105 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 106 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 107 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 108 |
+
|
| 109 |
+
using Shape = Shape_;
|
| 110 |
+
using Element = Element_;
|
| 111 |
+
using Layout = layout::PitchLinear;
|
| 112 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 113 |
+
using ThreadMap = ThreadMap_;
|
| 114 |
+
using AccessType = AccessType_;
|
| 115 |
+
|
| 116 |
+
using Index = typename Layout::Index;
|
| 117 |
+
using LongIndex = typename Layout::LongIndex;
|
| 118 |
+
|
| 119 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 120 |
+
using TensorView = TensorView<Element, Layout>;
|
| 121 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 122 |
+
|
| 123 |
+
using Pointer = Element*;
|
| 124 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 125 |
+
|
| 126 |
+
using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<Shape,
|
| 127 |
+
Element,
|
| 128 |
+
Layout,
|
| 129 |
+
AdvanceRank,
|
| 130 |
+
ThreadMap,
|
| 131 |
+
AccessType>;
|
| 132 |
+
|
| 133 |
+
static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
|
| 134 |
+
|
| 135 |
+
static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
|
| 136 |
+
"Vectors implied by the thread map must be divisible by the access type.");
|
| 137 |
+
|
| 138 |
+
using Mask = typename UnderlyingPredicates::Mask;
|
| 139 |
+
|
| 140 |
+
/// Uses a non-template class
|
| 141 |
+
struct Params : PredicatedTileAccessIteratorParams {
|
| 142 |
+
using Base = PredicatedTileAccessIteratorParams;
|
| 143 |
+
|
| 144 |
+
// Default ctor
|
| 145 |
+
CUTLASS_HOST_DEVICE
|
| 146 |
+
Params() {}
|
| 147 |
+
|
| 148 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 149 |
+
CUTLASS_HOST_DEVICE
|
| 150 |
+
Params(Layout const& layout)
|
| 151 |
+
: Base(layout.stride(0),
|
| 152 |
+
MakePredicatedTileAccessIteratorDesc<Shape,
|
| 153 |
+
Element,
|
| 154 |
+
Layout,
|
| 155 |
+
kAdvanceRank,
|
| 156 |
+
ThreadMap>()())
|
| 157 |
+
{
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
CUTLASS_HOST_DEVICE
|
| 161 |
+
Params(Base const& base) : Base(base) {}
|
| 162 |
+
};
|
| 163 |
+
|
| 164 |
+
private:
|
| 165 |
+
/// Internal pointer type permits fast address arithmetic
|
| 166 |
+
using BytePointer = char*;
|
| 167 |
+
|
| 168 |
+
private:
|
| 169 |
+
//
|
| 170 |
+
// Data members
|
| 171 |
+
//
|
| 172 |
+
|
| 173 |
+
UnderlyingPredicates the_predicates;
|
| 174 |
+
Mask residual_tile_mask;
|
| 175 |
+
|
| 176 |
+
/// Parameters object with precomputed internal state
|
| 177 |
+
Params params_;
|
| 178 |
+
|
| 179 |
+
/// Internal pointer to first access of tile
|
| 180 |
+
BytePointer pointer_;
|
| 181 |
+
|
| 182 |
+
/// Below is used when Gather is turned on. We need to record strided_offset
|
| 183 |
+
/// and contiguous_offset separated to compute the offset by using
|
| 184 |
+
///
|
| 185 |
+
/// offset = contiguous_offset + indices[strided_offset]
|
| 186 |
+
///
|
| 187 |
+
|
| 188 |
+
/// Gather indices
|
| 189 |
+
int const* indices_;
|
| 190 |
+
|
| 191 |
+
Index gather_offset_strided;
|
| 192 |
+
|
| 193 |
+
private:
|
| 194 |
+
/// Computes predicates based on internally tracked per-thread offset.
|
| 195 |
+
CUTLASS_DEVICE
|
| 196 |
+
void compute_predicates_(
|
| 197 |
+
/// Extent of the matrix window
|
| 198 |
+
TensorCoord extent,
|
| 199 |
+
/// optionally, simplify predicate calculation during 'steady state' phase
|
| 200 |
+
bool is_steady_state = false)
|
| 201 |
+
{
|
| 202 |
+
the_predicates.compute_predicates_(extent, is_steady_state);
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
public:
|
| 206 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 207 |
+
/// and thread ID
|
| 208 |
+
CUTLASS_HOST_DEVICE
|
| 209 |
+
PredicatedTileAccessIteratorResidualLast(
|
| 210 |
+
/// Precomputed parameters object
|
| 211 |
+
Params const& params,
|
| 212 |
+
/// Pointer to start of tensor
|
| 213 |
+
Pointer pointer,
|
| 214 |
+
/// Extent of tensor
|
| 215 |
+
TensorCoord extent,
|
| 216 |
+
/// ID of each participating thread
|
| 217 |
+
int thread_id,
|
| 218 |
+
/// Initial offset of threadblock
|
| 219 |
+
TensorCoord const& threadblock_offset,
|
| 220 |
+
/// Gather indices
|
| 221 |
+
int const* indices = nullptr)
|
| 222 |
+
: params_(params),
|
| 223 |
+
pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
|
| 224 |
+
the_predicates(extent),
|
| 225 |
+
indices_(indices)
|
| 226 |
+
{
|
| 227 |
+
the_predicates.set_predicates(thread_id, threadblock_offset);
|
| 228 |
+
the_predicates.get_mask(residual_tile_mask);
|
| 229 |
+
|
| 230 |
+
// Working around a weird compiler bug happening on P100 for the backward.
|
| 231 |
+
// I've seen together: the_predicates.predicates_[0] = 14 (instead of 15)
|
| 232 |
+
// residual_tile_mask[0] = 15 (correct)
|
| 233 |
+
//
|
| 234 |
+
// Adding prints when the value is calculated (in `compute_predicates_`)
|
| 235 |
+
// sometimes removes the bug. The consequence is that we skip some
|
| 236 |
+
// element of a tensor, leading to wrong results
|
| 237 |
+
// Setting `compute_predicates_`'s second argument (`is_steady_state`) to
|
| 238 |
+
// true also seems to get rid of the bug - at the cost of twice as many
|
| 239 |
+
// comparisons.
|
| 240 |
+
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
|
| 241 |
+
constexpr bool kWorkAroundCompilerBug = false;
|
| 242 |
+
#else
|
| 243 |
+
constexpr bool kWorkAroundCompilerBug = true;
|
| 244 |
+
#endif
|
| 245 |
+
the_predicates.compute_predicates_(extent, true && !kWorkAroundCompilerBug);
|
| 246 |
+
|
| 247 |
+
// update internal pointers
|
| 248 |
+
Layout layout(params_.stride_);
|
| 249 |
+
|
| 250 |
+
if (!Gather) {
|
| 251 |
+
add_pointer_offset(layout(the_predicates.thread_offset_));
|
| 252 |
+
} else {
|
| 253 |
+
gather_offset_strided = the_predicates.thread_offset_.strided();
|
| 254 |
+
add_pointer_offset(layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0)));
|
| 255 |
+
}
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
|
| 259 |
+
/// offset
|
| 260 |
+
CUTLASS_HOST_DEVICE
|
| 261 |
+
PredicatedTileAccessIteratorResidualLast(
|
| 262 |
+
/// Precomputed parameters object
|
| 263 |
+
Params const& params,
|
| 264 |
+
/// Pointer to start of tensor
|
| 265 |
+
Pointer pointer,
|
| 266 |
+
/// Extent of tensor
|
| 267 |
+
TensorCoord extent,
|
| 268 |
+
///< ID of each participating thread
|
| 269 |
+
int thread_id)
|
| 270 |
+
: PredicatedTileAccessIteratorResidualLast(params,
|
| 271 |
+
pointer,
|
| 272 |
+
extent,
|
| 273 |
+
thread_id,
|
| 274 |
+
make_Coord(0, 0))
|
| 275 |
+
{
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
/// Overrides the internal iteration index
|
| 279 |
+
CUTLASS_HOST_DEVICE
|
| 280 |
+
void set_iteration_index(int index) { the_predicates.set_iteration_index(index); }
|
| 281 |
+
|
| 282 |
+
CUTLASS_HOST_DEVICE
|
| 283 |
+
void set_residual_tile(bool is_residual_tile)
|
| 284 |
+
{
|
| 285 |
+
if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); }
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
/// Adds a pointer offset in units of Element
|
| 289 |
+
CUTLASS_HOST_DEVICE
|
| 290 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 291 |
+
{
|
| 292 |
+
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
/// Advances an iterator along logical dimensions of matrix in units of whole
|
| 296 |
+
/// tiles
|
| 297 |
+
CUTLASS_DEVICE
|
| 298 |
+
void add_tile_offset(TensorCoord const& tile_offset)
|
| 299 |
+
{
|
| 300 |
+
if (!Gather) {
|
| 301 |
+
if (kAdvanceRank) {
|
| 302 |
+
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
|
| 303 |
+
pointer_ += Shape::kContiguous * tile_offset.contiguous();
|
| 304 |
+
} else {
|
| 305 |
+
pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
|
| 306 |
+
pointer_ += Shape::kStrided * tile_offset.strided();
|
| 307 |
+
}
|
| 308 |
+
} else {
|
| 309 |
+
add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
|
| 310 |
+
gather_offset_strided += Shape::kStrided * tile_offset.strided();
|
| 311 |
+
}
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
/// Returns a pointer
|
| 315 |
+
CUTLASS_HOST_DEVICE
|
| 316 |
+
AccessType* get() const
|
| 317 |
+
{
|
| 318 |
+
if (Gather) {
|
| 319 |
+
assert(indices_);
|
| 320 |
+
|
| 321 |
+
if (!valid()) { return nullptr; }
|
| 322 |
+
|
| 323 |
+
LongIndex contiguous_offset =
|
| 324 |
+
the_predicates.iteration_contiguous_ *
|
| 325 |
+
(ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) +
|
| 326 |
+
the_predicates.iteration_vector_;
|
| 327 |
+
int strided_index = gather_offset_strided +
|
| 328 |
+
the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided;
|
| 329 |
+
|
| 330 |
+
LongIndex strided_offset = indices_[strided_index] * LongIndex(params_.stride_) *
|
| 331 |
+
sizeof_bits<Element>::value / 8;
|
| 332 |
+
|
| 333 |
+
return reinterpret_cast<AccessType*>(pointer_ + contiguous_offset + strided_offset);
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
return reinterpret_cast<AccessType*>(
|
| 337 |
+
pointer_ + the_predicates.iteration_contiguous_ *
|
| 338 |
+
(ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) /
|
| 339 |
+
8) +
|
| 340 |
+
the_predicates.iteration_vector_;
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
/// Increment and return an instance to self.
|
| 344 |
+
CUTLASS_HOST_DEVICE
|
| 345 |
+
PredicatedTileAccessIteratorResidualLast& operator++()
|
| 346 |
+
{
|
| 347 |
+
the_predicates.operator++();
|
| 348 |
+
|
| 349 |
+
++the_predicates.iteration_vector_;
|
| 350 |
+
if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; }
|
| 351 |
+
|
| 352 |
+
the_predicates.iteration_vector_ = 0;
|
| 353 |
+
++the_predicates.iteration_contiguous_;
|
| 354 |
+
|
| 355 |
+
if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
|
| 356 |
+
return *this;
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
// Enter here only if (iteration_contiguous_ ==
|
| 360 |
+
// ThreadMap::Iteration::kContiguous)
|
| 361 |
+
the_predicates.iteration_contiguous_ = 0;
|
| 362 |
+
++the_predicates.iteration_strided_;
|
| 363 |
+
|
| 364 |
+
if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
|
| 365 |
+
if (!Gather) { pointer_ += params_.inc_strided_; }
|
| 366 |
+
|
| 367 |
+
return *this;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
|
| 371 |
+
// which means we enter the next tile.
|
| 372 |
+
the_predicates.iteration_strided_ = 0;
|
| 373 |
+
|
| 374 |
+
if (!Gather) {
|
| 375 |
+
// advance to next tile
|
| 376 |
+
pointer_ += params_.inc_next_;
|
| 377 |
+
|
| 378 |
+
// now return to start tile - if the iterator is subsequently advanced,
|
| 379 |
+
// this subtraction as well as the subsequent integer addition are both
|
| 380 |
+
// elided by the compiler.
|
| 381 |
+
pointer_ -= params_.inc_advance_;
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
return *this;
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
/// Increment and return an instance to self.
|
| 388 |
+
CUTLASS_HOST_DEVICE
|
| 389 |
+
PredicatedTileAccessIteratorResidualLast operator++(int)
|
| 390 |
+
{
|
| 391 |
+
PredicatedTileAccessIteratorResidualLast self(*this);
|
| 392 |
+
operator++();
|
| 393 |
+
return self;
|
| 394 |
+
}
|
| 395 |
+
|
| 396 |
+
/// Clears the predicate set efficiently
|
| 397 |
+
CUTLASS_HOST_DEVICE
|
| 398 |
+
void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); }
|
| 399 |
+
|
| 400 |
+
/// Clears the predicate set efficiently
|
| 401 |
+
CUTLASS_HOST_DEVICE
|
| 402 |
+
void enable_mask() { the_predicates.enable_mask(); }
|
| 403 |
+
|
| 404 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 405 |
+
CUTLASS_HOST_DEVICE
|
| 406 |
+
void set_mask(Mask const& mask) { the_predicates.set_mask(mask); }
|
| 407 |
+
|
| 408 |
+
/// Gets the mask
|
| 409 |
+
CUTLASS_HOST_DEVICE
|
| 410 |
+
void get_mask(Mask& mask) { the_predicates.get_mask(mask); }
|
| 411 |
+
|
| 412 |
+
/// Returns whether access is valid or not
|
| 413 |
+
CUTLASS_HOST_DEVICE
|
| 414 |
+
bool valid() const { return the_predicates.valid(); }
|
| 415 |
+
};
|
| 416 |
+
|
| 417 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 418 |
+
|
| 419 |
+
/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
|
| 420 |
+
/// data.
|
| 421 |
+
///
|
| 422 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 423 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 424 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 425 |
+
/// MaskedTileIteratorConcept
|
| 426 |
+
///
|
| 427 |
+
template <typename Shape_,
|
| 428 |
+
typename Element_,
|
| 429 |
+
int AdvanceRank,
|
| 430 |
+
typename ThreadMap_,
|
| 431 |
+
typename AccessType_,
|
| 432 |
+
bool Gather>
|
| 433 |
+
class PredicatedTileAccessIteratorResidualLast<Shape_,
|
| 434 |
+
Element_,
|
| 435 |
+
layout::ColumnMajor,
|
| 436 |
+
AdvanceRank,
|
| 437 |
+
ThreadMap_,
|
| 438 |
+
AccessType_,
|
| 439 |
+
Gather> {
|
| 440 |
+
public:
|
| 441 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 442 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 443 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 444 |
+
|
| 445 |
+
using Shape = Shape_;
|
| 446 |
+
using Element = Element_;
|
| 447 |
+
using Layout = layout::ColumnMajor;
|
| 448 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 449 |
+
using ThreadMap = ThreadMap_;
|
| 450 |
+
using AccessType = AccessType_;
|
| 451 |
+
|
| 452 |
+
using Index = typename Layout::Index;
|
| 453 |
+
using LongIndex = typename Layout::LongIndex;
|
| 454 |
+
|
| 455 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 456 |
+
using TensorView = TensorView<Element, Layout>;
|
| 457 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 458 |
+
|
| 459 |
+
using Pointer = Element*;
|
| 460 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 461 |
+
|
| 462 |
+
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
|
| 463 |
+
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
|
| 464 |
+
Element,
|
| 465 |
+
layout::PitchLinear,
|
| 466 |
+
(kAdvanceRank == 0 ? 0 : 1),
|
| 467 |
+
ThreadMap,
|
| 468 |
+
AccessType,
|
| 469 |
+
Gather>;
|
| 470 |
+
|
| 471 |
+
/// Predicate vector stores mask to guard accesses
|
| 472 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 473 |
+
|
| 474 |
+
static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
|
| 475 |
+
|
| 476 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 477 |
+
class Params {
|
| 478 |
+
private:
|
| 479 |
+
friend PredicatedTileAccessIteratorResidualLast;
|
| 480 |
+
|
| 481 |
+
/// Parameters object
|
| 482 |
+
typename UnderlyingIterator::Params params_;
|
| 483 |
+
|
| 484 |
+
public:
|
| 485 |
+
/// Default ctor
|
| 486 |
+
CUTLASS_HOST_DEVICE
|
| 487 |
+
Params() {}
|
| 488 |
+
|
| 489 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 490 |
+
CUTLASS_HOST_DEVICE
|
| 491 |
+
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){};
|
| 492 |
+
|
| 493 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 494 |
+
CUTLASS_HOST_DEVICE
|
| 495 |
+
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
|
| 496 |
+
};
|
| 497 |
+
|
| 498 |
+
private:
|
| 499 |
+
//
|
| 500 |
+
// Data members
|
| 501 |
+
//
|
| 502 |
+
|
| 503 |
+
/// Underlying pitch-linear tile iterator
|
| 504 |
+
UnderlyingIterator iterator_;
|
| 505 |
+
|
| 506 |
+
public:
|
| 507 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 508 |
+
/// and thread ID
|
| 509 |
+
CUTLASS_HOST_DEVICE
|
| 510 |
+
PredicatedTileAccessIteratorResidualLast(
|
| 511 |
+
///< Precomputed parameters object
|
| 512 |
+
Params const& params,
|
| 513 |
+
///< Pointer to start of tensor
|
| 514 |
+
Pointer pointer,
|
| 515 |
+
///< Extent of tensor
|
| 516 |
+
TensorCoord extent,
|
| 517 |
+
///< ID of each participating thread
|
| 518 |
+
int thread_id,
|
| 519 |
+
///< Initial offset of threadblock
|
| 520 |
+
TensorCoord const& threadblock_offset,
|
| 521 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 522 |
+
///< gather/scatter at this specialization
|
| 523 |
+
)
|
| 524 |
+
: iterator_(params.params_,
|
| 525 |
+
pointer,
|
| 526 |
+
layout::PitchLinearCoord(extent.row(), extent.column()),
|
| 527 |
+
thread_id,
|
| 528 |
+
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()),
|
| 529 |
+
indices)
|
| 530 |
+
{
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
|
| 534 |
+
/// offset
|
| 535 |
+
CUTLASS_HOST_DEVICE
|
| 536 |
+
PredicatedTileAccessIteratorResidualLast(
|
| 537 |
+
Params const& params, ///< Precomputed parameters object
|
| 538 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 539 |
+
TensorCoord extent, ///< Extent of tensor
|
| 540 |
+
int thread_id ///< ID of each participating thread
|
| 541 |
+
)
|
| 542 |
+
: PredicatedTileAccessIteratorResidualLast(params,
|
| 543 |
+
pointer,
|
| 544 |
+
extent,
|
| 545 |
+
thread_id,
|
| 546 |
+
make_Coord(0, 0))
|
| 547 |
+
{
|
| 548 |
+
}
|
| 549 |
+
|
| 550 |
+
/// Overrides the internal iteration index
|
| 551 |
+
CUTLASS_HOST_DEVICE
|
| 552 |
+
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
|
| 553 |
+
|
| 554 |
+
CUTLASS_HOST_DEVICE
|
| 555 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 556 |
+
|
| 557 |
+
/// Adds a pointer offset in units of Element
|
| 558 |
+
CUTLASS_HOST_DEVICE
|
| 559 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 560 |
+
{
|
| 561 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 562 |
+
}
|
| 563 |
+
|
| 564 |
+
/// Advances an iterator along logical dimensions of matrix in units of whole
|
| 565 |
+
/// tiles
|
| 566 |
+
CUTLASS_HOST_DEVICE
|
| 567 |
+
void add_tile_offset(TensorCoord const& tile_offset)
|
| 568 |
+
{
|
| 569 |
+
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
/// Returns a pointer
|
| 573 |
+
CUTLASS_HOST_DEVICE
|
| 574 |
+
AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
|
| 575 |
+
|
| 576 |
+
/// Advances to the next tile in memory.
|
| 577 |
+
///
|
| 578 |
+
/// The first time this method is called, predicates are updated, and the
|
| 579 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 580 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 581 |
+
/// pointer.
|
| 582 |
+
CUTLASS_HOST_DEVICE
|
| 583 |
+
PredicatedTileAccessIteratorResidualLast& operator++()
|
| 584 |
+
{
|
| 585 |
+
++iterator_;
|
| 586 |
+
return *this;
|
| 587 |
+
}
|
| 588 |
+
|
| 589 |
+
/// Advances to the next tile in memory.
|
| 590 |
+
///
|
| 591 |
+
/// The first time this method is called, predicates are updated, and the
|
| 592 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 593 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 594 |
+
/// pointer.
|
| 595 |
+
CUTLASS_HOST_DEVICE
|
| 596 |
+
PredicatedTileAccessIteratorResidualLast operator++(int)
|
| 597 |
+
{
|
| 598 |
+
PredicatedTileAccessIteratorResidualLast self(*this);
|
| 599 |
+
operator++();
|
| 600 |
+
return self;
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
/// Clears the predicate set efficiently
|
| 604 |
+
CUTLASS_HOST_DEVICE
|
| 605 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 606 |
+
|
| 607 |
+
/// Clears the predicate set efficiently
|
| 608 |
+
CUTLASS_HOST_DEVICE
|
| 609 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 610 |
+
|
| 611 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 612 |
+
CUTLASS_HOST_DEVICE
|
| 613 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 614 |
+
|
| 615 |
+
/// Gets the mask
|
| 616 |
+
CUTLASS_HOST_DEVICE
|
| 617 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 618 |
+
|
| 619 |
+
/// Returns whether access is valid or not
|
| 620 |
+
CUTLASS_HOST_DEVICE
|
| 621 |
+
bool valid() { return iterator_.valid(); }
|
| 622 |
+
};
|
| 623 |
+
|
| 624 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 625 |
+
|
| 626 |
+
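// ---------------------------------------------------------------------------
// [Editorial note, not part of the upstream header] The column-major
// specialization above is a thin adapter: it swaps (row, column) coordinates
// into pitch-linear (contiguous, strided) coordinates and forwards every call
// to the underlying pitch-linear PredicatedTileAccessIteratorResidualLast.
// A minimal usage sketch, assuming a hypothetical thread map `MyThreadMap`
// and tile/element choices picked purely for illustration, might look like:
//
//   using Iterator = PredicatedTileAccessIteratorResidualLast<
//       cutlass::MatrixShape<128, 64>,        // tile shape visited per step
//       cutlass::half_t,                      // element type
//       cutlass::layout::ColumnMajor,         // this specialization
//       1,                                    // advance along the strided rank
//       MyThreadMap,                          // per-thread access map (hypothetical)
//       cutlass::Array<cutlass::half_t, 8>,   // vectorized access type
//       false>;                               // no gather
//
//   Iterator::Params params(layout);          // host-constructible parameters
//   Iterator it(params, ptr, extent, thread_id, tb_offset);
//   it.set_residual_tile(true);               // first pass covers the residual tile
// ---------------------------------------------------------------------------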
/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
bool Gather>
class PredicatedTileAccessIteratorResidualLast<Shape_,
Element_,
layout::RowMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
Gather> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");

using Shape = Shape_;
using Element = Element_;
using Layout = layout::RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;

using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;

using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;

using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;

using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType,
Gather>;

static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;

/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;

/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;

/// Parameters object
typename UnderlyingIterator::Params params_;

public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}

/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){};

/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
};

private:
//
// Data members
//

/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;

public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
/// Gather indices
int const* indices = nullptr)
: iterator_(params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()),
indices)
{
}

/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(params,
pointer,
extent,
thread_id,
make_Coord(0, 0))
{
}

/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }

CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }

/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
iterator_.add_pointer_offset(pointer_offset);
}

/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset)
{
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}

/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++()
{
++iterator_;
return *this;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int)
{
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }

/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }

/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) { iterator_.get_mask(mask); }

/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};

////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
/// data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<Shape_,
Element_,
layout::AffineRankN<2>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");

using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRankN<2>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;

using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;

using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;

using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;

using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<Shape,
Element,
layout::PitchLinear,
AdvanceRank,
ThreadMap,
AccessType>;

static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;

static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
"Vectors implied by the thread map must be divisible by the access type.");

/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingPredicates::Mask;

/// Parameters object is precomputed state and is host-constructible
class Params {
public:
friend PredicatedTileAccessIteratorResidualLast;

private:
/// stride of pitch-linear layout (units of Element)
Coord<Layout::kStrideRank, Layout::LongIndex> stride_;
/// amount (in byte) to increment pointer to move to next access along
/// contiguous dimension
LongIndex inc_contiguous_;
/// amount (in byte) to increment pointer from first access of current
/// contiguous dimension to first access of next one.
LongIndex inc_strided_;
/// amount (in byte) to increment pointer from last access of current
/// contiguous dimension to first access of next one.
LongIndex inc_next_strided_;
/// amount (in byte) to increment pointer from last access to first access
/// of next tile
LongIndex inc_next_;
/// amount (in byte) to increment pointer from first access of current tile
/// to first access of next tile
LongIndex inc_advance_;

public:
// Default ctor
CUTLASS_HOST_DEVICE
Params() : stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) {}

/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout) : stride_({layout.stride(0), layout.stride(1)})
{
inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) *
sizeof_bits<Element>::value / 8;

inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) *
sizeof_bits<Element>::value / 8;

inc_next_strided_ =
inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_;

if (kAdvanceRank) {
// advance along strided dimension
inc_advance_ =
Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8;
} else {
// advance along contiguous dimension
inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8;
}

inc_next_ = inc_advance_ -
LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ -
LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_;
};
};

private:
/// Internal pointer type permits fast address arithmetic
using BytePointer = char*;

//
// Data members
//

/// Parameters object with precomputed internal state
Params params_;

/// Internal pointer to first access of tile
BytePointer pointer_;

UnderlyingPredicates the_predicates;
Mask residual_tile_mask;

private:
/// Computes predicates based on internally tracked per-thread offset.
CUTLASS_DEVICE
void compute_predicates_(
/// Extent of the matrix window
TensorCoord extent,
/// optionally, simplify predicate calculation during 'steady state' phase
bool is_steady_state = false)
{
the_predicates.compute_predicates_(extent, is_steady_state);
}

public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices = nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: params_(params),
pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
the_predicates(extent)
{
the_predicates.set_predicates(thread_id, threadblock_offset);

// update internal pointers
Layout layout(params_.stride_);
add_pointer_offset(layout(the_predicates.thread_offset_));
}

/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(params,
pointer,
extent,
thread_id,
make_Coord(0, 0))
{
}

/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { the_predicates.set_iteration_index(index); }

CUTLASS_HOST_DEVICE
void set_residual_tile(bool is_residual_tile)
{
if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); }
}

/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
}

/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset)
{
if (kAdvanceRank) {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]);
pointer_ += Shape::kContiguous * tile_offset[0];
} else {
pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]);
pointer_ += Shape::kStrided * tile_offset[1];
}
}

/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const
{
return reinterpret_cast<AccessType*>(pointer_) + the_predicates.iteration_vector_;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++()
{
the_predicates.operator++();
++the_predicates.iteration_vector_;
if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; }

the_predicates.iteration_vector_ = 0;
++the_predicates.iteration_contiguous_;

if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
pointer_ += params_.inc_contiguous_;
return *this;
}

// Enter here only if (iteration_contiguous_ ==
// ThreadMap::Iteration::kContiguous)
the_predicates.iteration_contiguous_ = 0;
++the_predicates.iteration_strided_;

if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
pointer_ += params_.inc_next_strided_;
return *this;
}

// Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided)
// which means we enter the next tile.
the_predicates.iteration_strided_ = 0;

// advance to next tile
pointer_ += params_.inc_next_;

// now return to start tile - if the iterator is subsequently advanced, this
// subtraction as well as the subsequent integer addition are both elided by
// the compiler.
pointer_ -= params_.inc_advance_;

return *this;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int)
{
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); }

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { the_predicates.enable_mask(); }

/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) { the_predicates.set_mask(mask); }

/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) { the_predicates.get_mask(mask); }

/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return the_predicates.valid(); }
};

////////////////////////////////////////////////////////////////////////////////
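// ---------------------------------------------------------------------------
// [Editorial note, not part of the upstream header] The affine rank-2
// specialization above walks memory with precomputed byte increments instead
// of re-evaluating the layout on every access. Restating the Params
// constructor, with s0/s1 the two element strides and
// E = sizeof_bits<Element>::value / 8:
//
//   inc_contiguous_   = s0 * Delta::kContiguous * E
//   inc_strided_      = s1 * Delta::kStrided    * E
//   inc_next_strided_ = inc_strided_ - (Iterations::kContiguous - 1) * inc_contiguous_
//   inc_next_         = inc_advance_ - (Iterations::kContiguous - 1) * inc_contiguous_
//                                    - (Iterations::kStrided    - 1) * inc_strided_
//
// so operator++ only ever adds one of these constants. After the final access
// of a tile it adds inc_next_ and immediately subtracts inc_advance_, leaving
// the pointer at the start of the same tile until add_tile_offset() moves it.
// ---------------------------------------------------------------------------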
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
/// column-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<Shape_,
Element_,
layout::AffineRank2ColumnMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");

using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2ColumnMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;

using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;

using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;

using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;

// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType>;

static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;

/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;

/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;

/// Parameters object
typename UnderlyingIterator::Params params_;

public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}

/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){};
};

private:
//
// Data members
//

/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;

public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices = nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(params.params_,
pointer,
layout::PitchLinearCoord(extent.row(), extent.column()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()))
{
}

/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(params,
pointer,
extent,
thread_id,
make_Coord(0, 0))
{
}

/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }

CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }

/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
iterator_.add_pointer_offset(pointer_offset);
}

/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset)
{
iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column()));
}

/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++()
{
++iterator_;
return *this;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int)
{
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }

/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }

/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) { iterator_.get_mask(mask); }

/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};

////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank-2
/// row-major data.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_>
class PredicatedTileAccessIteratorResidualLast<Shape_,
Element_,
layout::AffineRank2RowMajor,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");

using Shape = Shape_;
using Element = Element_;
using Layout = layout::AffineRank2RowMajor;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;

using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;

using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;

using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;

// Map to the underlying AffineRankN<2> layout
using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
Element,
layout::AffineRankN<2>,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType>;

static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;

/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;

/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;

/// Parameters object
typename UnderlyingIterator::Params params_;

public:
/// Default ctor
CUTLASS_HOST_DEVICE
Params() {}

/// Construct the Params object given an AffineRankN<2> tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){};
};

private:
//
// Data members
//

/// Underlying AffineRankN<2> tile iterator
UnderlyingIterator iterator_;

public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
///< Precomputed parameters object
Params const& params,
///< Pointer to start of tensor
Pointer pointer,
///< Extent of tensor
TensorCoord extent,
///< ID of each participating thread
int thread_id,
///< Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices = nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(params.params_,
pointer,
layout::PitchLinearCoord(extent.column(), extent.row()),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()))
{
}

/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(params,
pointer,
extent,
thread_id,
make_Coord(0, 0))
{
}

/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }

CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }

/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
iterator_.add_pointer_offset(pointer_offset);
}

/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset)
{
iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row()));
}

/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++()
{
++iterator_;
return *this;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int)
{
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }

/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }

/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) { iterator_.get_mask(mask); }

/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};

////////////////////////////////////////////////////////////////////////////////
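// ---------------------------------------------------------------------------
// [Editorial note, not part of the upstream header] The two interleaved
// specializations that follow fold the interleaving factor into the
// pitch-linear extent before delegating. Restating the constructor arithmetic
// for the column-major interleaved case: with InterleavedK = 32 and a logical
// extent of 128 rows x 64 columns, the underlying pitch-linear iterator sees
//
//   contiguous extent = extent.row()    * kInterleavedK = 128 * 32 = 4096
//   strided extent    = extent.column() / kInterleavedK =  64 / 32 =    2
//
// and the threadblock offset is rescaled the same way. The row-major
// interleaved specialization applies the mirror-image mapping.
// ---------------------------------------------------------------------------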
/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
/// interleaved data. It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///

template <typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
int InterleavedK>
class PredicatedTileAccessIteratorResidualLast<Shape_,
Element_,
layout::ColumnMajorInterleaved<InterleavedK>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");

using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;

using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;

using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;

using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;

using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 0 : 1),
ThreadMap,
AccessType>;

static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;

/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;

/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;

/// Parameters object
typename UnderlyingIterator::Params params_;

public:
CUTLASS_HOST_DEVICE
Params() {}

/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}

CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
};

private:
//
// Data members
//

/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;

public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices = nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(params.params_,
pointer,
layout::PitchLinearCoord(extent.row() * kInterleavedK,
extent.column() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(threadblock_offset.row() * kInterleavedK,
threadblock_offset.column() / kInterleavedK))
{
}

/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(params,
pointer,
extent,
thread_id,
make_Coord(0, 0))
{
}

/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }

CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }

/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
iterator_.add_pointer_offset(pointer_offset);
}

/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset)
{
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
}

/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++()
{
++iterator_;
return *this;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int)
{
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }

/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }

/// Gets the mask
CUTLASS_HOST_DEVICE
void get_mask(Mask& mask) { iterator_.get_mask(mask); }

/// Returns whether access is valid or not
CUTLASS_HOST_DEVICE
bool valid() { return iterator_.valid(); }
};

////////////////////////////////////////////////////////////////////////////////
/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
/// interleaved data.
/// It is mapped to the congruous layout.
///
/// Satisfies: ForwardTileIteratorConcept |
/// ReadableContiguousTileIteratorConcept |
/// WriteableContiguousTileIteratorConcept |
/// MaskedTileIteratorConcept
///
template <typename Shape_,
typename Element_,
int AdvanceRank,
typename ThreadMap_,
typename AccessType_,
int InterleavedK>
class PredicatedTileAccessIteratorResidualLast<Shape_,
Element_,
layout::RowMajorInterleaved<InterleavedK>,
AdvanceRank,
ThreadMap_,
AccessType_,
false> {
public:
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
"Specialization for pitch-linear iterator may advance along the "
"contiguous(rank=0) or strided(rank=1) dimension.");

using Shape = Shape_;
using Element = Element_;
static int const kInterleavedK = InterleavedK;
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
static int const kAdvanceRank = AdvanceRank;
using ThreadMap = ThreadMap_;
using AccessType = AccessType_;

using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;

using TensorRef = TensorRef<Element, Layout>;
using TensorView = TensorView<Element, Layout>;
using TensorCoord = typename Layout::TensorCoord;

using Pointer = Element*;
using NonConstPointer = typename platform::remove_const<Element>::type*;

using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>,
Element,
layout::PitchLinear,
(kAdvanceRank == 0 ? 1 : 0),
ThreadMap,
AccessType>;

static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;

/// Predicate vector stores mask to guard accesses
using Mask = typename UnderlyingIterator::Mask;

/// Parameters object is precomputed state and is host-constructible
class Params {
private:
friend PredicatedTileAccessIteratorResidualLast;

/// Parameters object
typename UnderlyingIterator::Params params_;

public:
CUTLASS_HOST_DEVICE
Params() {}

/// Construct the Params object given a pitch-linear tensor's layout
CUTLASS_HOST_DEVICE
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}

CUTLASS_HOST_DEVICE
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
};

private:
//
// Data members
//

/// Underlying pitch-linear tile iterator
UnderlyingIterator iterator_;

public:
/// Constructs a TileIterator from its precomputed state, threadblock offset,
/// and thread ID
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
/// Precomputed parameters object
Params const& params,
/// Pointer to start of tensor
Pointer pointer,
/// Extent of tensor
TensorCoord extent,
/// ID of each participating thread
int thread_id,
/// Initial offset of threadblock
TensorCoord const& threadblock_offset,
int const* indices = nullptr ///< gather/scatter indices, note no support for
///< gather/scatter at this specialization
)
: iterator_(params.params_,
pointer,
layout::PitchLinearCoord(extent.column() * kInterleavedK,
extent.row() / kInterleavedK),
thread_id,
layout::PitchLinearCoord(threadblock_offset.column() * kInterleavedK,
threadblock_offset.row() / kInterleavedK))
{
}

/// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
/// offset
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast(
Params const& params, ///< Precomputed parameters object
Pointer pointer, ///< Pointer to start of tensor
TensorCoord extent, ///< Extent of tensor
int thread_id ///< ID of each participating thread
)
: PredicatedTileAccessIteratorResidualLast(params,
pointer,
extent,
thread_id,
make_Coord(0, 0))
{
}

/// Overrides the internal iteration index
CUTLASS_HOST_DEVICE
void set_iteration_index(int index) { iterator_.set_iteration_index(index); }

CUTLASS_HOST_DEVICE
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }

/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
iterator_.add_pointer_offset(pointer_offset);
}

/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
void add_tile_offset(TensorCoord const& tile_offset)
{
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
}

/// Returns a pointer
CUTLASS_HOST_DEVICE
AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast& operator++()
{
++iterator_;
return *this;
}

/// Advances to the next tile in memory.
///
/// The first time this method is called, predicates are updated, and the
/// iterator's internal pointer is reverted to the first "steady state" tile.
/// Subsequent calls are lightweight and must only update the internal
/// pointer.
CUTLASS_HOST_DEVICE
PredicatedTileAccessIteratorResidualLast operator++(int)
{
PredicatedTileAccessIteratorResidualLast self(*this);
operator++();
return self;
}

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }

/// Clears the predicate set efficiently
CUTLASS_HOST_DEVICE
void enable_mask() { iterator_.enable_mask(); }

/// Sets the predicate mask, overriding value stored in predicate iterator
CUTLASS_HOST_DEVICE
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 1948 |
+
|
| 1949 |
+
/// Gets the mask
|
| 1950 |
+
CUTLASS_HOST_DEVICE
|
| 1951 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 1952 |
+
|
| 1953 |
+
/// Returns whether access is valid or not
|
| 1954 |
+
CUTLASS_HOST_DEVICE
|
| 1955 |
+
bool valid() { return iterator_.valid(); }
|
| 1956 |
+
};
|
| 1957 |
+
|
| 1958 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 1959 |
+
|
| 1960 |
+
} // namespace threadblock
|
| 1961 |
+
} // namespace transform
|
| 1962 |
+
} // namespace cutlass
|
| 1963 |
+
|
| 1964 |
+
////////////////////////////////////////////////////////////////////////////////
|
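The residual-last interface above is easiest to read next to a short usage sketch. The helper below is illustrative only and is not part of the upstream header: it assumes `Iterator` is some concrete instantiation of the access iterator defined in this file and only exercises the members shown above (`valid()`, `get()`, `operator++`, `kAccessesPerVector`).

// Illustrative sketch, not from the DeepSpeed/CUTLASS source: performs the
// guarded vector accesses for one tile using the interface defined above.
template <typename Iterator, typename AccessType>
CUTLASS_DEVICE void load_tile_sketch(Iterator& iter, AccessType* frag_vectors)
{
    CUTLASS_PRAGMA_UNROLL
    for (int v = 0; v < Iterator::kAccessesPerVector; ++v) {
        if (iter.valid()) {
            frag_vectors[v] = *iter.get();  // access is masked out when out of bounds
        }
        ++iter;  // cheap pointer update in the steady state
    }
}

In a real mainloop the kernel would also call `set_residual_tile()` at the right step so that, as the file comment describes, only the partially full tile pays for recomputing the predicates.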
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h
ADDED
@@ -0,0 +1,886 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
#include <cutlass/tensor_coord.h>
namespace cutlass {
namespace epilogue {
namespace threadblock {

template <class AccessType, class Enable = void>
struct atomic_store {};

template <class AccessType>
struct atomic_store<AccessType,
                    typename platform::enable_if<
                        platform::is_same<typename AccessType::Element, half_t>::value>::type> {
    using Element = typename AccessType::Element;
    static const int kCount = AccessType::kElements;

    CUTLASS_DEVICE
    atomic_store(AccessType const& D, void* ptr, bool pred_guard)
    {
        static_assert(!(kCount % 2), "kCount must be even");
        half2* p = reinterpret_cast<half2*>(ptr);
        uint const* data = reinterpret_cast<uint const*>(&D);
        asm volatile(
            "{\n"
            " .reg .pred p;\n"
            " setp.ne.b32 p, %0, 0;\n"
            :
            : "r"((int)pred_guard));
        for (int i = 0; i < kCount / 2; i++) {
            asm volatile(" @p red.relaxed.global.add.noftz.f16x2 [%0], %1;\n"
                         :
                         : "l"(p + i), "r"(data[i]));
        }
        asm volatile("}\n" ::);
    }
};

template <class AccessType>
struct atomic_store<AccessType,
                    typename platform::enable_if<
                        platform::is_same<typename AccessType::Element, float>::value>::type> {
    using Element = typename AccessType::Element;
    static const int kCount = AccessType::kElements;

    CUTLASS_DEVICE
    atomic_store(AccessType const& D, void* ptr, bool pred_guard)
    {
        Element* p = reinterpret_cast<Element*>(ptr);
        uint const* data = reinterpret_cast<uint const*>(&D);
        asm volatile(
            "{\n"
            " .reg .pred p;\n"
            " setp.ne.b32 p, %0, 0;\n"
            :
            : "r"((int)pred_guard));
        for (int i = 0; i < kCount; i++) {
            asm volatile(" @p red.relaxed.global.add.f32 [%0], %1;\n"
                         :
                         : "l"(p + i), "r"(data[i]));
        }
        asm volatile("}\n" ::);
    }
};

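The two `atomic_store` specializations above issue predicated `red.relaxed.global.add` PTX instructions, i.e. fire-and-forget atomic additions of each packed value into global memory (pairs of `half_t` via `f16x2`, or individual `float`s). As a rough functional reference only, and assuming a device of compute capability 6.0 or newer for the `__half2` overload, the behaviour corresponds to the plain CUDA sketch below; it is not part of this header.

#include <cuda_fp16.h>

// Functional reference for atomic_store (illustrative): when the guard is set,
// every element of the fragment is atomically added into global memory.
__device__ void atomic_store_ref_f16x2(const __half2* frag, __half2* dst, int pairs, bool guard)
{
    if (!guard) { return; }
    for (int i = 0; i < pairs; ++i) { atomicAdd(dst + i, frag[i]); }  // requires sm_60+
}

__device__ void atomic_store_ref_f32(const float* frag, float* dst, int count, bool guard)
{
    if (!guard) { return; }
    for (int i = 0; i < count; ++i) { atomicAdd(dst + i, frag[i]); }
}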
| 72 |
+
template <typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap)
|
| 73 |
+
typename Element_, ///< Element data type
|
| 74 |
+
int Rank>
|
| 75 |
+
class PredicatedTileIteratorAffineRankNAtomic {
|
| 76 |
+
public:
|
| 77 |
+
using ThreadMap = ThreadMap_;
|
| 78 |
+
using Shape = typename ThreadMap::Shape;
|
| 79 |
+
|
| 80 |
+
using Element = Element_;
|
| 81 |
+
|
| 82 |
+
using Layout = layout::AffineRankN<Rank>;
|
| 83 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 84 |
+
using TensorView = TensorView<Element, Layout>;
|
| 85 |
+
using ConstTensorRef = typename TensorRef::ConstTensorRef;
|
| 86 |
+
|
| 87 |
+
using Index = typename Layout::Index;
|
| 88 |
+
using LongIndex = typename Layout::LongIndex;
|
| 89 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 90 |
+
|
| 91 |
+
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
|
| 92 |
+
static int const kThreads = ThreadMap::kThreads;
|
| 93 |
+
static int const kIterations = ThreadMap::Count::kTile;
|
| 94 |
+
|
| 95 |
+
static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
|
| 96 |
+
static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
|
| 97 |
+
static_assert(ThreadMap::Iterations::kCluster > 0,
|
| 98 |
+
"ThreadMap::Iterations::kCluster must be > 0");
|
| 99 |
+
static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
|
| 100 |
+
static_assert(!(Layout::kRank % 2),
|
| 101 |
+
"Layout rank must be even. This assumes the first half of the "
|
| 102 |
+
"modes correspond to the 'row' "
|
| 103 |
+
"and the second half of the modes correspond to the 'column'");
|
| 104 |
+
|
| 105 |
+
static bool const kBigEndian = false;
|
| 106 |
+
|
| 107 |
+
/// Fragment object
|
| 108 |
+
using Fragment = Array<Element,
|
| 109 |
+
ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
|
| 110 |
+
ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
|
| 111 |
+
ThreadMap::kElementsPerAccess>;
|
| 112 |
+
|
| 113 |
+
/// Memory access size
|
| 114 |
+
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
|
| 115 |
+
|
| 116 |
+
//
|
| 117 |
+
// Parameters struct
|
| 118 |
+
//
|
| 119 |
+
|
| 120 |
+
/// Parameters structure
|
| 121 |
+
struct Params {
|
| 122 |
+
//
|
| 123 |
+
// Data members
|
| 124 |
+
//
|
| 125 |
+
|
| 126 |
+
Layout layout;
|
| 127 |
+
|
| 128 |
+
/// Stride in units of bytes along M modes
|
| 129 |
+
Coord<Layout::kRank / 2, typename Layout::LongIndex> stride_m;
|
| 130 |
+
|
| 131 |
+
/// Stride in units of bytes along N modes
|
| 132 |
+
Coord<Layout::kRank / 2, typename Layout::LongIndex> stride_n;
|
| 133 |
+
|
| 134 |
+
/// Fast divmod objects divided by tensor extents
|
| 135 |
+
FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank / 2 - 1)];
|
| 136 |
+
|
| 137 |
+
/// Fast divmod objects divided by tensor extents
|
| 138 |
+
FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank / 2 - 1)];
|
| 139 |
+
|
| 140 |
+
int64_t rank2_inc_col;
|
| 141 |
+
int64_t rank2_inc_row;
|
| 142 |
+
|
| 143 |
+
//
|
| 144 |
+
// Methods
|
| 145 |
+
//
|
| 146 |
+
CUTLASS_HOST_DEVICE
|
| 147 |
+
Params() {}
|
| 148 |
+
|
| 149 |
+
CUTLASS_HOST_DEVICE
|
| 150 |
+
Params(TensorCoord const& extent, Layout const& layout_) : layout(layout_)
|
| 151 |
+
{
|
| 152 |
+
CUTLASS_PRAGMA_UNROLL
|
| 153 |
+
for (int i = 0; i < Layout::kRank / 2; ++i) {
|
| 154 |
+
stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
|
| 155 |
+
stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
if (kBigEndian) {
|
| 159 |
+
// "Big Endian" scheme
|
| 160 |
+
CUTLASS_PRAGMA_UNROLL
|
| 161 |
+
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
|
| 162 |
+
divmod_m[i] = FastDivmod(extent[i + 1]);
|
| 163 |
+
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
|
| 164 |
+
}
|
| 165 |
+
} else {
|
| 166 |
+
// "Little Endian" scheme
|
| 167 |
+
CUTLASS_PRAGMA_UNROLL
|
| 168 |
+
for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
|
| 169 |
+
divmod_m[i] = FastDivmod(extent[i]);
|
| 170 |
+
divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
|
| 171 |
+
}
|
| 172 |
+
}
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
CUTLASS_HOST_DEVICE
|
| 176 |
+
Params(Layout const& layout_) : layout(layout_)
|
| 177 |
+
{
|
| 178 |
+
CUTLASS_PRAGMA_UNROLL
|
| 179 |
+
for (int i = 0; i < Layout::kRank / 2; ++i) {
|
| 180 |
+
stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
|
| 181 |
+
stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
rank2_inc_col = ThreadMap::Delta::kColumn * stride_n[0];
|
| 185 |
+
rank2_inc_row = ThreadMap::Delta::kRow * stride_m[0];
|
| 186 |
+
}
|
| 187 |
+
};
|
| 188 |
+
|
| 189 |
+
/// Mask object
|
| 190 |
+
struct Mask {
|
| 191 |
+
static int const kCount = ThreadMap::Iterations::kColumn;
|
| 192 |
+
|
| 193 |
+
/// Predicate state
|
| 194 |
+
bool predicates[kCount];
|
| 195 |
+
|
| 196 |
+
//
|
| 197 |
+
// Mask
|
| 198 |
+
//
|
| 199 |
+
CUTLASS_HOST_DEVICE
|
| 200 |
+
Mask() { enable(); }
|
| 201 |
+
|
| 202 |
+
///< Efficiently disables all accesses guarded by mask
|
| 203 |
+
CUTLASS_HOST_DEVICE void clear()
|
| 204 |
+
{
|
| 205 |
+
CUTLASS_PRAGMA_UNROLL
|
| 206 |
+
for (int i = 0; i < kCount; ++i) { predicates[i] = false; }
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask
|
| 210 |
+
CUTLASS_DEVICE void enable()
|
| 211 |
+
{
|
| 212 |
+
CUTLASS_PRAGMA_UNROLL
|
| 213 |
+
for (int i = 0; i < kCount; ++i) { predicates[i] = true; }
|
| 214 |
+
}
|
| 215 |
+
};
|
| 216 |
+
|
| 217 |
+
private:
|
| 218 |
+
//
|
| 219 |
+
// Data members
|
| 220 |
+
//
|
| 221 |
+
|
| 222 |
+
/// Parameters structure containing reference and precomputed state.
|
| 223 |
+
Params params_;
|
| 224 |
+
|
| 225 |
+
/// Byte-level pointer
|
| 226 |
+
uint8_t* byte_pointer_;
|
| 227 |
+
|
| 228 |
+
/// Array of boolean values to contain steady-state predicates
|
| 229 |
+
Mask mask_;
|
| 230 |
+
|
| 231 |
+
/// Extent of the matrix tile in rows
|
| 232 |
+
Index extent_row_;
|
| 233 |
+
|
| 234 |
+
/// Extent of the matrix tile in columns
|
| 235 |
+
Index extent_col_;
|
| 236 |
+
|
| 237 |
+
/// A thread's starting row position (assuming steady-state predicates have
|
| 238 |
+
/// been computed)
|
| 239 |
+
Index thread_start_row_;
|
| 240 |
+
|
| 241 |
+
/// A thread's starting column position (assuming steady-state predicates have
|
| 242 |
+
/// been computed)
|
| 243 |
+
Index thread_start_column_;
|
| 244 |
+
|
| 245 |
+
/// Internal state counter
|
| 246 |
+
int state_[3];
|
| 247 |
+
|
| 248 |
+
/// Offsets in columns, cached for performance
|
| 249 |
+
int64_t offset_modes_n_[ThreadMap::Iterations::kColumn];
|
| 250 |
+
|
| 251 |
+
//
|
| 252 |
+
// Static asserts about internal strides
|
| 253 |
+
//
|
| 254 |
+
|
| 255 |
+
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
|
| 256 |
+
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
|
| 257 |
+
|
| 258 |
+
private:
|
| 259 |
+
//
|
| 260 |
+
// Methods
|
| 261 |
+
//
|
| 262 |
+
|
| 263 |
+
public:
|
| 264 |
+
//
|
| 265 |
+
// Methods
|
| 266 |
+
//
|
| 267 |
+
|
| 268 |
+
/// Constructor
|
| 269 |
+
CUTLASS_DEVICE
|
| 270 |
+
PredicatedTileIteratorAffineRankNAtomic(
|
| 271 |
+
Params const& params,
|
| 272 |
+
Element* pointer,
|
| 273 |
+
MatrixCoord extent,
|
| 274 |
+
int thread_idx,
|
| 275 |
+
MatrixCoord threadblock_offset = MatrixCoord(),
|
| 276 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 277 |
+
///< gather/scatter at this specialization
|
| 278 |
+
)
|
| 279 |
+
: params_(params)
|
| 280 |
+
{
|
| 281 |
+
MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
|
| 282 |
+
|
| 283 |
+
extent_row_ = extent.row();
|
| 284 |
+
extent_col_ = extent.column();
|
| 285 |
+
|
| 286 |
+
thread_start_row_ = thread_offset.row();
|
| 287 |
+
thread_start_column_ = thread_offset.column();
|
| 288 |
+
|
| 289 |
+
if (Layout::kRank > 2) {
|
| 290 |
+
// Initialize predicates
|
| 291 |
+
CUTLASS_PRAGMA_UNROLL
|
| 292 |
+
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
|
| 293 |
+
//
|
| 294 |
+
// Compute coordinate and decompose into N modes
|
| 295 |
+
//
|
| 296 |
+
|
| 297 |
+
int coord_n = thread_start_column_ + c * ThreadMap::Delta::kColumn;
|
| 298 |
+
|
| 299 |
+
mask_.predicates[c] = coord_n < extent.column();
|
| 300 |
+
|
| 301 |
+
Coord<Layout::kRank / 2, Index> modes_n;
|
| 302 |
+
|
| 303 |
+
int64_t offset_modes_n = 0;
|
| 304 |
+
|
| 305 |
+
if (kBigEndian) {
|
| 306 |
+
modes_n = CoordinateDecomposition<Layout::kRank / 2>(coord_n, params_.divmod_n);
|
| 307 |
+
|
| 308 |
+
offset_modes_n = dot(modes_n, params_.stride_n);
|
| 309 |
+
} else {
|
| 310 |
+
modes_n = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(
|
| 311 |
+
coord_n, params_.divmod_n);
|
| 312 |
+
|
| 313 |
+
offset_modes_n = dot(modes_n, params_.stride_n);
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
offset_modes_n_[c] = offset_modes_n;
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
if (!pointer) { mask_.clear(); }
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
// Initialize pointer
|
| 323 |
+
byte_pointer_ = reinterpret_cast<uint8_t*>(pointer);
|
| 324 |
+
|
| 325 |
+
// Initialize internal state counter
|
| 326 |
+
state_[0] = state_[1] = state_[2] = 0;
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
/// Adds a pointer offset in units of Element
|
| 330 |
+
CUTLASS_HOST_DEVICE
|
| 331 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 332 |
+
{
|
| 333 |
+
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
/// Stores a fragment to memory
|
| 337 |
+
CUTLASS_DEVICE
|
| 338 |
+
void store_with_byte_offset(Fragment const& frag, int64_t byte_offset)
|
| 339 |
+
{
|
| 340 |
+
uint8_t* byte_pointer = byte_pointer_;
|
| 341 |
+
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
|
| 342 |
+
|
| 343 |
+
CUTLASS_PRAGMA_UNROLL
|
| 344 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 345 |
+
CUTLASS_PRAGMA_UNROLL
|
| 346 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 347 |
+
int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup +
|
| 348 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 349 |
+
int64_t offset_modes_m = row_begin * params_.stride_m[0];
|
| 350 |
+
|
| 351 |
+
CUTLASS_PRAGMA_UNROLL
|
| 352 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 353 |
+
int frag_row_idx =
|
| 354 |
+
(row + ThreadMap::Iterations::kRow *
|
| 355 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 356 |
+
|
| 357 |
+
//
|
| 358 |
+
// Compute coordinate and decompose into M modes
|
| 359 |
+
//
|
| 360 |
+
|
| 361 |
+
int coord_m = row * ThreadMap::Delta::kRow + row_begin;
|
| 362 |
+
|
| 363 |
+
Coord<Layout::kRank / 2, Index> modes_m;
|
| 364 |
+
|
| 365 |
+
if (Layout::kRank > 2) {
|
| 366 |
+
if (kBigEndian) {
|
| 367 |
+
modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m,
|
| 368 |
+
params_.divmod_m);
|
| 369 |
+
} else {
|
| 370 |
+
modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(
|
| 371 |
+
coord_m, params_.divmod_m);
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
offset_modes_m = dot(modes_m, params_.stride_m);
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
//
|
| 378 |
+
// Compute the offset due to modes M
|
| 379 |
+
//
|
| 380 |
+
|
| 381 |
+
bool row_guard = (coord_m < extent_row_);
|
| 382 |
+
int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0];
|
| 383 |
+
|
| 384 |
+
CUTLASS_PRAGMA_UNROLL
|
| 385 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 386 |
+
//
|
| 387 |
+
// Compute coordinate and decompose into N modes
|
| 388 |
+
//
|
| 389 |
+
|
| 390 |
+
if (Layout::kRank > 2) { offset_modes_n = offset_modes_n_[column]; }
|
| 391 |
+
|
| 392 |
+
//
|
| 393 |
+
// Compute the pointer and access
|
| 394 |
+
//
|
| 395 |
+
bool guard;
|
| 396 |
+
if (Layout::kRank > 2) {
|
| 397 |
+
guard = row_guard && mask_.predicates[column];
|
| 398 |
+
} else {
|
| 399 |
+
guard = (coord_m < extent_row_) &&
|
| 400 |
+
((thread_start_column_ + ThreadMap::Delta::kColumn * column) <
|
| 401 |
+
extent_col_);
|
| 402 |
+
}
|
| 403 |
+
|
| 404 |
+
atomic_store<AccessType>(
|
| 405 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
|
| 406 |
+
(void*)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset),
|
| 407 |
+
guard);
|
| 408 |
+
|
| 409 |
+
if (Layout::kRank == 2) { offset_modes_n += params_.rank2_inc_col; }
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
if (Layout::kRank == 2) { offset_modes_m += params_.rank2_inc_row; }
|
| 413 |
+
}
|
| 414 |
+
}
|
| 415 |
+
}
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
/// Stores a fragment to memory
|
| 419 |
+
CUTLASS_DEVICE
|
| 420 |
+
void store(Fragment const& frag) { store_with_byte_offset(frag, 0); }
|
| 421 |
+
|
| 422 |
+
CUTLASS_DEVICE
|
| 423 |
+
void load(Fragment& frag) {}
|
| 424 |
+
|
| 425 |
+
/// Advances to the next position to load or store
|
| 426 |
+
CUTLASS_HOST_DEVICE
|
| 427 |
+
PredicatedTileIteratorAffineRankNAtomic& operator++()
|
| 428 |
+
{
|
| 429 |
+
++state_[0];
|
| 430 |
+
thread_start_row_ += ThreadMap::Shape::kRow;
|
| 431 |
+
|
| 432 |
+
if (state_[0] == ThreadMap::Count::kRow) {
|
| 433 |
+
state_[0] = 0;
|
| 434 |
+
++state_[1];
|
| 435 |
+
|
| 436 |
+
thread_start_row_ +=
|
| 437 |
+
(ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
|
| 438 |
+
|
| 439 |
+
if (state_[1] == ThreadMap::Count::kGroup) {
|
| 440 |
+
state_[1] = 0;
|
| 441 |
+
++state_[2];
|
| 442 |
+
|
| 443 |
+
thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
|
| 444 |
+
ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
|
| 445 |
+
|
| 446 |
+
if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; }
|
| 447 |
+
}
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
return *this;
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
///< Efficiently disables all accesses guarded by mask
|
| 454 |
+
CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
|
| 455 |
+
|
| 456 |
+
///< Efficiently enables all accesses guarded by mask
|
| 457 |
+
CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
|
| 458 |
+
|
| 459 |
+
///< Sets the mask
|
| 460 |
+
CUTLASS_DEVICE void get_mask(Mask& mask) { mask = mask_; }
|
| 461 |
+
|
| 462 |
+
///< Sets the mask
|
| 463 |
+
CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
|
| 464 |
+
};
|
| 465 |
+
|
| 466 |
+
template <typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap)
|
| 467 |
+
typename Element_, ///< Element data type
|
| 468 |
+
bool ScatterD = false, ///< Scatter D operand or not
|
| 469 |
+
typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not
|
| 470 |
+
bool UseCUDAStore = false>
|
| 471 |
+
class PredicatedTileIteratorAtomic {
|
| 472 |
+
public:
|
| 473 |
+
using ThreadMap = ThreadMap_;
|
| 474 |
+
using Shape = typename ThreadMap::Shape;
|
| 475 |
+
|
| 476 |
+
using Element = Element_;
|
| 477 |
+
|
| 478 |
+
using Layout = layout::RowMajor;
|
| 479 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 480 |
+
using ConstTensorRef = typename TensorRef::ConstTensorRef;
|
| 481 |
+
|
| 482 |
+
using Index = typename Layout::Index;
|
| 483 |
+
using LongIndex = typename Layout::LongIndex;
|
| 484 |
+
using TensorCoord = MatrixCoord;
|
| 485 |
+
|
| 486 |
+
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
|
| 487 |
+
static int const kThreads = ThreadMap::kThreads;
|
| 488 |
+
static int const kIterations = ThreadMap::Count::kTile;
|
| 489 |
+
|
| 490 |
+
static bool constexpr PermuteD = !layout::is_trivial_permute<PermuteDLayout>;
|
| 491 |
+
|
| 492 |
+
static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
|
| 493 |
+
static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
|
| 494 |
+
static_assert(ThreadMap::Iterations::kCluster > 0,
|
| 495 |
+
"ThreadMap::Iterations::kCluster must be > 0");
|
| 496 |
+
static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
|
| 497 |
+
|
| 498 |
+
/// Fragment object
|
| 499 |
+
using Fragment = Array<Element,
|
| 500 |
+
ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
|
| 501 |
+
ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
|
| 502 |
+
ThreadMap::kElementsPerAccess>;
|
| 503 |
+
|
| 504 |
+
/// Memory access size
|
| 505 |
+
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
|
| 506 |
+
|
| 507 |
+
//
|
| 508 |
+
// Parameters struct
|
| 509 |
+
//
|
| 510 |
+
|
| 511 |
+
/// Uses a non-template class
|
| 512 |
+
struct Params : PredicatedTileIteratorParams {
|
| 513 |
+
using Base = PredicatedTileIteratorParams;
|
| 514 |
+
|
| 515 |
+
CUTLASS_HOST_DEVICE
|
| 516 |
+
Params() {}
|
| 517 |
+
|
| 518 |
+
CUTLASS_HOST_DEVICE
|
| 519 |
+
Params(Layout const& layout)
|
| 520 |
+
: PredicatedTileIteratorParams(
|
| 521 |
+
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
|
| 522 |
+
make_OutputTileThreadMapDesc<ThreadMap>())
|
| 523 |
+
{
|
| 524 |
+
}
|
| 525 |
+
|
| 526 |
+
CUTLASS_HOST_DEVICE
|
| 527 |
+
Params(Base const& base) : Base(base) {}
|
| 528 |
+
};
|
| 529 |
+
|
| 530 |
+
/// Mask object
|
| 531 |
+
struct Mask {
|
| 532 |
+
static int const kCount = ThreadMap::Iterations::kColumn;
|
| 533 |
+
|
| 534 |
+
/// Predicate state
|
| 535 |
+
bool predicates[kCount];
|
| 536 |
+
|
| 537 |
+
//
|
| 538 |
+
// Mask
|
| 539 |
+
//
|
| 540 |
+
CUTLASS_HOST_DEVICE
|
| 541 |
+
Mask() { enable(); }
|
| 542 |
+
|
| 543 |
+
///< Efficiently disables all accesses guarded by mask
|
| 544 |
+
CUTLASS_HOST_DEVICE void clear()
|
| 545 |
+
{
|
| 546 |
+
CUTLASS_PRAGMA_UNROLL
|
| 547 |
+
for (int i = 0; i < kCount; ++i) { predicates[i] = false; }
|
| 548 |
+
}
|
| 549 |
+
|
| 550 |
+
///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask
|
| 551 |
+
CUTLASS_DEVICE void enable()
|
| 552 |
+
{
|
| 553 |
+
CUTLASS_PRAGMA_UNROLL
|
| 554 |
+
for (int i = 0; i < kCount; ++i) { predicates[i] = true; }
|
| 555 |
+
}
|
| 556 |
+
};
|
| 557 |
+
|
| 558 |
+
private:
|
| 559 |
+
//
|
| 560 |
+
// Data members
|
| 561 |
+
//
|
| 562 |
+
|
| 563 |
+
/// Parameters structure containing reference and precomputed state.
|
| 564 |
+
PredicatedTileIteratorParams params_;
|
| 565 |
+
|
| 566 |
+
/// Byte-level pointer. This pointer is usually for both load() and store(),
|
| 567 |
+
/// unless PermuteD is performed. When having PermuteD, byte_pointer_ is only
|
| 568 |
+
/// for load().
|
| 569 |
+
uint8_t* byte_pointer_;
|
| 570 |
+
|
| 571 |
+
/// Byte-level pointer for store(). Due to PermuteD Op, store_byte_pointer_
|
| 572 |
+
/// may be with different address computation compared to byte_pointer_.
|
| 573 |
+
uint8_t* store_byte_pointer_;
|
| 574 |
+
|
| 575 |
+
/// Array of boolean values to contain steady-state predicates
|
| 576 |
+
Mask mask_;
|
| 577 |
+
|
| 578 |
+
/// Extent of the matrix tile in rows
|
| 579 |
+
Index extent_row_;
|
| 580 |
+
|
| 581 |
+
/// Extent of the matrix tile in rows
|
| 582 |
+
Index extent_column_;
|
| 583 |
+
|
| 584 |
+
/// A thread's starting row position (assuming steady-state predicates have
|
| 585 |
+
/// been computed)
|
| 586 |
+
Index thread_start_row_;
|
| 587 |
+
|
| 588 |
+
/// A thread's starting column
|
| 589 |
+
Index thread_start_column_;
|
| 590 |
+
|
| 591 |
+
/// Internal state counter
|
| 592 |
+
int state_[3];
|
| 593 |
+
|
| 594 |
+
/// Scatter indices
|
| 595 |
+
int const* indices_;
|
| 596 |
+
|
| 597 |
+
/// PermuteDLayout
|
| 598 |
+
PermuteDLayout permute_layout_;
|
| 599 |
+
|
| 600 |
+
//
|
| 601 |
+
// Static asserts about internal strides
|
| 602 |
+
//
|
| 603 |
+
|
| 604 |
+
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
|
| 605 |
+
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
|
| 606 |
+
static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
|
| 607 |
+
|
| 608 |
+
private:
|
| 609 |
+
//
|
| 610 |
+
// Methods
|
| 611 |
+
//
|
| 612 |
+
|
| 613 |
+
public:
|
| 614 |
+
//
|
| 615 |
+
// Methods
|
| 616 |
+
//
|
| 617 |
+
|
| 618 |
+
/// Constructor
|
| 619 |
+
CUTLASS_DEVICE
|
| 620 |
+
PredicatedTileIteratorAtomic(PredicatedTileIteratorParams const& params,
|
| 621 |
+
Element* pointer,
|
| 622 |
+
TensorCoord extent,
|
| 623 |
+
int thread_idx,
|
| 624 |
+
TensorCoord threadblock_offset = TensorCoord(),
|
| 625 |
+
int const* indices = nullptr)
|
| 626 |
+
: params_(params),
|
| 627 |
+
indices_(indices),
|
| 628 |
+
permute_layout_(PitchLinearCoord(extent.column(), extent.row()),
|
| 629 |
+
params_.stride * kElementsPerAccess / sizeof(AccessType))
|
| 630 |
+
{
|
| 631 |
+
TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
|
| 632 |
+
|
| 633 |
+
extent_row_ = extent.row();
|
| 634 |
+
extent_column_ = extent.column();
|
| 635 |
+
|
| 636 |
+
thread_start_row_ = thread_offset.row();
|
| 637 |
+
thread_start_column_ = thread_offset.column();
|
| 638 |
+
|
| 639 |
+
// Initialize predicates
|
| 640 |
+
CUTLASS_PRAGMA_UNROLL
|
| 641 |
+
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
|
| 642 |
+
mask_.predicates[c] =
|
| 643 |
+
((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
|
| 644 |
+
}
|
| 645 |
+
|
| 646 |
+
// Null pointer performs no accesses
|
| 647 |
+
if (!pointer) { mask_.clear(); }
|
| 648 |
+
|
| 649 |
+
if (ScatterD && !indices) { mask_.clear(); }
|
| 650 |
+
|
| 651 |
+
// Initialize byte_pointer_
|
| 652 |
+
byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
|
| 653 |
+
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
|
| 654 |
+
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
|
| 655 |
+
|
| 656 |
+
if (ScatterD) {
|
| 657 |
+
byte_pointer_ =
|
| 658 |
+
reinterpret_cast<uint8_t*>(pointer) +
|
| 659 |
+
LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
|
| 660 |
+
}
|
| 661 |
+
|
| 662 |
+
// store_byte_pointer_ is set to be the same with byte_pointer_ unless
|
| 663 |
+
// PermuteD is used.
|
| 664 |
+
store_byte_pointer_ = PermuteD ? reinterpret_cast<uint8_t*>(pointer) : byte_pointer_;
|
| 665 |
+
|
| 666 |
+
// Initialize internal state counter
|
| 667 |
+
state_[0] = state_[1] = state_[2] = 0;
|
| 668 |
+
}
|
| 669 |
+
|
| 670 |
+
/// Adds a pointer offset in units of Element
|
| 671 |
+
CUTLASS_HOST_DEVICE
|
| 672 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 673 |
+
{
|
| 674 |
+
store_byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
|
| 675 |
+
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
|
| 676 |
+
}
|
| 677 |
+
|
| 678 |
+
/// Stores a fragment to memory
|
| 679 |
+
CUTLASS_DEVICE
|
| 680 |
+
void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const
|
| 681 |
+
{
|
| 682 |
+
uint8_t* byte_pointer = store_byte_pointer_;
|
| 683 |
+
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
|
| 684 |
+
|
| 685 |
+
CUTLASS_PRAGMA_UNROLL
|
| 686 |
+
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
|
| 687 |
+
CUTLASS_PRAGMA_UNROLL
|
| 688 |
+
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
|
| 689 |
+
CUTLASS_PRAGMA_UNROLL
|
| 690 |
+
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
|
| 691 |
+
int frag_row_idx =
|
| 692 |
+
(row + ThreadMap::Iterations::kRow *
|
| 693 |
+
(group + ThreadMap::Iterations::kGroup * cluster));
|
| 694 |
+
|
| 695 |
+
int row_offset = row * ThreadMap::Delta::kRow +
|
| 696 |
+
group * ThreadMap::Delta::kGroup +
|
| 697 |
+
cluster * ThreadMap::Delta::kCluster;
|
| 698 |
+
|
| 699 |
+
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
|
| 700 |
+
|
| 701 |
+
AccessType* memory_pointer =
|
| 702 |
+
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
|
| 703 |
+
|
| 704 |
+
if (ScatterD && row_guard) {
|
| 705 |
+
assert(indices_);
|
| 706 |
+
|
| 707 |
+
memory_pointer = reinterpret_cast<AccessType*>(
|
| 708 |
+
byte_pointer + byte_offset +
|
| 709 |
+
LongIndex(indices_[row_offset + thread_start_row_]) *
|
| 710 |
+
LongIndex(params_.stride));
|
| 711 |
+
}
|
| 712 |
+
|
| 713 |
+
CUTLASS_PRAGMA_UNROLL
|
| 714 |
+
for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
|
| 715 |
+
bool guard = row_guard && mask_.predicates[column];
|
| 716 |
+
|
| 717 |
+
if (PermuteD) {
|
| 718 |
+
int col_offset = column * ThreadMap::Delta::kColumn;
|
| 719 |
+
|
| 720 |
+
int col = col_offset + thread_start_column_;
|
| 721 |
+
int row = row_offset + thread_start_row_;
|
| 722 |
+
|
| 723 |
+
// Locate memory_pointer
|
| 724 |
+
memory_pointer = reinterpret_cast<AccessType*>(
|
| 725 |
+
byte_pointer + byte_offset +
|
| 726 |
+
permute_layout_(PitchLinearCoord(col, row)) * sizeof(AccessType) /
|
| 727 |
+
kElementsPerAccess);
|
| 728 |
+
}
|
| 729 |
+
atomic_store<AccessType>(
|
| 730 |
+
frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
|
| 731 |
+
(void*)&memory_pointer[0],
|
| 732 |
+
guard);
|
| 733 |
+
|
| 734 |
+
if (!PermuteD) {
|
| 735 |
+
memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess);
|
| 736 |
+
}
|
| 737 |
+
}
|
| 738 |
+
|
| 739 |
+
if (row + 1 < ThreadMap::Iterations::kRow) {
|
| 740 |
+
if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_row; }
|
| 741 |
+
}
|
| 742 |
+
}
|
| 743 |
+
|
| 744 |
+
if (group + 1 < ThreadMap::Iterations::kGroup) {
|
| 745 |
+
byte_pointer += params_.increment_group;
|
| 746 |
+
}
|
| 747 |
+
}
|
| 748 |
+
|
| 749 |
+
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
|
| 750 |
+
byte_pointer += params_.increment_cluster;
|
| 751 |
+
}
|
| 752 |
+
}
|
| 753 |
+
}
|
| 754 |
+
|
| 755 |
+
/// Stores a fragment to memory
|
| 756 |
+
CUTLASS_DEVICE
|
| 757 |
+
void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); }
|
| 758 |
+
|
| 759 |
+
CUTLASS_DEVICE
|
| 760 |
+
void load(Fragment& frag) {}
|
| 761 |
+
|
| 762 |
+
CUTLASS_DEVICE
|
| 763 |
+
MatrixCoord thread_start() const
|
| 764 |
+
{
|
| 765 |
+
return MatrixCoord(thread_start_row_, thread_start_column_);
|
| 766 |
+
}
|
| 767 |
+
|
| 768 |
+
/// Need to get the thread start row from the tile iterator
|
| 769 |
+
CUTLASS_DEVICE
|
| 770 |
+
int32_t thread_start_row() const { return thread_start_row_; }
|
| 771 |
+
|
| 772 |
+
/// Need to get the thread start row from the tile iterator
|
| 773 |
+
CUTLASS_DEVICE
|
| 774 |
+
int32_t thread_start_column() const { return thread_start_column_; }
|
| 775 |
+
|
| 776 |
+
/// Extent of the matrix in rows
|
| 777 |
+
CUTLASS_DEVICE
|
| 778 |
+
Index extent_row() const { return extent_row_; }
|
| 779 |
+
|
| 780 |
+
/// Extent of the matrix in columns
|
| 781 |
+
CUTLASS_DEVICE
|
| 782 |
+
Index extent_column() const { return extent_column_; }
|
| 783 |
+
|
| 784 |
+
/// Advances to the next position to load or store
|
| 785 |
+
CUTLASS_HOST_DEVICE
|
| 786 |
+
PredicatedTileIteratorAtomic& operator++()
|
| 787 |
+
{
|
| 788 |
+
++state_[0];
|
| 789 |
+
|
| 790 |
+
if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_row; }
|
| 791 |
+
|
| 792 |
+
if (!ScatterD) { byte_pointer_ += params_.advance_row; }
|
| 793 |
+
|
| 794 |
+
thread_start_row_ += ThreadMap::Shape::kRow;
|
| 795 |
+
|
| 796 |
+
if (state_[0] == ThreadMap::Count::kRow) {
|
| 797 |
+
state_[0] = 0;
|
| 798 |
+
++state_[1];
|
| 799 |
+
byte_pointer_ += params_.advance_group;
|
| 800 |
+
store_byte_pointer_ += params_.advance_group;
|
| 801 |
+
|
| 802 |
+
thread_start_row_ +=
|
| 803 |
+
(ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
|
| 804 |
+
|
| 805 |
+
if (state_[1] == ThreadMap::Count::kGroup) {
|
| 806 |
+
state_[1] = 0;
|
| 807 |
+
++state_[2];
|
| 808 |
+
byte_pointer_ += params_.advance_cluster;
|
| 809 |
+
store_byte_pointer_ += params_.advance_cluster;
|
| 810 |
+
|
| 811 |
+
thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
|
| 812 |
+
ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
|
| 813 |
+
|
| 814 |
+
if (state_[2] == ThreadMap::Count::kCluster) {
|
| 815 |
+
state_[2] = 0;
|
| 816 |
+
byte_pointer_ += params_.advance_tile;
|
| 817 |
+
store_byte_pointer_ += params_.advance_tile;
|
| 818 |
+
|
| 819 |
+
thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow *
|
| 820 |
+
ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile;
|
| 821 |
+
}
|
| 822 |
+
}
|
| 823 |
+
}
|
| 824 |
+
|
| 825 |
+
return *this;
|
| 826 |
+
}
|
| 827 |
+
|
| 828 |
+
/// Advances a number of positions to load or store
|
| 829 |
+
CUTLASS_HOST_DEVICE
|
| 830 |
+
PredicatedTileIteratorAtomic& operator+=(int increment)
|
| 831 |
+
{
|
| 832 |
+
// Row
|
| 833 |
+
state_[0] += increment;
|
| 834 |
+
int increment_row = state_[0] / ThreadMap::Count::kRow;
|
| 835 |
+
state_[0] = state_[0] % ThreadMap::Count::kRow;
|
| 836 |
+
|
| 837 |
+
byte_pointer_ += (params_.advance_row * increment);
|
| 838 |
+
store_byte_pointer_ += (params_.advance_row * increment);
|
| 839 |
+
thread_start_row_ += (ThreadMap::Shape::kRow * increment);
|
| 840 |
+
|
| 841 |
+
// Group
|
| 842 |
+
state_[1] += increment_row;
|
| 843 |
+
int increment_group = state_[1] / ThreadMap::Count::kGroup;
|
| 844 |
+
state_[1] = state_[1] % ThreadMap::Count::kGroup;
|
| 845 |
+
|
| 846 |
+
byte_pointer_ += (params_.advance_group * increment_row);
|
| 847 |
+
store_byte_pointer_ += (params_.advance_group * increment_row);
|
| 848 |
+
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow *
|
| 849 |
+
ThreadMap::Count::kRow * increment_row;
|
| 850 |
+
|
| 851 |
+
// Cluster
|
| 852 |
+
state_[2] += increment_group;
|
| 853 |
+
int increment_cluster = state_[2] / ThreadMap::Count::kCluster;
|
| 854 |
+
state_[2] = state_[2] % ThreadMap::Count::kCluster;
|
| 855 |
+
|
| 856 |
+
byte_pointer_ += (params_.advance_cluster * increment_group);
|
| 857 |
+
store_byte_pointer_ += (params_.advance_cluster * increment_group);
|
| 858 |
+
thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
|
| 859 |
+
ThreadMap::Count::kRow * ThreadMap::Shape::kRow * increment_group;
|
| 860 |
+
|
| 861 |
+
// Tile
|
| 862 |
+
byte_pointer_ += (params_.advance_tile * increment_cluster);
|
| 863 |
+
store_byte_pointer_ += (params_.advance_tile * increment_cluster);
|
| 864 |
+
thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow *
|
| 865 |
+
ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile *
|
| 866 |
+
increment_cluster;
|
| 867 |
+
|
| 868 |
+
return *this;
|
| 869 |
+
}
|
| 870 |
+
|
| 871 |
+
///< Efficiently disables all accesses guarded by mask
|
| 872 |
+
CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
|
| 873 |
+
|
| 874 |
+
///< Efficiently enables all accesses guarded by mask
|
| 875 |
+
CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
|
| 876 |
+
|
| 877 |
+
///< Sets the mask
|
| 878 |
+
CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
|
| 879 |
+
|
| 880 |
+
///< Sets the mask
|
| 881 |
+
CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
|
| 882 |
+
};
|
| 883 |
+
|
| 884 |
+
} // namespace threadblock
|
| 885 |
+
} // namespace epilogue
|
| 886 |
+
} // namespace cutlass
|
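Both tile iterators in the header above swap the epilogue's plain stores for the predicated atomic adds of `atomic_store`. The practical consequence is that several threadblocks can safely accumulate partial results (for example partial gradient tiles) into the same output buffer, with no inter-block ordering and no separate reduction pass. The toy kernel below is written for this note rather than taken from DeepSpeed; it shows the underlying idea in isolation.

#include <cuda_runtime.h>

// Toy illustration: blocks along gridDim.y each hold a partial contribution to
// out[0..n); atomicAdd lets them accumulate into the same buffer concurrently.
__global__ void accumulate_partials(const float* partials, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // A plain store here would race between blocks with different blockIdx.y;
        // the atomic add makes the accumulation order-independent.
        atomicAdd(&out[i], partials[blockIdx.y * n + i]);
    }
}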
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h
ADDED
@@ -0,0 +1,1938 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
 * reserved. SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*! \file
    \brief Templates implementing loading of tiles from pitch-linear rank=2 tensors.

    This iterator uses masks to guard out-of-bounds accesses. The first tile
    this iterator visits may be partial, then the remaining tiles are complete.
    So, we only need to compute the predicates twice: once before the first tile
    and once for the remaining full tiles, which can share the same predicates.

    A precomputed "Params" object minimizes the amount of state that must be
    stored in registers, and integer addition is used to advance the pointer
    through memory.
*/

#pragma once

#include "cutlass/arch/memory.h"
#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"

////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace transform {
namespace threadblock {

////////////////////////////////////////////////////////////////////////////////

/// PredicatedTileIteratorResidualLast
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept |
///            MaskedTileIteratorConcept
///
/// Regular tile iterator using a precomputed control structure to minimize
/// register liveness and integer arithmetic.
///
/// Layout is assumed to be invariant at the time the precomputed "Params"
/// object is constructed.
///
/// Base pointer and tensor extents may be specified at the time the iterator is
/// constructed. Subsequently, they are assumed to be immutable.
///
/// Adding a logical coordinate offset may be performed at the time the iterator
/// is constructed. Subsequent additions to logical coordinate offset may be
/// performed but are relatively expensive.
///
/// Visitation order is intended to first visit a "residual" tile that may be
/// partially full in both the advance dimension and the steady-state dimension.
/// This is assumed to be the last tile in the iteration sequence. Advancing an
/// iterator that has just been constructed moves to the first tile that is full
/// in the advance dimension and recomputes predicates. Subsequent accesses may
/// be performed without updating internal predicates and are efficient in terms
/// of live register state and pointer arithmetic instructions.
///
/// To be efficient, this assumes the iterator will be dereferenced and advanced
/// at least once outside any looping structure to minimize integer arithmetic.
///
/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to
/// dereferencing the iterator.
///
///
/// Example:
///
/// An efficient pipeline structure may be constructed as follows:
///
// template <typename Iterator>
// __global__ void kernel(
//   typename Iterator::Params params,
//   typename Iterator::Element *ptr,
//   TensorCoord extent) {
//
//   typename Iterator::Fragment fragment;
//
//   TensorCoord threadblock_offset(0, 0);
//
//   Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
//
//   fragment = *iter;  // load "residue" tile first
//   ++iter;            // advance to first "steady state" tile and update internal masks
//
//   #pragma unroll
//   for (int i = Remaining - 1; i >= 0; --i) {
//
//     f(fragment);
//
//     if (!i) {
//       iter.clear_mask();  // light-weight operation to clear masks -
//                           // subsequent loads become NO-OPs.
//     }
//
//     fragment = *iter;  // load tile during "steady state" phase
//     ++iter;            // advance to next tile - lightweight due to steady-state masks
//   }
// }
//
// void host(TensorView<Element, 2, layout::PitchLinear> view) {
//
//   using Iterator =
//     transform::threadblock::PredicatedTileIteratorResidualLast;
//
//   typename Iterator::Params params(view.layout());
//
//   kernel<Iterator>(params, view.data());
// }
///
///
template <typename Shape,
          typename Element,
          typename Layout,
          int AdvanceRank,
          typename ThreadMap,
          int AccessSize = ThreadMap::kElementsPerAccess,
          bool Gather = false>
class PredicatedTileIteratorResidualLast;

////////////////////////////////////////////////////////////////////////////////
| 159 |
+
|
| 160 |
+
/// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data.
|
| 161 |
+
///
|
| 162 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 163 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 164 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 165 |
+
/// MaskedTileIteratorConcept
|
| 166 |
+
///
|
| 167 |
+
template <typename Shape_,
|
| 168 |
+
typename Element_,
|
| 169 |
+
int AdvanceRank,
|
| 170 |
+
typename ThreadMap_,
|
| 171 |
+
int AccessSize,
|
| 172 |
+
bool Gather>
|
| 173 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 174 |
+
Element_,
|
| 175 |
+
layout::PitchLinear,
|
| 176 |
+
AdvanceRank,
|
| 177 |
+
ThreadMap_,
|
| 178 |
+
AccessSize,
|
| 179 |
+
Gather> {
|
| 180 |
+
public:
|
| 181 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 182 |
+
"Specialization for pitch-linear iterator may advance along the "
|
| 183 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 184 |
+
|
| 185 |
+
using Shape = Shape_;
|
| 186 |
+
using Element = Element_;
|
| 187 |
+
using Layout = layout::PitchLinear;
|
| 188 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 189 |
+
using ThreadMap = ThreadMap_;
|
| 190 |
+
|
| 191 |
+
using Index = typename Layout::Index;
|
| 192 |
+
using LongIndex = typename Layout::LongIndex;
|
| 193 |
+
|
| 194 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 195 |
+
using TensorView = TensorView<Element, Layout>;
|
| 196 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 197 |
+
|
| 198 |
+
using Pointer = Element*;
|
| 199 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 200 |
+
|
| 201 |
+
/// Type used for internal memory accesses
|
| 202 |
+
using AccessType =
|
| 203 |
+
AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
|
| 204 |
+
|
| 205 |
+
/// Underlying iterator to compute the addresses
|
| 206 |
+
using TileAccessIterator = PredicatedTileAccessIteratorResidualLast<Shape,
|
| 207 |
+
Element,
|
| 208 |
+
Layout,
|
| 209 |
+
kAdvanceRank,
|
| 210 |
+
ThreadMap,
|
| 211 |
+
AccessType,
|
| 212 |
+
Gather>;
|
| 213 |
+
|
| 214 |
+
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
|
| 215 |
+
|
| 216 |
+
/// Fragment object to be loaded or stored
|
| 217 |
+
using Fragment =
|
| 218 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 219 |
+
|
| 220 |
+
/// Predicate vector stores mask to guard accesses
|
| 221 |
+
using Mask = typename TileAccessIterator::Mask;
|
| 222 |
+
|
| 223 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 224 |
+
class Params {
|
| 225 |
+
public:
|
| 226 |
+
using Base = typename TileAccessIterator::Params::Base;
|
| 227 |
+
|
| 228 |
+
friend PredicatedTileIteratorResidualLast;
|
| 229 |
+
|
| 230 |
+
private:
|
| 231 |
+
/// Parameters object
|
| 232 |
+
typename TileAccessIterator::Params params_;
|
| 233 |
+
|
| 234 |
+
public:
|
| 235 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 236 |
+
CUTLASS_HOST_DEVICE
|
| 237 |
+
Params(Layout const& layout) : params_(layout) {}
|
| 238 |
+
|
| 239 |
+
CUTLASS_HOST_DEVICE
|
| 240 |
+
Params() {}
|
| 241 |
+
|
| 242 |
+
CUTLASS_HOST_DEVICE
|
| 243 |
+
Params(Base const& base) : params_(base) {}
|
| 244 |
+
};
|
| 245 |
+
|
| 246 |
+
private:
|
| 247 |
+
/// Internal pointer type permits fast address arithmetic
|
| 248 |
+
using BytePointer = char*;
|
| 249 |
+
|
| 250 |
+
private:
|
| 251 |
+
//
|
| 252 |
+
// Data members
|
| 253 |
+
//
|
| 254 |
+
|
| 255 |
+
/// Data member to the tile access iterator
|
| 256 |
+
TileAccessIterator address_iterator_;
|
| 257 |
+
|
| 258 |
+
public:
|
| 259 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 260 |
+
/// and thread ID
|
| 261 |
+
CUTLASS_HOST_DEVICE
|
| 262 |
+
PredicatedTileIteratorResidualLast(
|
| 263 |
+
/// Precomputed parameters object
|
| 264 |
+
Params const& params,
|
| 265 |
+
/// Pointer to start of tensor
|
| 266 |
+
Pointer pointer,
|
| 267 |
+
/// Extent of tensor
|
| 268 |
+
TensorCoord extent,
|
| 269 |
+
/// ID of each participating thread
|
| 270 |
+
int thread_id,
|
| 271 |
+
/// Initial offset of threadblock
|
| 272 |
+
TensorCoord const& threadblock_offset,
|
| 273 |
+
/// Gather indices
|
| 274 |
+
int const* indices = nullptr)
|
| 275 |
+
: address_iterator_(params.params_, pointer, extent, thread_id, threadblock_offset, indices)
|
| 276 |
+
{
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 280 |
+
/// offset
|
| 281 |
+
CUTLASS_HOST_DEVICE
|
| 282 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 283 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 284 |
+
TensorCoord extent, ///< Extent of tensor
|
| 285 |
+
int thread_id ///< ID of each participating thread
|
| 286 |
+
)
|
| 287 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 288 |
+
{
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
/// Adds a pointer offset in units of Element
|
| 292 |
+
CUTLASS_HOST_DEVICE
|
| 293 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 294 |
+
{
|
| 295 |
+
address_iterator_.add_pointer_offset(pointer_offset);
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
/// Advances to the next tile in memory.
|
| 299 |
+
///
|
| 300 |
+
/// The first time this method is called, predicates are updated, and the
|
| 301 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 302 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 303 |
+
/// pointer.
|
| 304 |
+
CUTLASS_HOST_DEVICE
|
| 305 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 306 |
+
{
|
| 307 |
+
if (kAdvanceRank)
|
| 308 |
+
address_iterator_.add_tile_offset({0, 1});
|
| 309 |
+
else
|
| 310 |
+
address_iterator_.add_tile_offset({1, 0});
|
| 311 |
+
|
| 312 |
+
return *this;
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
/// Advances to the next tile in memory.
|
| 316 |
+
///
|
| 317 |
+
/// The first time this method is called, predicates are updated, and the
|
| 318 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 319 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 320 |
+
/// pointer.
|
| 321 |
+
CUTLASS_HOST_DEVICE
|
| 322 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 323 |
+
{
|
| 324 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 325 |
+
operator++();
|
| 326 |
+
return self;
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
/// Clears the predicate set efficiently
|
| 330 |
+
CUTLASS_HOST_DEVICE
|
| 331 |
+
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
|
| 332 |
+
|
| 333 |
+
CUTLASS_HOST_DEVICE
|
| 334 |
+
void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); }
|
| 335 |
+
|
| 336 |
+
/// Clears the predicate set efficiently
|
| 337 |
+
CUTLASS_HOST_DEVICE
|
| 338 |
+
void enable_mask() { address_iterator_.enable_mask(); }
|
| 339 |
+
|
| 340 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 341 |
+
CUTLASS_HOST_DEVICE
|
| 342 |
+
void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); }
|
| 343 |
+
|
| 344 |
+
/// Gets the mask
|
| 345 |
+
CUTLASS_HOST_DEVICE
|
| 346 |
+
void get_mask(Mask& mask) { address_iterator_.get_mask(mask); }
|
| 347 |
+
|
| 348 |
+
CUTLASS_DEVICE
|
| 349 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 350 |
+
{
|
| 351 |
+
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
CUTLASS_DEVICE
|
| 355 |
+
void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
|
| 356 |
+
{
|
| 357 |
+
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
|
| 358 |
+
|
| 359 |
+
CUTLASS_PRAGMA_UNROLL
|
| 360 |
+
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
|
| 361 |
+
CUTLASS_PRAGMA_UNROLL
|
| 362 |
+
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
|
| 363 |
+
CUTLASS_PRAGMA_UNROLL
|
| 364 |
+
for (int v = 0; v < kAccessesPerVector; ++v) {
|
| 365 |
+
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
|
| 366 |
+
|
| 367 |
+
address_iterator_.set_iteration_index(idx);
|
| 368 |
+
char const* byte_ptr =
|
| 369 |
+
reinterpret_cast<char const*>(address_iterator_.get()) + byte_offset;
|
| 370 |
+
|
| 371 |
+
AccessType const* access_ptr = reinterpret_cast<AccessType const*>(byte_ptr);
|
| 372 |
+
|
| 373 |
+
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
|
| 374 |
+
frag_ptr[idx], access_ptr, address_iterator_.valid());
|
| 375 |
+
|
| 376 |
+
++address_iterator_;
|
| 377 |
+
}
|
| 378 |
+
}
|
| 379 |
+
}
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
/// Loads a fragment from memory
|
| 383 |
+
CUTLASS_DEVICE
|
| 384 |
+
void load(Fragment& frag) { load_with_byte_offset(frag, 0); }
|
| 385 |
+
|
| 386 |
+
/// Store a fragment to memory
|
| 387 |
+
CUTLASS_DEVICE
|
| 388 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 389 |
+
{
|
| 390 |
+
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
/// Store a fragment to memory
|
| 394 |
+
CUTLASS_DEVICE
|
| 395 |
+
void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
|
| 396 |
+
{
|
| 397 |
+
address_iterator_.set_iteration_index(0);
|
| 398 |
+
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
|
| 399 |
+
|
| 400 |
+
CUTLASS_PRAGMA_UNROLL
|
| 401 |
+
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
|
| 402 |
+
CUTLASS_PRAGMA_UNROLL
|
| 403 |
+
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
|
| 404 |
+
CUTLASS_PRAGMA_UNROLL
|
| 405 |
+
for (int v = 0; v < kAccessesPerVector; ++v) {
|
| 406 |
+
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
|
| 407 |
+
|
| 408 |
+
char* byte_ptr = reinterpret_cast<char*>(address_iterator_.get()) + byte_offset;
|
| 409 |
+
AccessType* access_ptr = reinterpret_cast<AccessType*>(byte_ptr);
|
| 410 |
+
|
| 411 |
+
if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; }
|
| 412 |
+
++address_iterator_;
|
| 413 |
+
}
|
| 414 |
+
}
|
| 415 |
+
}
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
/// Store a fragment to memory
|
| 419 |
+
CUTLASS_DEVICE
|
| 420 |
+
void store(Fragment const& frag) { store_with_byte_offset(frag, 0); }
|
| 421 |
+
};
|
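// ---------------------------------------------------------------------------
// Editor's illustration (not part of the original header): a minimal sketch of
// how the pitch-linear specialization above can be driven from device code.
// `copy_tiles` and `tile_count` are hypothetical names, and the residual /
// steady-state scheduling (set_residual_tile, clear_mask) is left to the
// caller, as in the pipelined MMA classes that consume this iterator.
// ---------------------------------------------------------------------------
template <typename Iterator>
CUTLASS_DEVICE void copy_tiles(typename Iterator::Params const& params,
                               typename Iterator::Element* src,
                               typename Iterator::Element* dst,
                               typename Iterator::TensorCoord extent,
                               int thread_id,
                               int tile_count)
{
    // One read iterator and one write iterator over the same extent and layout.
    Iterator src_it(params, src, extent, thread_id, make_Coord(0, 0));
    Iterator dst_it(params, dst, extent, thread_id, make_Coord(0, 0));

    typename Iterator::Fragment frag;

    CUTLASS_PRAGMA_NO_UNROLL
    for (int t = 0; t < tile_count; ++t) {
        src_it.load(frag);   // predicated vector loads through the access iterator
        dst_it.store(frag);  // predicated stores; out-of-bounds lanes are skipped
        ++src_it;            // advance one tile along the AdvanceRank dimension
        ++dst_it;
    }
}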
| 422 |
+
|
| 423 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 424 |
+
|
| 425 |
+
/// Specialization of PredicatedTileIteratorResidualLast for column-major data.
|
| 426 |
+
///
|
| 427 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 428 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 429 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 430 |
+
/// MaskedTileIteratorConcept
|
| 431 |
+
///
|
| 432 |
+
template <typename Shape_,
|
| 433 |
+
typename Element_,
|
| 434 |
+
int AdvanceRank,
|
| 435 |
+
typename ThreadMap_,
|
| 436 |
+
int AccessSize,
|
| 437 |
+
bool Gather>
|
| 438 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 439 |
+
Element_,
|
| 440 |
+
layout::ColumnMajor,
|
| 441 |
+
AdvanceRank,
|
| 442 |
+
ThreadMap_,
|
| 443 |
+
AccessSize,
|
| 444 |
+
Gather> {
|
| 445 |
+
public:
|
| 446 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 447 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 448 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 449 |
+
|
| 450 |
+
using Shape = Shape_;
|
| 451 |
+
using Element = Element_;
|
| 452 |
+
using Layout = layout::ColumnMajor;
|
| 453 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 454 |
+
using ThreadMap = ThreadMap_;
|
| 455 |
+
|
| 456 |
+
using Index = typename Layout::Index;
|
| 457 |
+
using LongIndex = typename Layout::LongIndex;
|
| 458 |
+
|
| 459 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 460 |
+
using TensorView = TensorView<Element, Layout>;
|
| 461 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 462 |
+
|
| 463 |
+
using Pointer = Element*;
|
| 464 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 465 |
+
|
| 466 |
+
using UnderlyingIterator =
|
| 467 |
+
PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
|
| 468 |
+
Element,
|
| 469 |
+
layout::PitchLinear,
|
| 470 |
+
(kAdvanceRank == 0 ? 0 : 1),
|
| 471 |
+
ThreadMap,
|
| 472 |
+
AccessSize,
|
| 473 |
+
Gather>;
|
| 474 |
+
|
| 475 |
+
using AccessType = typename UnderlyingIterator::AccessType;
|
| 476 |
+
|
| 477 |
+
/// Fragment object to be loaded or stored
|
| 478 |
+
using Fragment =
|
| 479 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 480 |
+
|
| 481 |
+
/// Predicate vector stores mask to guard accesses
|
| 482 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 483 |
+
|
| 484 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 485 |
+
class Params {
|
| 486 |
+
private:
|
| 487 |
+
friend PredicatedTileIteratorResidualLast;
|
| 488 |
+
|
| 489 |
+
/// Parameters object
|
| 490 |
+
typename UnderlyingIterator::Params params_;
|
| 491 |
+
|
| 492 |
+
public:
|
| 493 |
+
CUTLASS_HOST_DEVICE
|
| 494 |
+
Params() {}
|
| 495 |
+
|
| 496 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 497 |
+
CUTLASS_HOST_DEVICE
|
| 498 |
+
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
|
| 499 |
+
|
| 500 |
+
CUTLASS_HOST_DEVICE
|
| 501 |
+
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
|
| 502 |
+
};
|
| 503 |
+
|
| 504 |
+
private:
|
| 505 |
+
//
|
| 506 |
+
// Data members
|
| 507 |
+
//
|
| 508 |
+
|
| 509 |
+
/// Underlying pitch-linear tile iterator
|
| 510 |
+
UnderlyingIterator iterator_;
|
| 511 |
+
|
| 512 |
+
public:
|
| 513 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 514 |
+
/// and thread ID
|
| 515 |
+
CUTLASS_HOST_DEVICE
|
| 516 |
+
PredicatedTileIteratorResidualLast(
|
| 517 |
+
Params const& params, ///< Precomputed parameters object
|
| 518 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 519 |
+
TensorCoord extent, ///< Extent of tensor
|
| 520 |
+
int thread_id, ///< ID of each participating thread
|
| 521 |
+
TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
|
| 522 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 523 |
+
///< gather/scatter at this specialization
|
| 524 |
+
)
|
| 525 |
+
: iterator_(params.params_,
|
| 526 |
+
pointer,
|
| 527 |
+
layout::PitchLinearCoord(extent.row(), extent.column()),
|
| 528 |
+
thread_id,
|
| 529 |
+
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()),
|
| 530 |
+
indices)
|
| 531 |
+
{
|
| 532 |
+
}
|
| 533 |
+
|
| 534 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 535 |
+
/// offset
|
| 536 |
+
CUTLASS_HOST_DEVICE
|
| 537 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 538 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 539 |
+
TensorCoord extent, ///< Extent of tensor
|
| 540 |
+
int thread_id ///< ID of each participating thread
|
| 541 |
+
)
|
| 542 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 543 |
+
{
|
| 544 |
+
}
|
| 545 |
+
|
| 546 |
+
/// Adds a pointer offset in units of Element
|
| 547 |
+
CUTLASS_HOST_DEVICE
|
| 548 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 549 |
+
{
|
| 550 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 551 |
+
}
|
| 552 |
+
|
| 553 |
+
/// Advances to the next tile in memory.
|
| 554 |
+
///
|
| 555 |
+
/// The first time this method is called, predicates are updated, and the
|
| 556 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 557 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 558 |
+
/// pointer.
|
| 559 |
+
CUTLASS_HOST_DEVICE
|
| 560 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 561 |
+
{
|
| 562 |
+
++iterator_;
|
| 563 |
+
return *this;
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
/// Advances to the next tile in memory.
|
| 567 |
+
///
|
| 568 |
+
/// The first time this method is called, predicates are updated, and the
|
| 569 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 570 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 571 |
+
/// pointer.
|
| 572 |
+
CUTLASS_HOST_DEVICE
|
| 573 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 574 |
+
{
|
| 575 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 576 |
+
operator++();
|
| 577 |
+
return self;
|
| 578 |
+
}
|
| 579 |
+
|
| 580 |
+
/// Clears the predicate set efficiently
|
| 581 |
+
CUTLASS_HOST_DEVICE
|
| 582 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 583 |
+
|
| 584 |
+
CUTLASS_HOST_DEVICE
|
| 585 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 586 |
+
|
| 587 |
+
/// Clears the predicate set efficiently
|
| 588 |
+
CUTLASS_HOST_DEVICE
|
| 589 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 590 |
+
|
| 591 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 592 |
+
CUTLASS_HOST_DEVICE
|
| 593 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 594 |
+
|
| 595 |
+
/// Gets the mask
|
| 596 |
+
CUTLASS_HOST_DEVICE
|
| 597 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 598 |
+
|
| 599 |
+
/// Loads a fragment from memory
|
| 600 |
+
CUTLASS_DEVICE
|
| 601 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 602 |
+
{
|
| 603 |
+
iterator_.load_with_pointer_offset(frag, pointer_offset);
|
| 604 |
+
}
|
| 605 |
+
|
| 606 |
+
/// Loads a fragment from memory
|
| 607 |
+
CUTLASS_DEVICE
|
| 608 |
+
void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
|
| 609 |
+
{
|
| 610 |
+
iterator_.load_with_byte_offset(frag, byte_offset);
|
| 611 |
+
}
|
| 612 |
+
|
| 613 |
+
/// Loads a fragment from memory
|
| 614 |
+
CUTLASS_DEVICE
|
| 615 |
+
void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
|
| 616 |
+
|
| 617 |
+
/// Store a fragment to memory
|
| 618 |
+
CUTLASS_DEVICE
|
| 619 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 620 |
+
{
|
| 621 |
+
iterator_.store_with_pointer_offset(frag, pointer_offset);
|
| 622 |
+
}
|
| 623 |
+
|
| 624 |
+
/// Store a fragment to memory
|
| 625 |
+
CUTLASS_DEVICE
|
| 626 |
+
void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
|
| 627 |
+
{
|
| 628 |
+
iterator_.store_with_byte_offset(frag, byte_offset);
|
| 629 |
+
}
|
| 630 |
+
|
| 631 |
+
/// Store a fragment to memory
|
| 632 |
+
CUTLASS_DEVICE
|
| 633 |
+
void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
|
| 634 |
+
};
|
| 635 |
+
|
| 636 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 637 |
+
|
| 638 |
+
/// Specialization of PredicatedTileIteratorResidualLast for row-major data.
|
| 639 |
+
///
|
| 640 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 641 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 642 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 643 |
+
/// MaskedTileIteratorConcept
|
| 644 |
+
///
|
| 645 |
+
template <typename Shape_,
|
| 646 |
+
typename Element_,
|
| 647 |
+
int AdvanceRank,
|
| 648 |
+
typename ThreadMap_,
|
| 649 |
+
int AccessSize,
|
| 650 |
+
bool Gather>
|
| 651 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 652 |
+
Element_,
|
| 653 |
+
layout::RowMajor,
|
| 654 |
+
AdvanceRank,
|
| 655 |
+
ThreadMap_,
|
| 656 |
+
AccessSize,
|
| 657 |
+
Gather> {
|
| 658 |
+
public:
|
| 659 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 660 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 661 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 662 |
+
|
| 663 |
+
using Shape = Shape_;
|
| 664 |
+
using Element = Element_;
|
| 665 |
+
using Layout = layout::RowMajor;
|
| 666 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 667 |
+
using ThreadMap = ThreadMap_;
|
| 668 |
+
|
| 669 |
+
using Index = typename Layout::Index;
|
| 670 |
+
using LongIndex = typename Layout::LongIndex;
|
| 671 |
+
|
| 672 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 673 |
+
using TensorView = TensorView<Element, Layout>;
|
| 674 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 675 |
+
|
| 676 |
+
using Pointer = Element*;
|
| 677 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 678 |
+
|
| 679 |
+
using UnderlyingIterator =
|
| 680 |
+
PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
|
| 681 |
+
Element,
|
| 682 |
+
layout::PitchLinear,
|
| 683 |
+
(kAdvanceRank == 0 ? 1 : 0),
|
| 684 |
+
ThreadMap,
|
| 685 |
+
AccessSize,
|
| 686 |
+
Gather>;
|
| 687 |
+
|
| 688 |
+
using AccessType = typename UnderlyingIterator::AccessType;
|
| 689 |
+
|
| 690 |
+
/// Fragment object to be loaded or stored
|
| 691 |
+
using Fragment =
|
| 692 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 693 |
+
|
| 694 |
+
/// Predicate vector stores mask to guard accesses
|
| 695 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 696 |
+
|
| 697 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 698 |
+
class Params {
|
| 699 |
+
private:
|
| 700 |
+
friend PredicatedTileIteratorResidualLast;
|
| 701 |
+
|
| 702 |
+
/// Parameters object
|
| 703 |
+
typename UnderlyingIterator::Params params_;
|
| 704 |
+
|
| 705 |
+
public:
|
| 706 |
+
CUTLASS_HOST_DEVICE
|
| 707 |
+
Params() {}
|
| 708 |
+
|
| 709 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 710 |
+
CUTLASS_HOST_DEVICE
|
| 711 |
+
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
|
| 712 |
+
|
| 713 |
+
CUTLASS_HOST_DEVICE
|
| 714 |
+
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
|
| 715 |
+
};
|
| 716 |
+
|
| 717 |
+
private:
|
| 718 |
+
//
|
| 719 |
+
// Data members
|
| 720 |
+
//
|
| 721 |
+
|
| 722 |
+
/// Underlying pitch-linear tile iterator
|
| 723 |
+
UnderlyingIterator iterator_;
|
| 724 |
+
|
| 725 |
+
public:
|
| 726 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 727 |
+
/// and thread ID
|
| 728 |
+
CUTLASS_HOST_DEVICE
|
| 729 |
+
PredicatedTileIteratorResidualLast(
|
| 730 |
+
Params const& params, ///< Precomputed parameters object
|
| 731 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 732 |
+
TensorCoord extent, ///< Extent of tensor
|
| 733 |
+
int thread_id, ///< ID of each participating thread
|
| 734 |
+
TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
|
| 735 |
+
int const* indices = nullptr ///< Gather indices
|
| 736 |
+
)
|
| 737 |
+
: iterator_(params.params_,
|
| 738 |
+
pointer,
|
| 739 |
+
layout::PitchLinearCoord(extent.column(), extent.row()),
|
| 740 |
+
thread_id,
|
| 741 |
+
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()),
|
| 742 |
+
indices)
|
| 743 |
+
{
|
| 744 |
+
}
|
| 745 |
+
|
| 746 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 747 |
+
/// offset
|
| 748 |
+
CUTLASS_HOST_DEVICE
|
| 749 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 750 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 751 |
+
TensorCoord extent, ///< Extent of tensor
|
| 752 |
+
int thread_id ///< ID of each participating thread
|
| 753 |
+
)
|
| 754 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 755 |
+
{
|
| 756 |
+
}
|
| 757 |
+
|
| 758 |
+
/// Adds a pointer offset in units of Element
|
| 759 |
+
CUTLASS_HOST_DEVICE
|
| 760 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 761 |
+
{
|
| 762 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 763 |
+
}
|
| 764 |
+
|
| 765 |
+
/// Advances to the next tile in memory.
|
| 766 |
+
///
|
| 767 |
+
/// The first time this method is called, predicates are updated, and the
|
| 768 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 769 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 770 |
+
/// pointer.
|
| 771 |
+
CUTLASS_HOST_DEVICE
|
| 772 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 773 |
+
{
|
| 774 |
+
++iterator_;
|
| 775 |
+
return *this;
|
| 776 |
+
}
|
| 777 |
+
|
| 778 |
+
/// Advances to the next tile in memory.
|
| 779 |
+
///
|
| 780 |
+
/// The first time this method is called, predicates are updated, and the
|
| 781 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 782 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 783 |
+
/// pointer.
|
| 784 |
+
CUTLASS_HOST_DEVICE
|
| 785 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 786 |
+
{
|
| 787 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 788 |
+
operator++();
|
| 789 |
+
return self;
|
| 790 |
+
}
|
| 791 |
+
|
| 792 |
+
/// Clears the predicate set efficiently
|
| 793 |
+
CUTLASS_HOST_DEVICE
|
| 794 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 795 |
+
|
| 796 |
+
CUTLASS_HOST_DEVICE
|
| 797 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 798 |
+
|
| 799 |
+
/// Clears the predicate set efficiently
|
| 800 |
+
CUTLASS_HOST_DEVICE
|
| 801 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 802 |
+
|
| 803 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 804 |
+
CUTLASS_HOST_DEVICE
|
| 805 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 806 |
+
|
| 807 |
+
/// Gets the mask
|
| 808 |
+
CUTLASS_HOST_DEVICE
|
| 809 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 810 |
+
|
| 811 |
+
/// Loads a fragment from memory
|
| 812 |
+
CUTLASS_DEVICE
|
| 813 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 814 |
+
{
|
| 815 |
+
iterator_.load_with_pointer_offset(frag, pointer_offset);
|
| 816 |
+
}
|
| 817 |
+
|
| 818 |
+
/// Loads a fragment from memory
|
| 819 |
+
CUTLASS_DEVICE
|
| 820 |
+
void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
|
| 821 |
+
{
|
| 822 |
+
iterator_.load_with_byte_offset(frag, byte_offset);
|
| 823 |
+
}
|
| 824 |
+
|
| 825 |
+
/// Loads a fragment from memory
|
| 826 |
+
CUTLASS_DEVICE
|
| 827 |
+
void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
|
| 828 |
+
|
| 829 |
+
/// Store a fragment to memory
|
| 830 |
+
CUTLASS_DEVICE
|
| 831 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 832 |
+
{
|
| 833 |
+
iterator_.store_with_pointer_offset(frag, pointer_offset);
|
| 834 |
+
}
|
| 835 |
+
|
| 836 |
+
/// Store a fragment to memory
|
| 837 |
+
CUTLASS_DEVICE
|
| 838 |
+
void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
|
| 839 |
+
{
|
| 840 |
+
iterator_.store_with_byte_offset(frag, byte_offset);
|
| 841 |
+
}
|
| 842 |
+
|
| 843 |
+
/// Store a fragment to memory
|
| 844 |
+
CUTLASS_DEVICE
|
| 845 |
+
void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
|
| 846 |
+
};
|
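// Editor's note (illustration, not from the original file): the row-major and
// column-major specializations above are thin adapters; they only remap matrix
// coordinates into the pitch-linear (contiguous, strided) order before
// delegating to the pitch-linear iterator. A hedged one-liner of that mapping:
//
//   cutlass::MatrixCoord mn(/*row=*/3, /*column=*/17);
//   cutlass::layout::PitchLinearCoord cs(mn.column(), mn.row());  // row-major: contiguous = column
//
// For column-major data the arguments swap: contiguous = row, strided = column.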
| 847 |
+
|
| 848 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 849 |
+
|
| 850 |
+
/// Specialization of PredicatedTileIteratorResidualLast for affine rank-2 data.
|
| 851 |
+
///
|
| 852 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 853 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 854 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 855 |
+
/// MaskedTileIteratorConcept
|
| 856 |
+
///
|
| 857 |
+
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize>
|
| 858 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 859 |
+
Element_,
|
| 860 |
+
layout::AffineRankN<2>,
|
| 861 |
+
AdvanceRank,
|
| 862 |
+
ThreadMap_,
|
| 863 |
+
AccessSize,
|
| 864 |
+
false> {
|
| 865 |
+
public:
|
| 866 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 867 |
+
"Specialization for pitch-linear iterator may advance along the "
|
| 868 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 869 |
+
|
| 870 |
+
using Shape = Shape_;
|
| 871 |
+
using Element = Element_;
|
| 872 |
+
using Layout = layout::AffineRankN<2>;
|
| 873 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 874 |
+
using ThreadMap = ThreadMap_;
|
| 875 |
+
|
| 876 |
+
using Index = typename Layout::Index;
|
| 877 |
+
using LongIndex = typename Layout::LongIndex;
|
| 878 |
+
|
| 879 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 880 |
+
using TensorView = TensorView<Element, Layout>;
|
| 881 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 882 |
+
|
| 883 |
+
using Pointer = Element*;
|
| 884 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 885 |
+
|
| 886 |
+
/// Type used for internal memory accesses
|
| 887 |
+
using AccessType =
|
| 888 |
+
AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
|
| 889 |
+
|
| 890 |
+
/// Underlying iterator to compute the addresses
|
| 891 |
+
using TileAccessIterator = PredicatedTileAccessIteratorResidualLast<Shape,
|
| 892 |
+
Element,
|
| 893 |
+
Layout,
|
| 894 |
+
kAdvanceRank,
|
| 895 |
+
ThreadMap,
|
| 896 |
+
AccessType>;
|
| 897 |
+
|
| 898 |
+
static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
|
| 899 |
+
|
| 900 |
+
/// Fragment object to be loaded or stored
|
| 901 |
+
using Fragment =
|
| 902 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 903 |
+
|
| 904 |
+
/// Predicate vector stores mask to guard accesses
|
| 905 |
+
using Mask = typename TileAccessIterator::Mask;
|
| 906 |
+
|
| 907 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 908 |
+
class Params {
|
| 909 |
+
public:
|
| 910 |
+
friend PredicatedTileIteratorResidualLast;
|
| 911 |
+
|
| 912 |
+
private:
|
| 913 |
+
/// Parameters object
|
| 914 |
+
typename TileAccessIterator::Params params_;
|
| 915 |
+
|
| 916 |
+
public:
|
| 917 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 918 |
+
CUTLASS_HOST_DEVICE
|
| 919 |
+
Params(Layout const& layout) : params_(layout) {}
|
| 920 |
+
|
| 921 |
+
CUTLASS_HOST_DEVICE
|
| 922 |
+
Params() {}
|
| 923 |
+
};
|
| 924 |
+
|
| 925 |
+
private:
|
| 926 |
+
/// Internal pointer type permits fast address arithmetic
|
| 927 |
+
using BytePointer = char*;
|
| 928 |
+
|
| 929 |
+
private:
|
| 930 |
+
//
|
| 931 |
+
// Data members
|
| 932 |
+
//
|
| 933 |
+
|
| 934 |
+
/// Data member to the tile access iterator
|
| 935 |
+
TileAccessIterator address_iterator_;
|
| 936 |
+
|
| 937 |
+
public:
|
| 938 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 939 |
+
/// and thread ID
|
| 940 |
+
CUTLASS_HOST_DEVICE
|
| 941 |
+
PredicatedTileIteratorResidualLast(
|
| 942 |
+
/// Precomputed parameters object
|
| 943 |
+
Params const& params,
|
| 944 |
+
/// Pointer to start of tensor
|
| 945 |
+
Pointer pointer,
|
| 946 |
+
/// Extent of tensor
|
| 947 |
+
TensorCoord extent,
|
| 948 |
+
/// ID of each participating thread
|
| 949 |
+
int thread_id,
|
| 950 |
+
/// Initial offset of threadblock
|
| 951 |
+
TensorCoord const& threadblock_offset,
|
| 952 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 953 |
+
///< gather/scatter at this specialization
|
| 954 |
+
)
|
| 955 |
+
: address_iterator_(params.params_, pointer, extent, thread_id, threadblock_offset)
|
| 956 |
+
{
|
| 957 |
+
}
|
| 958 |
+
|
| 959 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 960 |
+
/// offset
|
| 961 |
+
CUTLASS_HOST_DEVICE
|
| 962 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 963 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 964 |
+
TensorCoord extent, ///< Extent of tensor
|
| 965 |
+
int thread_id ///< ID of each participating thread
|
| 966 |
+
)
|
| 967 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 968 |
+
{
|
| 969 |
+
}
|
| 970 |
+
|
| 971 |
+
/// Adds a pointer offset in units of Element
|
| 972 |
+
CUTLASS_HOST_DEVICE
|
| 973 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 974 |
+
{
|
| 975 |
+
address_iterator_.add_pointer_offset(pointer_offset);
|
| 976 |
+
}
|
| 977 |
+
|
| 978 |
+
/// Advances to the next tile in memory.
|
| 979 |
+
///
|
| 980 |
+
/// The first time this method is called, predicates are updated, and the
|
| 981 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 982 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 983 |
+
/// pointer.
|
| 984 |
+
CUTLASS_HOST_DEVICE
|
| 985 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 986 |
+
{
|
| 987 |
+
if (kAdvanceRank)
|
| 988 |
+
address_iterator_.add_tile_offset(make_Coord(0, 1));
|
| 989 |
+
else
|
| 990 |
+
address_iterator_.add_tile_offset(make_Coord(1, 0));
|
| 991 |
+
|
| 992 |
+
return *this;
|
| 993 |
+
}
|
| 994 |
+
|
| 995 |
+
/// Advances to the next tile in memory.
|
| 996 |
+
///
|
| 997 |
+
/// The first time this method is called, predicates are updated, and the
|
| 998 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 999 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1000 |
+
/// pointer.
|
| 1001 |
+
CUTLASS_HOST_DEVICE
|
| 1002 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 1003 |
+
{
|
| 1004 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 1005 |
+
operator++();
|
| 1006 |
+
return self;
|
| 1007 |
+
}
|
| 1008 |
+
|
| 1009 |
+
/// Clears the predicate set efficiently
|
| 1010 |
+
CUTLASS_HOST_DEVICE
|
| 1011 |
+
void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
|
| 1012 |
+
|
| 1013 |
+
CUTLASS_HOST_DEVICE
|
| 1014 |
+
void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); }
|
| 1015 |
+
|
| 1016 |
+
/// Clears the predicate set efficiently
|
| 1017 |
+
CUTLASS_HOST_DEVICE
|
| 1018 |
+
void enable_mask() { address_iterator_.enable_mask(); }
|
| 1019 |
+
|
| 1020 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 1021 |
+
CUTLASS_HOST_DEVICE
|
| 1022 |
+
void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); }
|
| 1023 |
+
|
| 1024 |
+
/// Gets the mask
|
| 1025 |
+
CUTLASS_HOST_DEVICE
|
| 1026 |
+
void get_mask(Mask& mask) { address_iterator_.get_mask(mask); }
|
| 1027 |
+
|
| 1028 |
+
CUTLASS_DEVICE
|
| 1029 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 1030 |
+
{
|
| 1031 |
+
load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
|
| 1032 |
+
}
|
| 1033 |
+
|
| 1034 |
+
CUTLASS_DEVICE
|
| 1035 |
+
void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
|
| 1036 |
+
{
|
| 1037 |
+
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
|
| 1038 |
+
|
| 1039 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1040 |
+
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
|
| 1041 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1042 |
+
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
|
| 1043 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1044 |
+
for (int v = 0; v < kAccessesPerVector; ++v) {
|
| 1045 |
+
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
|
| 1046 |
+
|
| 1047 |
+
address_iterator_.set_iteration_index(idx);
|
| 1048 |
+
char const* byte_ptr =
|
| 1049 |
+
reinterpret_cast<char const*>(address_iterator_.get()) + byte_offset;
|
| 1050 |
+
|
| 1051 |
+
AccessType const* access_ptr = reinterpret_cast<AccessType const*>(byte_ptr);
|
| 1052 |
+
|
| 1053 |
+
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
|
| 1054 |
+
frag_ptr[idx], access_ptr, address_iterator_.valid());
|
| 1055 |
+
|
| 1056 |
+
++address_iterator_;
|
| 1057 |
+
}
|
| 1058 |
+
}
|
| 1059 |
+
}
|
| 1060 |
+
}
|
| 1061 |
+
|
| 1062 |
+
/// Loads a fragment from memory
|
| 1063 |
+
CUTLASS_DEVICE
|
| 1064 |
+
void load(Fragment& frag) { load_with_byte_offset(frag, 0); }
|
| 1065 |
+
|
| 1066 |
+
/// Store a fragment to memory
|
| 1067 |
+
CUTLASS_DEVICE
|
| 1068 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 1069 |
+
{
|
| 1070 |
+
store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
|
| 1071 |
+
}
|
| 1072 |
+
|
| 1073 |
+
/// Store a fragment to memory
|
| 1074 |
+
CUTLASS_DEVICE
|
| 1075 |
+
void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
|
| 1076 |
+
{
|
| 1077 |
+
address_iterator_.set_iteration_index(0);
|
| 1078 |
+
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
|
| 1079 |
+
|
| 1080 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1081 |
+
for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
|
| 1082 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1083 |
+
for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
|
| 1084 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1085 |
+
for (int v = 0; v < kAccessesPerVector; ++v) {
|
| 1086 |
+
int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
|
| 1087 |
+
|
| 1088 |
+
char* byte_ptr = reinterpret_cast<char*>(address_iterator_.get()) + byte_offset;
|
| 1089 |
+
AccessType* access_ptr = reinterpret_cast<AccessType*>(byte_ptr);
|
| 1090 |
+
|
| 1091 |
+
if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; }
|
| 1092 |
+
++address_iterator_;
|
| 1093 |
+
}
|
| 1094 |
+
}
|
| 1095 |
+
}
|
| 1096 |
+
}
|
| 1097 |
+
|
| 1098 |
+
/// Store a fragment to memory
|
| 1099 |
+
CUTLASS_DEVICE
|
| 1100 |
+
void store(Fragment const& frag) { store_with_byte_offset(frag, 0); }
|
| 1101 |
+
};
|
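// Editor's note (illustration, not from the original file): the affine
// specializations carry one explicit stride per rank, so their Params can be
// built from the two strides of a non-contiguous (for example transposed or
// sliced) view. The strides below are placeholder values in units of elements.
//
//   cutlass::layout::AffineRankN<2> affine_layout(/*stride(0)=*/1, /*stride(1)=*/4096);
//   using AffineIterator = ...;  // an instantiation of the specialization above
//   typename AffineIterator::Params params(affine_layout);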
| 1102 |
+
|
| 1103 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 1104 |
+
|
| 1105 |
+
/// Specialization of PredicatedTileIteratorResidualLast for affine rank 2
|
| 1106 |
+
/// column-major data.
|
| 1107 |
+
///
|
| 1108 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 1109 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 1110 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 1111 |
+
/// MaskedTileIteratorConcept
|
| 1112 |
+
///
|
| 1113 |
+
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize>
|
| 1114 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 1115 |
+
Element_,
|
| 1116 |
+
layout::AffineRank2ColumnMajor,
|
| 1117 |
+
AdvanceRank,
|
| 1118 |
+
ThreadMap_,
|
| 1119 |
+
AccessSize,
|
| 1120 |
+
false> {
|
| 1121 |
+
public:
|
| 1122 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 1123 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 1124 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 1125 |
+
|
| 1126 |
+
using Shape = Shape_;
|
| 1127 |
+
using Element = Element_;
|
| 1128 |
+
using Layout = layout::AffineRank2ColumnMajor;
|
| 1129 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 1130 |
+
using ThreadMap = ThreadMap_;
|
| 1131 |
+
|
| 1132 |
+
using Index = typename Layout::Index;
|
| 1133 |
+
using LongIndex = typename Layout::LongIndex;
|
| 1134 |
+
|
| 1135 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 1136 |
+
using TensorView = TensorView<Element, Layout>;
|
| 1137 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 1138 |
+
|
| 1139 |
+
using Pointer = Element*;
|
| 1140 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 1141 |
+
|
| 1142 |
+
// Map to the underlying AffineRankN<2> layout
|
| 1143 |
+
using UnderlyingIterator =
|
| 1144 |
+
PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
|
| 1145 |
+
Element,
|
| 1146 |
+
layout::AffineRankN<2>,
|
| 1147 |
+
(kAdvanceRank == 0 ? 0 : 1),
|
| 1148 |
+
ThreadMap,
|
| 1149 |
+
AccessSize>;
|
| 1150 |
+
|
| 1151 |
+
using AccessType = typename UnderlyingIterator::AccessType;
|
| 1152 |
+
|
| 1153 |
+
/// Fragment object to be loaded or stored
|
| 1154 |
+
using Fragment =
|
| 1155 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 1156 |
+
|
| 1157 |
+
/// Predicate vector stores mask to guard accesses
|
| 1158 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 1159 |
+
|
| 1160 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 1161 |
+
class Params {
|
| 1162 |
+
private:
|
| 1163 |
+
friend PredicatedTileIteratorResidualLast;
|
| 1164 |
+
|
| 1165 |
+
/// Parameters object
|
| 1166 |
+
typename UnderlyingIterator::Params params_;
|
| 1167 |
+
|
| 1168 |
+
public:
|
| 1169 |
+
CUTLASS_HOST_DEVICE
|
| 1170 |
+
Params() {}
|
| 1171 |
+
|
| 1172 |
+
/// Construct the Params object given an AffineRankN<2> tensor's layout
|
| 1173 |
+
CUTLASS_HOST_DEVICE
|
| 1174 |
+
Params(Layout const& layout)
|
| 1175 |
+
: params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1)))
|
| 1176 |
+
{
|
| 1177 |
+
}
|
| 1178 |
+
};
|
| 1179 |
+
|
| 1180 |
+
private:
|
| 1181 |
+
//
|
| 1182 |
+
// Data members
|
| 1183 |
+
//
|
| 1184 |
+
|
| 1185 |
+
/// Underlying AffineRankN<2> tile iterator
|
| 1186 |
+
UnderlyingIterator iterator_;
|
| 1187 |
+
|
| 1188 |
+
public:
|
| 1189 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 1190 |
+
/// and thread ID
|
| 1191 |
+
CUTLASS_HOST_DEVICE
|
| 1192 |
+
PredicatedTileIteratorResidualLast(
|
| 1193 |
+
Params const& params, ///< Precomputed parameters object
|
| 1194 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 1195 |
+
TensorCoord extent, ///< Extent of tensor
|
| 1196 |
+
int thread_id, ///< ID of each participating thread
|
| 1197 |
+
TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
|
| 1198 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 1199 |
+
///< gather/scatter at this specialization
|
| 1200 |
+
)
|
| 1201 |
+
: iterator_(params.params_,
|
| 1202 |
+
pointer,
|
| 1203 |
+
layout::PitchLinearCoord(extent.row(), extent.column()),
|
| 1204 |
+
thread_id,
|
| 1205 |
+
layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()))
|
| 1206 |
+
{
|
| 1207 |
+
}
|
| 1208 |
+
|
| 1209 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 1210 |
+
/// offset
|
| 1211 |
+
CUTLASS_HOST_DEVICE
|
| 1212 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 1213 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 1214 |
+
TensorCoord extent, ///< Extent of tensor
|
| 1215 |
+
int thread_id ///< ID of each participating thread
|
| 1216 |
+
)
|
| 1217 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 1218 |
+
{
|
| 1219 |
+
}
|
| 1220 |
+
|
| 1221 |
+
/// Adds a pointer offset in units of Element
|
| 1222 |
+
CUTLASS_HOST_DEVICE
|
| 1223 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 1224 |
+
{
|
| 1225 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 1226 |
+
}
|
| 1227 |
+
|
| 1228 |
+
/// Advances to the next tile in memory.
|
| 1229 |
+
///
|
| 1230 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1231 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1232 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1233 |
+
/// pointer.
|
| 1234 |
+
CUTLASS_HOST_DEVICE
|
| 1235 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 1236 |
+
{
|
| 1237 |
+
++iterator_;
|
| 1238 |
+
return *this;
|
| 1239 |
+
}
|
| 1240 |
+
|
| 1241 |
+
/// Advances to the next tile in memory.
|
| 1242 |
+
///
|
| 1243 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1244 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1245 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1246 |
+
/// pointer.
|
| 1247 |
+
CUTLASS_HOST_DEVICE
|
| 1248 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 1249 |
+
{
|
| 1250 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 1251 |
+
operator++();
|
| 1252 |
+
return self;
|
| 1253 |
+
}
|
| 1254 |
+
|
| 1255 |
+
/// Clears the predicate set efficiently
|
| 1256 |
+
CUTLASS_HOST_DEVICE
|
| 1257 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 1258 |
+
|
| 1259 |
+
CUTLASS_HOST_DEVICE
|
| 1260 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 1261 |
+
|
| 1262 |
+
/// Clears the predicate set efficiently
|
| 1263 |
+
CUTLASS_HOST_DEVICE
|
| 1264 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 1265 |
+
|
| 1266 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 1267 |
+
CUTLASS_HOST_DEVICE
|
| 1268 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 1269 |
+
|
| 1270 |
+
/// Gets the mask
|
| 1271 |
+
CUTLASS_HOST_DEVICE
|
| 1272 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 1273 |
+
|
| 1274 |
+
/// Loads a fragment from memory
|
| 1275 |
+
CUTLASS_DEVICE
|
| 1276 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 1277 |
+
{
|
| 1278 |
+
iterator_.load_with_pointer_offset(frag, pointer_offset);
|
| 1279 |
+
}
|
| 1280 |
+
|
| 1281 |
+
/// Loads a fragment from memory
|
| 1282 |
+
CUTLASS_DEVICE
|
| 1283 |
+
void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
|
| 1284 |
+
{
|
| 1285 |
+
iterator_.load_with_byte_offset(frag, byte_offset);
|
| 1286 |
+
}
|
| 1287 |
+
|
| 1288 |
+
/// Loads a fragment from memory
|
| 1289 |
+
CUTLASS_DEVICE
|
| 1290 |
+
void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
|
| 1291 |
+
|
| 1292 |
+
/// Store a fragment to memory
|
| 1293 |
+
CUTLASS_DEVICE
|
| 1294 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 1295 |
+
{
|
| 1296 |
+
iterator_.store_with_pointer_offset(frag, pointer_offset);
|
| 1297 |
+
}
|
| 1298 |
+
|
| 1299 |
+
/// Store a fragment to memory
|
| 1300 |
+
CUTLASS_DEVICE
|
| 1301 |
+
void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
|
| 1302 |
+
{
|
| 1303 |
+
iterator_.store_with_byte_offset(frag, byte_offset);
|
| 1304 |
+
}
|
| 1305 |
+
|
| 1306 |
+
/// Store a fragment to memory
|
| 1307 |
+
CUTLASS_DEVICE
|
| 1308 |
+
void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
|
| 1309 |
+
};
|
| 1310 |
+
|
| 1311 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 1312 |
+
|
| 1313 |
+
/// Specialization of PredicatedTileIteratorResidualLast for affine rank 2
|
| 1314 |
+
/// row-major data.
|
| 1315 |
+
///
|
| 1316 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 1317 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 1318 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 1319 |
+
/// MaskedTileIteratorConcept
|
| 1320 |
+
///
|
| 1321 |
+
template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize>
|
| 1322 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 1323 |
+
Element_,
|
| 1324 |
+
layout::AffineRank2RowMajor,
|
| 1325 |
+
AdvanceRank,
|
| 1326 |
+
ThreadMap_,
|
| 1327 |
+
AccessSize,
|
| 1328 |
+
false> {
|
| 1329 |
+
public:
|
| 1330 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 1331 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 1332 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 1333 |
+
|
| 1334 |
+
using Shape = Shape_;
|
| 1335 |
+
using Element = Element_;
|
| 1336 |
+
using Layout = layout::AffineRank2RowMajor;
|
| 1337 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 1338 |
+
using ThreadMap = ThreadMap_;
|
| 1339 |
+
|
| 1340 |
+
using Index = typename Layout::Index;
|
| 1341 |
+
using LongIndex = typename Layout::LongIndex;
|
| 1342 |
+
|
| 1343 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 1344 |
+
using TensorView = TensorView<Element, Layout>;
|
| 1345 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 1346 |
+
|
| 1347 |
+
using Pointer = Element*;
|
| 1348 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 1349 |
+
|
| 1350 |
+
// Map to the underlying AffineRankN<2> layout
|
| 1351 |
+
using UnderlyingIterator =
|
| 1352 |
+
PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
|
| 1353 |
+
Element,
|
| 1354 |
+
layout::AffineRankN<2>,
|
| 1355 |
+
(kAdvanceRank == 0 ? 1 : 0),
|
| 1356 |
+
ThreadMap,
|
| 1357 |
+
AccessSize>;
|
| 1358 |
+
|
| 1359 |
+
using AccessType = typename UnderlyingIterator::AccessType;
|
| 1360 |
+
|
| 1361 |
+
/// Fragment object to be loaded or stored
|
| 1362 |
+
using Fragment =
|
| 1363 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 1364 |
+
|
| 1365 |
+
/// Predicate vector stores mask to guard accesses
|
| 1366 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 1367 |
+
|
| 1368 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 1369 |
+
class Params {
|
| 1370 |
+
private:
|
| 1371 |
+
friend PredicatedTileIteratorResidualLast;
|
| 1372 |
+
|
| 1373 |
+
/// Parameters object
|
| 1374 |
+
typename UnderlyingIterator::Params params_;
|
| 1375 |
+
|
| 1376 |
+
public:
|
| 1377 |
+
CUTLASS_HOST_DEVICE
|
| 1378 |
+
Params() {}
|
| 1379 |
+
|
| 1380 |
+
/// Construct the Params object given an AffineRankN<2> tensor's layout
|
| 1381 |
+
CUTLASS_HOST_DEVICE
|
| 1382 |
+
Params(Layout const& layout)
|
| 1383 |
+
: params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0)))
|
| 1384 |
+
{
|
| 1385 |
+
}
|
| 1386 |
+
};
|
| 1387 |
+
|
| 1388 |
+
private:
|
| 1389 |
+
//
|
| 1390 |
+
// Data members
|
| 1391 |
+
//
|
| 1392 |
+
|
| 1393 |
+
/// Underlying AffineRankN<2> tile iterator
|
| 1394 |
+
UnderlyingIterator iterator_;
|
| 1395 |
+
|
| 1396 |
+
public:
|
| 1397 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 1398 |
+
/// and thread ID
|
| 1399 |
+
CUTLASS_HOST_DEVICE
|
| 1400 |
+
PredicatedTileIteratorResidualLast(
|
| 1401 |
+
Params const& params, ///< Precomputed parameters object
|
| 1402 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 1403 |
+
TensorCoord extent, ///< Extent of tensor
|
| 1404 |
+
int thread_id, ///< ID of each participating thread
|
| 1405 |
+
TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
|
| 1406 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 1407 |
+
///< gather/scatter at this specialization
|
| 1408 |
+
)
|
| 1409 |
+
: iterator_(params.params_,
|
| 1410 |
+
pointer,
|
| 1411 |
+
layout::PitchLinearCoord(extent.column(), extent.row()),
|
| 1412 |
+
thread_id,
|
| 1413 |
+
layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()))
|
| 1414 |
+
{
|
| 1415 |
+
}
|
| 1416 |
+
|
| 1417 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 1418 |
+
/// offset
|
| 1419 |
+
CUTLASS_HOST_DEVICE
|
| 1420 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 1421 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 1422 |
+
TensorCoord extent, ///< Extent of tensor
|
| 1423 |
+
int thread_id ///< ID of each participating thread
|
| 1424 |
+
)
|
| 1425 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 1426 |
+
{
|
| 1427 |
+
}
|
| 1428 |
+
|
| 1429 |
+
/// Adds a pointer offset in units of Element
|
| 1430 |
+
CUTLASS_HOST_DEVICE
|
| 1431 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 1432 |
+
{
|
| 1433 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 1434 |
+
}
|
| 1435 |
+
|
| 1436 |
+
/// Advances to the next tile in memory.
|
| 1437 |
+
///
|
| 1438 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1439 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1440 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1441 |
+
/// pointer.
|
| 1442 |
+
CUTLASS_HOST_DEVICE
|
| 1443 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 1444 |
+
{
|
| 1445 |
+
++iterator_;
|
| 1446 |
+
return *this;
|
| 1447 |
+
}
|
| 1448 |
+
|
| 1449 |
+
/// Advances to the next tile in memory.
|
| 1450 |
+
///
|
| 1451 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1452 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1453 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1454 |
+
/// pointer.
|
| 1455 |
+
CUTLASS_HOST_DEVICE
|
| 1456 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 1457 |
+
{
|
| 1458 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 1459 |
+
operator++();
|
| 1460 |
+
return self;
|
| 1461 |
+
}
|
| 1462 |
+
|
| 1463 |
+
/// Clears the predicate set efficiently
|
| 1464 |
+
CUTLASS_HOST_DEVICE
|
| 1465 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 1466 |
+
|
| 1467 |
+
CUTLASS_HOST_DEVICE
|
| 1468 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 1469 |
+
|
| 1470 |
+
/// Clears the predicate set efficiently
|
| 1471 |
+
CUTLASS_HOST_DEVICE
|
| 1472 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 1473 |
+
|
| 1474 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 1475 |
+
CUTLASS_HOST_DEVICE
|
| 1476 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 1477 |
+
|
| 1478 |
+
/// Gets the mask
|
| 1479 |
+
CUTLASS_HOST_DEVICE
|
| 1480 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 1481 |
+
|
| 1482 |
+
/// Loads a fragment from memory
|
| 1483 |
+
CUTLASS_DEVICE
|
| 1484 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 1485 |
+
{
|
| 1486 |
+
iterator_.load_with_pointer_offset(frag, pointer_offset);
|
| 1487 |
+
}
|
| 1488 |
+
|
| 1489 |
+
/// Loads a fragment from memory
|
| 1490 |
+
CUTLASS_DEVICE
|
| 1491 |
+
void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
|
| 1492 |
+
{
|
| 1493 |
+
iterator_.load_with_byte_offset(frag, byte_offset);
|
| 1494 |
+
}
|
| 1495 |
+
|
| 1496 |
+
/// Loads a fragment from memory
|
| 1497 |
+
CUTLASS_DEVICE
|
| 1498 |
+
void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
|
| 1499 |
+
|
| 1500 |
+
/// Store a fragment to memory
|
| 1501 |
+
CUTLASS_DEVICE
|
| 1502 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 1503 |
+
{
|
| 1504 |
+
iterator_.store_with_pointer_offset(frag, pointer_offset);
|
| 1505 |
+
}
|
| 1506 |
+
|
| 1507 |
+
/// Store a fragment to memory
|
| 1508 |
+
CUTLASS_DEVICE
|
| 1509 |
+
void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
|
| 1510 |
+
{
|
| 1511 |
+
iterator_.store_with_byte_offset(frag, byte_offset);
|
| 1512 |
+
}
|
| 1513 |
+
|
| 1514 |
+
/// Store a fragment to memory
|
| 1515 |
+
CUTLASS_DEVICE
|
| 1516 |
+
void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
|
| 1517 |
+
};
|
| 1518 |
+
|
| 1519 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 1520 |
+
|
| 1521 |
+
/// Specialization of PredicatedTileIteratorResidualLast for interleaved data.
|
| 1522 |
+
/// It is mapped to the congruous layout.
|
| 1523 |
+
///
|
| 1524 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 1525 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 1526 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 1527 |
+
/// MaskedTileIteratorConcept
|
| 1528 |
+
///
|
| 1529 |
+
|
| 1530 |
+
template <typename Shape_,
|
| 1531 |
+
typename Element_,
|
| 1532 |
+
int AdvanceRank,
|
| 1533 |
+
typename ThreadMap_,
|
| 1534 |
+
int AccessSize,
|
| 1535 |
+
int InterleavedK>
|
| 1536 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 1537 |
+
Element_,
|
| 1538 |
+
layout::ColumnMajorInterleaved<InterleavedK>,
|
| 1539 |
+
AdvanceRank,
|
| 1540 |
+
ThreadMap_,
|
| 1541 |
+
AccessSize,
|
| 1542 |
+
false> {
|
| 1543 |
+
public:
|
| 1544 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 1545 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 1546 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 1547 |
+
|
| 1548 |
+
using Shape = Shape_;
|
| 1549 |
+
using Element = Element_;
|
| 1550 |
+
static int const kInterleavedK = InterleavedK;
|
| 1551 |
+
using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
|
| 1552 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 1553 |
+
using ThreadMap = ThreadMap_;
|
| 1554 |
+
|
| 1555 |
+
using Index = typename Layout::Index;
|
| 1556 |
+
using LongIndex = typename Layout::LongIndex;
|
| 1557 |
+
|
| 1558 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 1559 |
+
using TensorView = TensorView<Element, Layout>;
|
| 1560 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 1561 |
+
|
| 1562 |
+
using Pointer = Element*;
|
| 1563 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 1564 |
+
|
| 1565 |
+
using UnderlyingIterator = PredicatedTileIteratorResidualLast<
|
| 1566 |
+
layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>,
|
| 1567 |
+
Element,
|
| 1568 |
+
layout::PitchLinear,
|
| 1569 |
+
(kAdvanceRank == 0 ? 0 : 1),
|
| 1570 |
+
ThreadMap,
|
| 1571 |
+
AccessSize>;
|
| 1572 |
+
|
| 1573 |
+
using AccessType = typename UnderlyingIterator::AccessType;
|
| 1574 |
+
|
| 1575 |
+
/// Fragment object to be loaded or stored
|
| 1576 |
+
using Fragment =
|
| 1577 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 1578 |
+
|
| 1579 |
+
/// Predicate vector stores mask to guard accesses
|
| 1580 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 1581 |
+
|
| 1582 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 1583 |
+
class Params {
|
| 1584 |
+
private:
|
| 1585 |
+
friend PredicatedTileIteratorResidualLast;
|
| 1586 |
+
|
| 1587 |
+
/// Parameters object
|
| 1588 |
+
typename UnderlyingIterator::Params params_;
|
| 1589 |
+
|
| 1590 |
+
public:
|
| 1591 |
+
CUTLASS_HOST_DEVICE
|
| 1592 |
+
Params() {}
|
| 1593 |
+
|
| 1594 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 1595 |
+
CUTLASS_HOST_DEVICE
|
| 1596 |
+
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
|
| 1597 |
+
|
| 1598 |
+
CUTLASS_HOST_DEVICE
|
| 1599 |
+
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
|
| 1600 |
+
};
|
| 1601 |
+
|
| 1602 |
+
private:
|
| 1603 |
+
//
|
| 1604 |
+
// Data members
|
| 1605 |
+
//
|
| 1606 |
+
|
| 1607 |
+
/// Underlying pitch-linear tile iterator
|
| 1608 |
+
UnderlyingIterator iterator_;
|
| 1609 |
+
|
| 1610 |
+
public:
|
| 1611 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 1612 |
+
/// and thread ID
|
| 1613 |
+
CUTLASS_HOST_DEVICE
|
| 1614 |
+
PredicatedTileIteratorResidualLast(
|
| 1615 |
+
/// Precomputed parameters object
|
| 1616 |
+
Params const& params,
|
| 1617 |
+
/// Pointer to start of tensor
|
| 1618 |
+
Pointer pointer,
|
| 1619 |
+
/// Extent of tensor
|
| 1620 |
+
TensorCoord extent,
|
| 1621 |
+
/// ID of each participating thread
|
| 1622 |
+
int thread_id,
|
| 1623 |
+
/// Initial offset of threadblock
|
| 1624 |
+
TensorCoord const& threadblock_offset,
|
| 1625 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 1626 |
+
///< gather/scatter at this specialization
|
| 1627 |
+
)
|
| 1628 |
+
: iterator_(params.params_,
|
| 1629 |
+
pointer,
|
| 1630 |
+
layout::PitchLinearCoord(extent.row() * kInterleavedK,
|
| 1631 |
+
extent.column() / kInterleavedK),
|
| 1632 |
+
thread_id,
|
| 1633 |
+
layout::PitchLinearCoord(threadblock_offset.row() * kInterleavedK,
|
| 1634 |
+
threadblock_offset.column() / kInterleavedK))
|
| 1635 |
+
{
|
| 1636 |
+
}
|
| 1637 |
+
|
| 1638 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 1639 |
+
/// offset
|
| 1640 |
+
CUTLASS_HOST_DEVICE
|
| 1641 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 1642 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 1643 |
+
TensorCoord extent, ///< Extent of tensor
|
| 1644 |
+
int thread_id ///< ID of each participating thread
|
| 1645 |
+
)
|
| 1646 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 1647 |
+
{
|
| 1648 |
+
}
|
| 1649 |
+
|
| 1650 |
+
/// Adds a pointer offset in units of Element
|
| 1651 |
+
CUTLASS_HOST_DEVICE
|
| 1652 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 1653 |
+
{
|
| 1654 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 1655 |
+
}
|
| 1656 |
+
|
| 1657 |
+
/// Advances to the next tile in memory.
|
| 1658 |
+
///
|
| 1659 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1660 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1661 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1662 |
+
/// pointer.
|
| 1663 |
+
CUTLASS_HOST_DEVICE
|
| 1664 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 1665 |
+
{
|
| 1666 |
+
++iterator_;
|
| 1667 |
+
return *this;
|
| 1668 |
+
}
|
| 1669 |
+
|
| 1670 |
+
/// Advances to the next tile in memory.
|
| 1671 |
+
///
|
| 1672 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1673 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1674 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1675 |
+
/// pointer.
|
| 1676 |
+
CUTLASS_HOST_DEVICE
|
| 1677 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 1678 |
+
{
|
| 1679 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 1680 |
+
operator++();
|
| 1681 |
+
return self;
|
| 1682 |
+
}
|
| 1683 |
+
|
| 1684 |
+
/// Clears the predicate set efficiently
|
| 1685 |
+
CUTLASS_HOST_DEVICE
|
| 1686 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 1687 |
+
|
| 1688 |
+
CUTLASS_HOST_DEVICE
|
| 1689 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 1690 |
+
|
| 1691 |
+
/// Clears the predicate set efficiently
|
| 1692 |
+
CUTLASS_HOST_DEVICE
|
| 1693 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 1694 |
+
|
| 1695 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 1696 |
+
CUTLASS_HOST_DEVICE
|
| 1697 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 1698 |
+
|
| 1699 |
+
/// Gets the mask
|
| 1700 |
+
CUTLASS_HOST_DEVICE
|
| 1701 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 1702 |
+
|
| 1703 |
+
/// Loads a fragment from memory
|
| 1704 |
+
CUTLASS_DEVICE
|
| 1705 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 1706 |
+
{
|
| 1707 |
+
iterator_.load_with_pointer_offset(frag, pointer_offset);
|
| 1708 |
+
}
|
| 1709 |
+
|
| 1710 |
+
/// Loads a fragment from memory
|
| 1711 |
+
CUTLASS_DEVICE
|
| 1712 |
+
void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
|
| 1713 |
+
|
| 1714 |
+
/// Store a fragment to memory
|
| 1715 |
+
CUTLASS_DEVICE
|
| 1716 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 1717 |
+
{
|
| 1718 |
+
iterator_.store_with_pointer_offset(frag, pointer_offset);
|
| 1719 |
+
}
|
| 1720 |
+
|
| 1721 |
+
/// Store a fragment to memory
|
| 1722 |
+
CUTLASS_DEVICE
|
| 1723 |
+
void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
|
| 1724 |
+
};
|
| 1725 |
+
|
| 1726 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 1727 |
+
|
| 1728 |
+
/// Specialization of PredicatedTileIteratorResidualLast for interleaved-32
|
| 1729 |
+
/// data. It is mapped to the congruous layout.
|
| 1730 |
+
///
|
| 1731 |
+
/// Satisfies: ForwardTileIteratorConcept |
|
| 1732 |
+
/// ReadableContiguousTileIteratorConcept |
|
| 1733 |
+
/// WriteableContiguousTileIteratorConcept |
|
| 1734 |
+
/// MaskedTileIteratorConcept
|
| 1735 |
+
///
|
| 1736 |
+
template <typename Shape_,
|
| 1737 |
+
typename Element_,
|
| 1738 |
+
int AdvanceRank,
|
| 1739 |
+
typename ThreadMap_,
|
| 1740 |
+
int AccessSize,
|
| 1741 |
+
int InterleavedK>
|
| 1742 |
+
class PredicatedTileIteratorResidualLast<Shape_,
|
| 1743 |
+
Element_,
|
| 1744 |
+
layout::RowMajorInterleaved<InterleavedK>,
|
| 1745 |
+
AdvanceRank,
|
| 1746 |
+
ThreadMap_,
|
| 1747 |
+
AccessSize,
|
| 1748 |
+
false> {
|
| 1749 |
+
public:
|
| 1750 |
+
static_assert(AdvanceRank == 0 || AdvanceRank == 1,
|
| 1751 |
+
"Specialization for pitch-linear iterator may along advance along the "
|
| 1752 |
+
"contiguous(rank=0) or strided(rank=1) dimension.");
|
| 1753 |
+
|
| 1754 |
+
using Shape = Shape_;
|
| 1755 |
+
using Element = Element_;
|
| 1756 |
+
static int const kInterleavedK = InterleavedK;
|
| 1757 |
+
using Layout = layout::RowMajorInterleaved<kInterleavedK>;
|
| 1758 |
+
static int const kAdvanceRank = AdvanceRank;
|
| 1759 |
+
using ThreadMap = ThreadMap_;
|
| 1760 |
+
|
| 1761 |
+
using Index = typename Layout::Index;
|
| 1762 |
+
using LongIndex = typename Layout::LongIndex;
|
| 1763 |
+
|
| 1764 |
+
using TensorRef = TensorRef<Element, Layout>;
|
| 1765 |
+
using TensorView = TensorView<Element, Layout>;
|
| 1766 |
+
using TensorCoord = typename Layout::TensorCoord;
|
| 1767 |
+
|
| 1768 |
+
using Pointer = Element*;
|
| 1769 |
+
using NonConstPointer = typename platform::remove_const<Element>::type*;
|
| 1770 |
+
|
| 1771 |
+
using UnderlyingIterator = PredicatedTileIteratorResidualLast<
|
| 1772 |
+
layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>,
|
| 1773 |
+
Element,
|
| 1774 |
+
layout::PitchLinear,
|
| 1775 |
+
(kAdvanceRank == 0 ? 1 : 0),
|
| 1776 |
+
ThreadMap,
|
| 1777 |
+
AccessSize>;
|
| 1778 |
+
|
| 1779 |
+
using AccessType = typename UnderlyingIterator::AccessType;
|
| 1780 |
+
|
| 1781 |
+
/// Fragment object to be loaded or stored
|
| 1782 |
+
using Fragment =
|
| 1783 |
+
cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
|
| 1784 |
+
|
| 1785 |
+
/// Predicate vector stores mask to guard accesses
|
| 1786 |
+
using Mask = typename UnderlyingIterator::Mask;
|
| 1787 |
+
|
| 1788 |
+
/// Parameters object is precomputed state and is host-constructible
|
| 1789 |
+
class Params {
|
| 1790 |
+
private:
|
| 1791 |
+
friend PredicatedTileIteratorResidualLast;
|
| 1792 |
+
|
| 1793 |
+
/// Parameters object
|
| 1794 |
+
typename UnderlyingIterator::Params params_;
|
| 1795 |
+
|
| 1796 |
+
public:
|
| 1797 |
+
CUTLASS_HOST_DEVICE
|
| 1798 |
+
Params() {}
|
| 1799 |
+
|
| 1800 |
+
/// Construct the Params object given a pitch-linear tensor's layout
|
| 1801 |
+
CUTLASS_HOST_DEVICE
|
| 1802 |
+
Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
|
| 1803 |
+
|
| 1804 |
+
CUTLASS_HOST_DEVICE
|
| 1805 |
+
Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
|
| 1806 |
+
};
|
| 1807 |
+
|
| 1808 |
+
private:
|
| 1809 |
+
//
|
| 1810 |
+
// Data members
|
| 1811 |
+
//
|
| 1812 |
+
|
| 1813 |
+
/// Underlying pitch-linear tile iterator
|
| 1814 |
+
UnderlyingIterator iterator_;
|
| 1815 |
+
|
| 1816 |
+
public:
|
| 1817 |
+
/// Constructs a TileIterator from its precomputed state, threadblock offset,
|
| 1818 |
+
/// and thread ID
|
| 1819 |
+
CUTLASS_HOST_DEVICE
|
| 1820 |
+
PredicatedTileIteratorResidualLast(
|
| 1821 |
+
/// Precomputed parameters object
|
| 1822 |
+
Params const& params,
|
| 1823 |
+
/// Pointer to start of tensor
|
| 1824 |
+
Pointer pointer,
|
| 1825 |
+
/// Extent of tensor
|
| 1826 |
+
TensorCoord extent,
|
| 1827 |
+
/// ID of each participating thread
|
| 1828 |
+
int thread_id,
|
| 1829 |
+
/// Initial offset of threadblock
|
| 1830 |
+
TensorCoord const& threadblock_offset,
|
| 1831 |
+
int const* indices = nullptr ///< gather/scatter indices, note no support for
|
| 1832 |
+
///< gather/scatter at this specialization
|
| 1833 |
+
)
|
| 1834 |
+
: iterator_(params.params_,
|
| 1835 |
+
pointer,
|
| 1836 |
+
layout::PitchLinearCoord(extent.column() * kInterleavedK,
|
| 1837 |
+
extent.row() / kInterleavedK),
|
| 1838 |
+
thread_id,
|
| 1839 |
+
layout::PitchLinearCoord(threadblock_offset.column() * kInterleavedK,
|
| 1840 |
+
threadblock_offset.row() / kInterleavedK))
|
| 1841 |
+
{
|
| 1842 |
+
}
|
| 1843 |
+
|
| 1844 |
+
/// Construct a PredicatedTileIteratorResidualLast with zero threadblock
|
| 1845 |
+
/// offset
|
| 1846 |
+
CUTLASS_HOST_DEVICE
|
| 1847 |
+
PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
|
| 1848 |
+
Pointer pointer, ///< Pointer to start of tensor
|
| 1849 |
+
TensorCoord extent, ///< Extent of tensor
|
| 1850 |
+
int thread_id ///< ID of each participating thread
|
| 1851 |
+
)
|
| 1852 |
+
: PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
|
| 1853 |
+
{
|
| 1854 |
+
}
|
| 1855 |
+
|
| 1856 |
+
/// Adds a pointer offset in units of Element
|
| 1857 |
+
CUTLASS_HOST_DEVICE
|
| 1858 |
+
void add_pointer_offset(LongIndex pointer_offset)
|
| 1859 |
+
{
|
| 1860 |
+
iterator_.add_pointer_offset(pointer_offset);
|
| 1861 |
+
}
|
| 1862 |
+
|
| 1863 |
+
/// Advances to the next tile in memory.
|
| 1864 |
+
///
|
| 1865 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1866 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1867 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1868 |
+
/// pointer.
|
| 1869 |
+
CUTLASS_HOST_DEVICE
|
| 1870 |
+
PredicatedTileIteratorResidualLast& operator++()
|
| 1871 |
+
{
|
| 1872 |
+
++iterator_;
|
| 1873 |
+
return *this;
|
| 1874 |
+
}
|
| 1875 |
+
|
| 1876 |
+
/// Advances to the next tile in memory.
|
| 1877 |
+
///
|
| 1878 |
+
/// The first time this method is called, predicates are updated, and the
|
| 1879 |
+
/// iterator's internal pointer is reverted to the first "steady state" tile.
|
| 1880 |
+
/// Subsequent calls are lightweight and must only update the internal
|
| 1881 |
+
/// pointer.
|
| 1882 |
+
CUTLASS_HOST_DEVICE
|
| 1883 |
+
PredicatedTileIteratorResidualLast operator++(int)
|
| 1884 |
+
{
|
| 1885 |
+
PredicatedTileIteratorResidualLast self(*this);
|
| 1886 |
+
operator++();
|
| 1887 |
+
return self;
|
| 1888 |
+
}
|
| 1889 |
+
|
| 1890 |
+
/// Clears the predicate set efficiently
|
| 1891 |
+
CUTLASS_HOST_DEVICE
|
| 1892 |
+
void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
|
| 1893 |
+
|
| 1894 |
+
CUTLASS_HOST_DEVICE
|
| 1895 |
+
void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
|
| 1896 |
+
|
| 1897 |
+
/// Clears the predicate set efficiently
|
| 1898 |
+
CUTLASS_HOST_DEVICE
|
| 1899 |
+
void enable_mask() { iterator_.enable_mask(); }
|
| 1900 |
+
|
| 1901 |
+
/// Sets the predicate mask, overriding value stored in predicate iterator
|
| 1902 |
+
CUTLASS_HOST_DEVICE
|
| 1903 |
+
void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
|
| 1904 |
+
|
| 1905 |
+
/// Gets the mask
|
| 1906 |
+
CUTLASS_HOST_DEVICE
|
| 1907 |
+
void get_mask(Mask& mask) { iterator_.get_mask(mask); }
|
| 1908 |
+
|
| 1909 |
+
/// Loads a fragment from memory
|
| 1910 |
+
CUTLASS_DEVICE
|
| 1911 |
+
void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
|
| 1912 |
+
{
|
| 1913 |
+
iterator_.load_with_pointer_offset(frag, pointer_offset);
|
| 1914 |
+
}
|
| 1915 |
+
|
| 1916 |
+
/// Loads a fragment from memory
|
| 1917 |
+
CUTLASS_DEVICE
|
| 1918 |
+
void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
|
| 1919 |
+
|
| 1920 |
+
/// Store a fragment to memory
|
| 1921 |
+
CUTLASS_DEVICE
|
| 1922 |
+
void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
|
| 1923 |
+
{
|
| 1924 |
+
iterator_.store_with_pointer_offset(frag, pointer_offset);
|
| 1925 |
+
}
|
| 1926 |
+
|
| 1927 |
+
/// Store a fragment to memory
|
| 1928 |
+
CUTLASS_DEVICE
|
| 1929 |
+
void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
|
| 1930 |
+
};
|
| 1931 |
+
|
| 1932 |
+
////////////////////////////////////////////////////////////////////////////////
|
| 1933 |
+
|
| 1934 |
+
} // namespace threadblock
|
| 1935 |
+
} // namespace transform
|
| 1936 |
+
} // namespace cutlass
|
| 1937 |
+
|
| 1938 |
+
////////////////////////////////////////////////////////////////////////////////
|
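A minimal usage sketch of the "residual last" predicate scheduling, assuming a hypothetical threadblock mainloop; `consume_k_tiles`, `Iterator`, `Fragment`, and `k_tiles` are illustrative names and not part of this header:

template <typename Iterator, typename Fragment>
CUTLASS_DEVICE void consume_k_tiles(Iterator iter, int k_tiles)
{
    Fragment frag;
    // Predicates for the partial (residual) tile are applied on the last
    // iteration only; all earlier tiles use the full "steady state" mask.
    iter.set_residual_tile(k_tiles == 1);
    for (; k_tiles > 0; --k_tiles) {
        frag.clear();
        iter.load(frag);
        ++iter;                             // cheap advance once in steady state
        iter.set_residual_tile(k_tiles == 2);  // the next tile is the residual one
        iter.clear_mask(k_tiles == 1);         // mask off loads beyond the final tile
        // ... hand `frag` to the warp-level MMA here ...
    }
}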
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h
ADDED
@@ -0,0 +1,57 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

#include "warp_iterator_from_smem.h"

template <typename WarpIterator>
struct TransposeWarpIterator {
    using Iterator = char;
    static bool constexpr kSupportsTranspose = false;
};

template <
    /// Operand identity
    cutlass::gemm::Operand Operand,
    /// Data type of A elements
    typename Element,
    bool kTranspose>
struct TransposeWarpIterator<
    cutlass::gemm::warp::WarpIteratorFromSmem<Operand, Element, kTranspose>> {
    using Iterator = cutlass::gemm::warp::WarpIteratorFromSmem<Operand, Element, !kTranspose>;
    static bool constexpr kSupportsTranspose = true;
};
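A short compile-time usage sketch of this trait (illustrative only; the alias and variable names below are hypothetical, not part of the header):

// Query whether a warp iterator can be flipped to read the transposed operand,
// and obtain the transposed iterator type when it can.
template <typename WarpIterator>
using TransposedWarpIteratorOf = typename TransposeWarpIterator<WarpIterator>::Iterator;

template <typename WarpIterator>
constexpr bool kCanTransposeOperand = TransposeWarpIterator<WarpIterator>::kSupportsTranspose;

// Only WarpIteratorFromSmem specializations report kSupportsTranspose == true,
// so a kernel that must read A^T from the shared-memory tile holding A would guard:
// static_assert(kCanTransposeOperand<MyWarpIterator>, "operand transpose unsupported");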
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h
ADDED
@@ -0,0 +1,269 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
 * reserved. SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*! \file
    \brief Inspired from "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h".
    Loads tiles of GEMM operands from a RowMajor shared-memory layout into
    registers for use by A100 TensorCores.

    The differences with "mma_tensor_op_tile_access_iterator.h" are that:
    (1) we use "ldmatrix" to load tiles, rather than manual loads (slightly faster);
    (2) we support transposing the operand (eg reading `A.transpose()` when the
        shared memory holds `A`).

    This is only implemented for the specific shapes.
*/
#pragma once

#include <cutlass/gemm/gemm.h>

////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {

template <
    /// Operand identity
    Operand Operand_,
    /// Data type of A elements
    typename Element_,
    bool kTranspose = false>
class WarpIteratorFromSmem {
public:
    /// Shape of tile to load (concept: MatrixShape)
    using Shape = cutlass::MatrixShape<32, 32>;

    /// Operand tag
    static Operand const kOperand = Operand_;

    /// Basic check
    static_assert(
        kOperand == Operand::kA || kOperand == Operand::kB,
        "WarpIteratorFromSmem may only be instantiated for A or B operands to warp-level Mma.");

    /// Element type
    using Element = Element_;
    static_assert(sizeof_bits<Element>::value == 16, "Only supported for half");

    /// Layout of source tile
    using Layout = cutlass::layout::RowMajor;

    /// Shape of one matrix product operation (concept: MatrixShape)
    using InstructionShape = cutlass::MatrixShape<16, 8>;

    /// Delta between *MMA operations (in units of *MMA operations, concept:
    /// MatrixShape)
    static int const kOpDelta = 1;

    /// Number of participating threads
    static int const kThreads = 32;

    /// TensorRef type for loading element from a tensor
    using TensorRef = TensorRef<Element, Layout>;

    /// Index type
    using Index = typename TensorRef::Index;

    /// Long Index type
    using LongIndex = typename TensorRef::LongIndex;

    /// Coordinate for an element in the tensor
    using TensorCoord = typename TensorRef::TensorCoord;

    /// Number of elements accessed per Shared Memory load
    static int const kElementsPerAccess =
        (sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);

    using InstructionCount = MatrixShape<Shape::kRow / InstructionShape::kRow,
                                         Shape::kColumn / InstructionShape::kColumn>;

    static int const kIterations = (kOperand == Operand::kA) ? InstructionCount::kColumn
                                                             : InstructionCount::kRow;

public:
    //
    // Derived quantities
    //

    /// Fragment object holding a thread's part of a tile
    using Fragment =
        Array<Element,
              (kOperand == Operand::kA) ? (Shape::kRow * InstructionShape::kColumn / kThreads)
                                        : (Shape::kColumn * InstructionShape::kRow / kThreads)>;

    /// Memory access type
    // using AccessType = AlignedArray<Element, kElementsPerAccess>;
    using AccessType = Array<unsigned, 4>;

    static int constexpr kWarpShapeDivisibleInner =
        (kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow);
    static int constexpr kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
    static int const kTilesPerInstruction = InstructionShape::kRow / 8;

private:
    /// Underlying tensor reference
    TensorRef ref_;

    /// Origin
    MatrixCoord origin_;

    /// Iterations in a tile
    int iterations_;

public:
    /// Constructor from TensorRef
    CUTLASS_HOST_DEVICE
    WarpIteratorFromSmem(TensorRef const& ref, int lane_id)
        : WarpIteratorFromSmem(ref, {Shape::kRow, Shape::kColumn}, lane_id)
    {
    }
    CUTLASS_HOST_DEVICE
    WarpIteratorFromSmem(TensorRef const& ref, TensorCoord extent, int lane_id)
        : ref_(ref), iterations_(0)
    {
        int ldsm_vec_num = (lane_id >> 3);
        if (kOperand == Operand::kA) {
            origin_ = MatrixCoord(lane_id % 8, 0);
            static_assert(InstructionCount::kRow * kAccessesInner * kTilesPerInstruction == 4, "");
            CUTLASS_PRAGMA_UNROLL
            for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
                CUTLASS_PRAGMA_UNROLL
                for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
                    CUTLASS_PRAGMA_UNROLL
                    for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction;
                         ++access_m_idx) {
                        int access_idx =
                            access_m_idx +
                            kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);

                        MatrixCoord offset(access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
                                           inner_idx * 4 * kElementsPerAccess);

                        if (access_idx == ldsm_vec_num) {
                            if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); }
                            origin_ += offset;
                        }
                    }
                }
            }
        } else {
            origin_ = MatrixCoord(0, lane_id % 8);
            static_assert(InstructionCount::kColumn * kAccessesInner == 4, "");
            CUTLASS_PRAGMA_UNROLL
            for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
                CUTLASS_PRAGMA_UNROLL
                for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
                    int access_idx = inner_idx + kAccessesInner * inst_n_idx;

                    MatrixCoord offset(inner_idx * 4 * kElementsPerAccess, inst_n_idx * 8);

                    if (access_idx == ldsm_vec_num) {
                        if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); }
                        origin_ += offset;
                    }
                }
            }
        }

        ref_.add_coord_offset(origin_);
    }

    /// Advances an iterator along logical dimensions of matrix in units of whole
    /// tiles
    CUTLASS_HOST_DEVICE
    WarpIteratorFromSmem& add_tile_offset(TensorCoord const& tile_offset)
    {
        TensorCoord coord_offset(tile_offset.row() * Shape::kRow,
                                 tile_offset.column() * Shape::kColumn);
        if (kTranspose) { coord_offset = TensorCoord{coord_offset.column(), coord_offset.row()}; }
        origin_ += coord_offset;

        ref_.add_coord_offset(coord_offset);

        return *this;
    }

    /// Advances the iterator along the advance dimension
    CUTLASS_DEVICE
    void advance()
    {
        if (kOperand == Operand::kA) {
            add_tile_offset({0, 1});
        } else {
            add_tile_offset({1, 0});
        }

        iterations_ = 0;
    }

    /// increase iterations in a tile
    CUTLASS_HOST_DEVICE
    WarpIteratorFromSmem& operator++()
    {
        iterations_++;

        if (iterations_ >= kIterations) advance();

        return *this;
    }

    /// Loads a fragment from memory at the location pointed to by the iterator.
    CUTLASS_DEVICE
    void load(Fragment& frag) const
    {
        AccessType* access_ptr = reinterpret_cast<AccessType*>(&frag);
        using LoadLayout =
            typename platform::conditional<kTranspose, layout::ColumnMajor, layout::RowMajor>::type;

        MatrixCoord offset;
        if (kOperand == Operand::kA) {
            offset = MatrixCoord(0, iterations_ * InstructionShape::kColumn);
        } else {
            offset = MatrixCoord(iterations_ * InstructionShape::kRow, 0);
        }
        if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); }
        cutlass::arch::ldsm<LoadLayout, 4>(access_ptr[0], ref_.data() + ref_.offset(offset));
    }
};

////////////////////////////////////////////////////////////////////////////////

}  // namespace warp
}  // namespace gemm
}  // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
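A minimal usage sketch for this iterator, assuming a hypothetical 32x32 half-precision staging tile in shared memory; `load_warp_tiles`, `WarpIteratorA`, and `smem_ref` are illustrative names, not part of the header:

// Each lane computes its ldmatrix origin from lane_id in the constructor, then
// walks the kIterations (here 32/8 = 4) operand slices of the 32x32 tile.
using WarpIteratorA =
    cutlass::gemm::warp::WarpIteratorFromSmem<cutlass::gemm::Operand::kA,
                                              cutlass::half_t,
                                              /*kTranspose=*/false>;

CUTLASS_DEVICE void load_warp_tiles(typename WarpIteratorA::TensorRef smem_ref, int lane_id)
{
    WarpIteratorA iter(smem_ref, lane_id);
    typename WarpIteratorA::Fragment frag;
    CUTLASS_PRAGMA_UNROLL
    for (int k = 0; k < WarpIteratorA::kIterations; ++k) {
        iter.load(frag);  // one ldmatrix x4 per iteration
        ++iter;           // advances within the tile, then to the next 32x32 tile
        // ... issue the warp-level mma with `frag` here ...
    }
}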
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_backward.h
ADDED
@@ -0,0 +1,1965 @@
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 3 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice, this
|
| 9 |
+
* list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holdvr nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 22 |
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 23 |
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 25 |
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 26 |
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 27 |
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 28 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 29 |
+
*
|
| 30 |
+
**************************************************************************************************/
|
| 31 |
+
|
| 32 |
+
// Copyright (c) Microsoft Corporation.
|
| 33 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 34 |
+
|
| 35 |
+
// DeepSpeed Team
|
| 36 |
+
|
| 37 |
+
#pragma once
|
| 38 |
+
|
| 39 |
+
#include <cmath>
|
| 40 |
+
#include <type_traits>
|
| 41 |
+
#include <vector>
|
| 42 |
+
|
| 43 |
+
#include <cuda_fp16.h>
|
| 44 |
+
|
| 45 |
+
#include "cutlass/cutlass.h"
|
| 46 |
+
#include "cutlass/epilogue/thread/linear_combination.h"
|
| 47 |
+
#include "cutlass/epilogue/thread/scale_type.h"
|
| 48 |
+
#include "cutlass/fast_math.h"
|
| 49 |
+
#include "cutlass/functional.h"
|
| 50 |
+
#include "cutlass/gemm/gemm.h"
|
| 51 |
+
#include "cutlass/layout/matrix.h"
|
| 52 |
+
#include "cutlass/layout/vector.h"
|
| 53 |
+
#include "cutlass/numeric_conversion.h"
|
| 54 |
+
#include "cutlass/numeric_types.h"
|
| 55 |
+
#include "cutlass/tensor_ref.h"
|
| 56 |
+
|
| 57 |
+
#include "gemm_kernel_utils.h"
|
| 58 |
+
|
| 59 |
+
#include "cutlass/epilogue/thread/linear_combination_relu.h"
|
| 60 |
+
#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
|
| 61 |
+
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
|
| 62 |
+
#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h"
|
| 63 |
+
#include "cutlass/gemm/device/default_gemm_configuration.h"
|
| 64 |
+
#include "cutlass/gemm/kernel/default_gemm.h"
|
| 65 |
+
#include "cutlass/gemm/threadblock/default_mma.h"
|
| 66 |
+
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
|
| 67 |
+
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
|
| 68 |
+
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
|
| 69 |
+
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
|
| 70 |
+
#include "cutlass/matrix_shape.h"
|
| 71 |
+
#include "cutlass/platform/platform.h"
|
| 72 |
+
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
|
| 73 |
+
#include "cutlass/transform/threadblock/vector_iterator.h"
|
| 74 |
+
#include "epilogue/epilogue_pipelined.h"
|
| 75 |
+
#include "iterators/epilogue_predicated_tile_iterator.h"
|
| 76 |
+
|
| 77 |
+
#include "epilogue/epilogue_grad_bias.h"
|
| 78 |
+
#include "gemm/custom_mma.h"
|
| 79 |
+
#include "gemm/find_default_mma.h"
|
| 80 |
+
#include "gemm/mma_accum_lambda_iterator.h"
|
| 81 |
+
#include "gemm/mma_from_smem.h"
|
| 82 |
+
#include "transform/bias_broadcast.h"
|
| 83 |
+
#include "transform/tile_smem_loader.h"
|
| 84 |
+
|
| 85 |
+
#include <inttypes.h>
|
| 86 |
+
|
| 87 |
+
using namespace gemm_kernel_utils;
|
| 88 |
+
|
| 89 |
+
namespace {
|
| 90 |
+
|
| 91 |
+
template <typename FragmentType, int32_t kNumThreads>
struct GmemTile {
    /*
      Helper functions to efficiently store/load RF to gmem

      GEMM accumulators have a particular format on A100, and
      it takes some compute/shared-memory to rearrange them to
      a RowMajor or ColumnMajor format in global memory through
      an Epilogue. The same complexity goes for loading into RF.

      This class loads/stores RF as they are, and can be used for
      efficient accumulation across gemms for instance:

      ```
      GmemTile tile;
      for (int i = 0; i < N; ++i) {
        // ...

        Fragment accum;
        if (i == 0) {
          accum.clear();
        } else {
          tile.load(accum);
        }
        mma(accum, ...);
        if (i < N-1) {
          // Store for next GEMM
          tile.store(accum);
        } else {
          // Store in tensor (eg RowMajor)
          epilogue(accum);
        }

        // ...
      }
      ```
    */

    // 128bits per thread
    using AccessType = cutlass::Array<float, 4>;
    static constexpr int32_t kBytes = sizeof(AccessType);
    static constexpr int32_t kStride = kNumThreads * AccessType::kElements;
    static constexpr int32_t kNumIters = FragmentType::kElements / AccessType::kElements;
    static constexpr int32_t kElementsStored = kNumThreads * FragmentType::kElements;
    static_assert(FragmentType::kElements % AccessType::kElements == 0,
                  "fragment not aligned on 128 bits");

    float* ptr;

    CUTLASS_DEVICE void load(FragmentType& fragment, int thread_id)
    {
        CUTLASS_PRAGMA_UNROLL
        for (int i = 0; i < kNumIters; ++i) {
            AccessType* __restrict__ gmem_ptr = reinterpret_cast<AccessType*>(
                ptr + thread_id * AccessType::kElements + i * kStride);
            AccessType sub_fragment;
            cutlass::arch::global_load<AccessType, kBytes>(sub_fragment, gmem_ptr, true);
            CUTLASS_PRAGMA_UNROLL
            for (int j = 0; j < AccessType::kElements; ++j) {
                fragment[i * AccessType::kElements + j] = sub_fragment[j];
            }
        }
    }

    CUTLASS_DEVICE void store(FragmentType const& fragment, int thread_id)
    {
        CUTLASS_PRAGMA_UNROLL
        for (int i = 0; i < kNumIters; ++i) {
            AccessType* __restrict__ gmem_ptr = reinterpret_cast<AccessType*>(
                ptr + thread_id * AccessType::kElements + i * kStride);
            AccessType sub_fragment;
            CUTLASS_PRAGMA_UNROLL
            for (int j = 0; j < AccessType::kElements; ++j) {
                sub_fragment[j] = fragment[i * AccessType::kElements + j];
            }
            cutlass::arch::global_store<AccessType, kBytes>(sub_fragment, gmem_ptr, true);
        }
    }
};

template <typename scalar_t, typename Arch>
constexpr int getWarpsPerSm()
{
    constexpr bool is_half = !cutlass::platform::is_same<scalar_t, float>::value;
    if (Arch::kMinComputeCapability >= 80) { return is_half ? 12 : 8; }
    return 8;
}
}  // namespace
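// The kernel below implements the backward pass of the (optionally biased)
// Evoformer attention op: given Q, K, V, the forward output O, dO, and the
// saved per-row logsumexp, it produces dQ, dK, dV and, when the Broadcast1_/
// Broadcast2_ loaders are enabled, the bias gradients, one key block at a time.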
template <
    // which arch we target (eg `cutlass::arch::Sm80`)
    typename ArchTag_,
    // input/output type
    typename scalar_t_,
    // run optimized kernel because memory accesses will be aligned
    bool kIsAligned_,
    // use dropout if enabled
    bool kApplyDropout_,
    // when doing a GEMM, preload the next one (uses more shmem)
    bool kPreload_,
    // block dimensions
    int kBlockSizeI_,
    int kBlockSizeJ_,
    // upper bound on `max(value.shape[-1], query.shape[-1])`
    int kMaxK_ = (int)cutlass::platform::numeric_limits<uint32_t>::max(),
    template <typename, typename, typename> class Broadcast1_ = BroadcastNoLoad,
    template <typename, typename, typename> class Broadcast2_ = BroadcastNoLoad>
struct AttentionBackwardKernel {
    using scalar_t = scalar_t_;
    using output_t = scalar_t;
    using output_accum_t = float;
    using lse_scalar_t = float;
    using accum_t = float;
    using ArchTag = ArchTag_;
    static constexpr bool kIsAligned = kIsAligned_;
    static constexpr bool kApplyDropout = kApplyDropout_;
    static constexpr bool kPreload = kPreload_;
    static constexpr int kBlockSizeI = kBlockSizeI_;
    static constexpr int kBlockSizeJ = kBlockSizeJ_;
    static constexpr int kMaxK = kMaxK_;
    struct Params {
        // Input tensors
        scalar_t* query_ptr;          // [Mq, nH, K]
        scalar_t* key_ptr;            // [Mk, nH, K]
        scalar_t* value_ptr;          // [Mk, nH, Kv]
        lse_scalar_t* logsumexp_ptr;  // [nH, Mq]
        scalar_t* output_ptr;         // [Mq, nH, Kv]
        scalar_t* grad_output_ptr;    // [Mq, nH, Kv]
        accum_t* delta_ptr;           // [nH, Mq]
        int32_t* cu_seqlens_q_ptr = nullptr;
        int32_t* cu_seqlens_k_ptr = nullptr;

        // Output tensors
        output_t* grad_query_ptr;  // [Mq, nH, K]
        output_t* grad_key_ptr;    // [Mk, nH, K]
        output_t* grad_value_ptr;  // [Mk, nH, Kv]

        accum_t* grad_bias1_ptr = nullptr;
        accum_t* grad_bias2_ptr = nullptr;
        int32_t B = 0;
        int32_t N = 0;
        scalar_t* bias1_ptr = nullptr;
        scalar_t* bias2_ptr = nullptr;

        // Accumulators
        union {
            output_accum_t* workspace = nullptr;  // [Mq, Kq] + [Mkv, Kq] + [Mkv, Kv]
            output_accum_t* workspace_gk;
        };
        output_accum_t* workspace_gv;  // (will be calculated by the kernel)
        output_accum_t* workspace_gq;  // (will be calculated by the kernel)

        // Scale
        accum_t scale;

        // Dimensions/strides
        int32_t head_dim = -1;
        int32_t head_dim_value = -1;
        int32_t num_queries = -1;
        int32_t num_keys = -1;
        int32_t num_heads = -1;

        int32_t q_strideM;
        int32_t k_strideM;
        int32_t v_strideM;
        int32_t gO_strideM;
        int32_t gB_strideM;
        int8_t gQKV_strideM_multiplier = 1;  // 3 for packed, 1 otherwise

        // RNG sequence offset based on batch_id and head_id
        unsigned long long dropout_batch_head_rng_offset;
        float dropout_prob = 0.0f;

        CUTLASS_HOST_DEVICE int32_t o_strideM() const { return head_dim_value * num_heads; }
        CUTLASS_HOST_DEVICE int32_t gQ_strideM() const
        {
            return gQKV_strideM_multiplier * num_heads * head_dim;
        }
        CUTLASS_HOST_DEVICE int32_t gK_strideM() const
        {
            return gQKV_strideM_multiplier * num_heads * head_dim;
        }
        CUTLASS_HOST_DEVICE int32_t gV_strideM() const
        {
            return gQKV_strideM_multiplier * num_heads * head_dim_value;
        }

        // Everything below is only used in `advance_to_block`
        // and shouldn't use registers
        int64_t o_strideH;
        int32_t q_strideH;
        int32_t k_strideH;
        int32_t v_strideH;
        int64_t o_strideB;
        int64_t q_strideB;
        int64_t k_strideB;
        int64_t v_strideB;
        int64_t lse_strideB;
        int64_t lse_strideH;
        int64_t delta_strideB;
        int64_t delta_strideH;
        int32_t num_batches;

        int64_t gO_strideB = 0;
        int64_t gQ_strideB = 0;
        int64_t gK_strideB = 0;
        int64_t gV_strideB = 0;
        int64_t gB_strideB = 0;
        int64_t gO_strideH = 0;
        int64_t gQ_strideH = 0;
        int64_t gK_strideH = 0;
        int64_t gV_strideH = 0;
        int64_t gB_strideH = 0;
        CUTLASS_DEVICE bool advance_to_block()
        {
            int64_t batch_id = blockIdx.z;
            int32_t head_id = blockIdx.y;

            if (kNeedsAccumGradQ || kNeedsAccumGradK || kNeedsAccumGradV) {
                assert(workspace_size() == 0 || workspace != nullptr);

                workspace += (batch_id * num_heads + head_id) * workspace_strideBH();
                workspace = warp_uniform(workspace);
                workspace_gv = workspace + workspace_elements_gk();
                workspace_gq = workspace_gv + workspace_elements_gv();
            } else {
                workspace = nullptr;
            }

            // Advance pointers that depend on the total concatenated
            // number of queries, as `num_queries` is modified in the block
            // below
            dropout_batch_head_rng_offset = batch_id * (num_heads * num_queries * num_keys) +
                                            head_id * (num_queries * num_keys);
            logsumexp_ptr += batch_id * lse_strideB + head_id * lse_strideH;

            query_ptr += batch_id * q_strideB + head_id * q_strideH;
            key_ptr += batch_id * k_strideB + head_id * k_strideH;
            value_ptr += batch_id * v_strideB + head_id * v_strideH;
            output_ptr += batch_id * o_strideB + head_id * o_strideH;
            grad_output_ptr += batch_id * gO_strideB + head_id * gO_strideH;
            delta_ptr += batch_id * delta_strideB + head_id * delta_strideH;

            grad_query_ptr += batch_id * gQ_strideB + head_id * gQ_strideH;
            grad_key_ptr += batch_id * gK_strideB + head_id * gK_strideH;
            grad_value_ptr += batch_id * gV_strideB + head_id * gV_strideH;
            using broadcast_1 = Broadcast1_<typename MatmulQK::BiasLoader::ThreadMap,
                                            typename MatmulQK::BiasLoader::Shape,
                                            scalar_t>;
            using broadcast_2 = Broadcast2_<typename MatmulQK::BiasLoader::ThreadMap,
                                            typename MatmulQK::BiasLoader::Shape,
                                            scalar_t>;

            if (broadcast_1::kEnable && grad_bias1_ptr) {
                grad_bias1_ptr += batch_id * num_queries;
            }
            if (broadcast_2::kEnable && grad_bias2_ptr) {
                auto strideB = num_heads * num_queries * num_keys;
                auto strideH = num_queries * num_keys;
                grad_bias2_ptr += (batch_id / N) * strideB + head_id * strideH;
            }
            if (broadcast_1::kEnable && bias1_ptr) {
                bias1_ptr = broadcast_1::advance(bias1_ptr,
                                                 batch_id / N,
                                                 batch_id % N,
                                                 head_id,
                                                 num_queries * N,
                                                 num_queries,
                                                 0);
            }
            if (broadcast_2::kEnable && bias2_ptr) {
                auto strideB = num_heads * num_queries * num_keys;
                auto strideH = num_queries * num_keys;
                bias2_ptr = broadcast_2::advance(
                    bias2_ptr, batch_id / N, batch_id % N, head_id, strideB, 0, strideH);
            }

            num_queries = warp_uniform(num_queries);
            num_keys = warp_uniform(num_keys);

            query_ptr = warp_uniform(query_ptr);
            key_ptr = warp_uniform(key_ptr);
            value_ptr = warp_uniform(value_ptr);
            logsumexp_ptr = warp_uniform(logsumexp_ptr);
            output_ptr = warp_uniform(output_ptr);
            grad_output_ptr = warp_uniform(grad_output_ptr);
            delta_ptr = warp_uniform(delta_ptr);

            grad_query_ptr = warp_uniform(grad_query_ptr);
            grad_key_ptr = warp_uniform(grad_key_ptr);
            grad_value_ptr = warp_uniform(grad_value_ptr);
            if (broadcast_1::kEnable) {
                grad_bias1_ptr = warp_uniform(grad_bias1_ptr);
                bias1_ptr = warp_uniform(bias1_ptr);
            }
            if (broadcast_2::kEnable) {
                grad_bias2_ptr = warp_uniform(grad_bias2_ptr);
                bias2_ptr = warp_uniform(bias2_ptr);
            }

            return true;
        }
        __host__ dim3 getBlocksGrid() const { return dim3(1, num_heads, num_batches); }
        __host__ dim3 getThreadsGrid() const { return dim3(kWarpSize * kNumWarpsPerBlock, 1, 1); }
        CUTLASS_HOST_DEVICE int64_t workspace_elements_gk() const
        {
            if (!kNeedsAccumGradK) { return 0; }
            return align_up(num_keys, (int32_t)kBlockSizeJ) *
                   align_up(head_dim, (int32_t)kBlockSizeI);
        }
        CUTLASS_HOST_DEVICE int64_t workspace_elements_gv() const
        {
            if (!kNeedsAccumGradV) { return 0; }
            return align_up(num_keys, (int32_t)kBlockSizeJ) *
                   align_up(head_dim_value, (int32_t)kBlockSizeI);
        }
        CUTLASS_HOST_DEVICE int64_t workspace_elements_gq() const
        {
            if (!kNeedsAccumGradQ) { return 0; }
            if (num_keys <= kBlockSizeJ) { return 0; }
            return align_up(num_queries, (int32_t)kBlockSizeI) *
                   align_up(head_dim, (int32_t)kBlockSizeJ);
        }
        CUTLASS_HOST_DEVICE int64_t workspace_strideBH() const
        {
            // Aligned on 128bits
            return align_up(
                workspace_elements_gk() + workspace_elements_gv() + workspace_elements_gq(),
                int64_t(4));
        }
        CUTLASS_HOST_DEVICE int64_t workspace_size() const
        {
            // Returns size of buffer we need to run this kernel
            return num_batches * num_heads * workspace_strideBH() * sizeof(float);
        }
    };
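    // Host-side usage sketch (illustrative only; the actual driver code lives
    // outside this header): the caller fills the pointers and strides in Params,
    // validates them with check_supported(p), allocates p.workspace_size() bytes
    // of float scratch when that value is non-zero, and launches the kernel on a
    // p.getBlocksGrid() x p.getThreadsGrid() grid. Per (batch, head), the scratch
    // is laid out as [dK accum | dV accum | dQ accum] with stride workspace_strideBH().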
    static constexpr int64_t kWarpSize = 32;

    // If this is true, we store and accumulate dK/dV in RF
    // rather than going back to gmem every time
    static constexpr bool kIsHalf = cutlass::sizeof_bits<scalar_t>::value <= 16;
    static constexpr bool kOutputInRF = kIsHalf && kMaxK <= kBlockSizeI;
    static_assert(!kPreload || (kIsHalf && ArchTag::kMinComputeCapability >= 80 && kOutputInRF),
                  "preload MMA not supported");
    static constexpr bool kPrologueQK = kPreload;
    static constexpr bool kPrologueGV = kPreload;
    static constexpr bool kPrologueDOV = kPreload;
    static constexpr bool kPrologueGQ = kPreload;
    static constexpr bool kPrologueGK = kPreload;

    static constexpr int64_t kNumWarpsPerBlock = (kBlockSizeI * kBlockSizeJ) / (32 * 32);

    // Compute delta for the f16 kernels
    // TODO: Figure out why it's slower on the f32 kernels
    // (something due to RF pressure?)
    // TODO: Remove condition on `kOutputInRF` - this is needed to work
    // around a compiler bug on V100, not exactly sure why but I spent
    // too much time on this already. Reproducible with
    // (B, Mq, Mkv, K) = (1, 1, 1, 136) for instance
    static constexpr bool kKernelComputesDelta =
        kIsHalf && (kOutputInRF || ArchTag::kMinComputeCapability != 70);

    static constexpr bool kNeedsAccumGradQ =
        !cutlass::platform::is_same<output_accum_t, output_t>::value;
    static constexpr bool kNeedsAccumGradK =
        !kOutputInRF && !cutlass::platform::is_same<output_accum_t, output_t>::value;
    static constexpr bool kNeedsAccumGradV =
        !kOutputInRF && !cutlass::platform::is_same<output_accum_t, output_t>::value;

    // Launch bounds
    static constexpr int64_t kNumThreads = kWarpSize * kNumWarpsPerBlock;
    static constexpr int64_t kMinBlocksPerSm =
        getWarpsPerSm<scalar_t, ArchTag>() / kNumWarpsPerBlock;

    using GemmType = DefaultGemmType<ArchTag, scalar_t>;
    using DefaultConfig =
        typename cutlass::gemm::device::DefaultGemmConfiguration<typename GemmType::OpClass,
                                                                 ArchTag,
                                                                 scalar_t,
                                                                 scalar_t,
                                                                 scalar_t,  // ElementC
                                                                 accum_t    // ElementAccumulator
                                                                 >;
    static constexpr auto kOptimalAlignement =
        cutlass::platform::max(DefaultConfig::kAlignmentA, DefaultConfig::kAlignmentB);
    static constexpr auto kMinimumAlignment = GemmType::kMinimumAlignment;
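    // The backward pass is decomposed into five threadblock-level GEMMs, one
    // helper struct each below:
    //   MatmulQK:    Pij.T  = exp(Kj @ Qi.T - LSEi)
    //   MatmulGradV: dVj   += Pij.T (optionally * Zij) @ dOi
    //   MatmulDOIVJ: dPij   = dOi @ Vj.T, then dSij = Pij * (dPij - Di)
    //   MatmulGradQ: dQi   += dSij @ Kj
    //   MatmulGradK: dKj   += dSij.T @ Qi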
    struct MatmulQK {
        /*
        attn_T = k_j @ q_i.transpose(-2, -1) # matmul
        attn_T = (attn_T - logsumexp[i_start:i_end].unsqueeze(1).transpose(-2,
        -1)).exp() # epilogue

        with attn_T.shape = (kBlockSizeJ, kBlockSizeI)
        */
        using ThreadblockShape =
            cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
        using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
        using DefaultMma = typename cutlass::gemm::threadblock::DefaultMma<
            scalar_t,                   // ElementA
            cutlass::layout::RowMajor,  // LayoutA
            kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment,
            scalar_t,                      // ElementB
            cutlass::layout::ColumnMajor,  // LayoutB
            kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
            accum_t,                    // ElementC
            cutlass::layout::RowMajor,  // LayoutC
            typename GemmType::OpClass,
            ArchTag,
            ThreadblockShape,
            WarpShape,
            typename GemmType::InstructionShape,
            DefaultConfig::kStages,
            typename GemmType::Operator,
            false,  // AccumulatorsInRowMajor = false,
            cutlass::gemm::SharedMemoryClearOption::kNone>;
        using MmaCore = typename DefaultMma::MmaCore;
        using Mma = typename MakeCustomMma<typename DefaultMma::ThreadblockMma, kMaxK>::Mma;

        // used for efficient load of bias tile (Bij) from global memory to shared
        // memory
        using BiasLoader =
            TileSmemLoader<scalar_t,
                           // Bij is applied to transposed attn matrix tile (Pij.T). Bij is loaded
                           // row-major but needs to have transposed shape so we get the same
                           // elements.
                           cutlass::MatrixShape<ThreadblockShape::kN, ThreadblockShape::kM>,
                           MmaCore::kThreads,
                           // input restriction: kv_len has to be a multiple of this value
                           128 / cutlass::sizeof_bits<scalar_t>::value>;

        // Epilogue to store to shared-memory in a format that we can use later for
        // the second matmul
        using B2bGemm =
            typename cutlass::gemm::threadblock::B2bGemm<typename Mma::Operator::IteratorC,
                                                         typename Mma::Operator,
                                                         scalar_t,
                                                         WarpShape,
                                                         ThreadblockShape>;
        using AccumLambdaIterator =
            typename DefaultMmaAccumLambdaIterator<typename Mma::Operator::IteratorC,
                                                   accum_t,
                                                   kWarpSize>::Iterator;
        using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
    };
    struct MatmulGradV {
        /*
        grad_v[j_start:j_end] += attn_T @ do_i # matmul

        Dimensions: (kBlockSizeJ * kNumWarpsPerBlock, kBlockSizeI, K)
        (we might need to iterate multiple times on K)
        */
        using ThreadblockShape =
            cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
        using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
        using InstructionShape = typename GemmType::InstructionShape;

        using DefaultGemm =
            cutlass::gemm::kernel::DefaultGemm<scalar_t,                   // ElementA,
                                               cutlass::layout::RowMajor,  // LayoutA,
                                               DefaultConfig::kAlignmentA,
                                               scalar_t,                   // ElementB,
                                               cutlass::layout::RowMajor,  // LayoutB,
                                               kIsAligned ? DefaultConfig::kAlignmentB
                                                          : GemmType::kMinimumAlignment,
                                               output_t,
                                               cutlass::layout::RowMajor,  // LayoutC,
                                               accum_t,
                                               typename GemmType::OpClass,
                                               ArchTag,
                                               ThreadblockShape,
                                               WarpShape,
                                               typename GemmType::InstructionShape,
                                               typename DefaultConfig::EpilogueOutputOp,
                                               void,  // ThreadblockSwizzle - not used
                                               DefaultConfig::kStages,
                                               false,  // SplitKSerial
                                               typename GemmType::Operator>;

        // if dropout:
        //   for computing dVj += (Pij.T * Zij) @ dOi
        //   Pij_dropped.T = Pij.T * Zij is computed on the fly as fragments of
        //   Pij.T are loaded in. The reason we do it this way is because Pij.T and
        //   Zij are reused in later steps, while Pij_dropped.T is only needed in
        //   this step. computing Pij_dropped.T on the fly allows us to avoid
        //   keeping all 3 of Pij_dropped.T, Pij.T, and Zij in shared memory at the
        //   same time.
        // if no dropout:
        //   for computing dVj += Pij.T @ dOi
        using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
            typename DefaultGemm::Mma,
            typename MatmulQK::AccumulatorSharedStorage,
            kApplyDropout>;  // kScaleOperandA

        using Mma = typename DefaultMmaFromSmem::Mma;
        using WarpIteratorA = typename DefaultMmaFromSmem::WarpIteratorA;
        using IteratorB = typename Mma::IteratorB;
        using WarpCount = typename Mma::WarpCount;

        // Epilogue
        using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
        using DefaultEpilogue = typename DefaultGemm::Epilogue;
        using OutputTileIterator =
            typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
                typename DefaultEpilogue::OutputTileIterator>::Iterator;
        using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
    };
    struct MatmulDOIVJ {
        /*
        doi_t_vj = do_i @ v_j.transpose(-2, -1) # matmul
        tmp = (doi_t_vj - Di.unsqueeze(1)) * attn # inplace / epilogue?
        */
        using ThreadblockShape =
            cutlass::gemm::GemmShape<kBlockSizeI, kBlockSizeJ, GemmType::ThreadK>;
        using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;

        using ElementC = accum_t;  // CSY: Change it for better accuracy
        using ElementAccum = accum_t;

        // no-op output op - epilogue just stores result to global memory
        using BiasGradEpilogueOutputOp = typename cutlass::epilogue::thread::LinearCombination<
            ElementC,
            DefaultConfig::EpilogueOutputOp::kCount,
            typename DefaultConfig::EpilogueOutputOp::ElementAccumulator,
            typename DefaultConfig::EpilogueOutputOp::ElementCompute,
            cutlass::epilogue::thread::ScaleType::Nothing>;

        using DefaultGemm = typename cutlass::gemm::kernel::DefaultGemm<
            scalar_t,                   // ElementA
            cutlass::layout::RowMajor,  // LayoutA
            kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment,
            scalar_t,                      // ElementB
            cutlass::layout::ColumnMajor,  // LayoutB
            kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment,
            ElementC,                   // ElementC
            cutlass::layout::RowMajor,  // LayoutC
            ElementAccum,               // ElementAccumulator
            typename GemmType::OpClass,
            ArchTag,
            ThreadblockShape,
            WarpShape,
            typename GemmType::InstructionShape,
            BiasGradEpilogueOutputOp,  // EpilogueOutputOp
            void,                      // ThreadblockSwizzle (not used)
            // multiple preloads, dropout Zij tile, and 3 stages push us over shared
            // memory capacity on A100. set a ceiling on number of stages to save
            // shared memory if dropout is in use.
            kPreload && kApplyDropout && (kBlockSizeI * kBlockSizeJ > 64 * 64)
                ? cutlass::const_min(2, DefaultConfig::kStages)
                : DefaultConfig::kStages,  // Stages
            false,                         // SplitKSerial
            typename GemmType::Operator,
            cutlass::gemm::SharedMemoryClearOption::kNone>;
        using Mma = typename MakeCustomMma<typename DefaultGemm::Mma, kMaxK>::Mma;

        // epilogue used to write bias gradient, which is just the output of this
        // matmul with some operations applied to the fragment
        using BiasGradEpilogue = typename DefaultGemm::Epilogue;

        // Epilogue to store to shared-memory in a format that we can use later for
        // the second matmul
        using B2bGemm =
            typename cutlass::gemm::threadblock::B2bGemm<typename Mma::Operator::IteratorC,
                                                         typename Mma::Operator,
                                                         scalar_t,
                                                         WarpShape,
                                                         ThreadblockShape>;
        using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
    };
    struct MatmulGradQ {
        // grad_q <- tmp @ k_j
        using ThreadblockShape =
            cutlass::gemm::GemmShape<kBlockSizeI, kBlockSizeJ, GemmType::ThreadK>;
        using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
        using InstructionShape = typename GemmType::InstructionShape;

        using DefaultGemm =
            cutlass::gemm::kernel::DefaultGemm<scalar_t,                   // ElementA,
                                               cutlass::layout::RowMajor,  // LayoutA,
                                               DefaultConfig::kAlignmentA,
                                               scalar_t,                   // ElementB,
                                               cutlass::layout::RowMajor,  // LayoutB,
                                               kIsAligned ? DefaultConfig::kAlignmentB
                                                          : GemmType::kMinimumAlignment,
                                               output_t,
                                               cutlass::layout::RowMajor,  // LayoutC,
                                               accum_t,
                                               typename GemmType::OpClass,
                                               ArchTag,
                                               ThreadblockShape,
                                               WarpShape,
                                               typename GemmType::InstructionShape,
                                               typename DefaultConfig::EpilogueOutputOp,
                                               void,  // ThreadblockSwizzle - not used
                                               DefaultConfig::kStages,
                                               false,  // SplitKSerial
                                               typename GemmType::Operator>;

        using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
            typename DefaultGemm::Mma,
            typename MatmulDOIVJ::AccumulatorSharedStorage,
            false>;  // kScaleOperandA
        using Mma = typename DefaultMmaFromSmem::Mma;
        using IteratorB = typename Mma::IteratorB;
        using WarpCount = typename Mma::WarpCount;

        // Epilogue
        using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
        using DefaultEpilogue = typename DefaultGemm::Epilogue;
        using OutputTileIterator =
            typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
                typename DefaultEpilogue::OutputTileIterator>::Iterator;
        using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
    };
    struct MatmulGradK {
        // grad_k <- tmp.transpose(-2, -1) @ q_i
        using ThreadblockShape =
            cutlass::gemm::GemmShape<kBlockSizeJ, kBlockSizeI, GemmType::ThreadK>;
        using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
        using InstructionShape = typename GemmType::InstructionShape;

        using DefaultGemm =
            cutlass::gemm::kernel::DefaultGemm<scalar_t,                   // ElementA,
                                               cutlass::layout::RowMajor,  // LayoutA,
                                               DefaultConfig::kAlignmentA,
                                               scalar_t,                   // ElementB,
                                               cutlass::layout::RowMajor,  // LayoutB,
                                               kIsAligned ? DefaultConfig::kAlignmentB
                                                          : GemmType::kMinimumAlignment,
                                               output_t,
                                               cutlass::layout::RowMajor,  // LayoutC,
                                               accum_t,
                                               typename GemmType::OpClass,
                                               ArchTag,
                                               ThreadblockShape,
                                               WarpShape,
                                               typename GemmType::InstructionShape,
                                               typename DefaultConfig::EpilogueOutputOp,
                                               void,  // ThreadblockSwizzle - not used
                                               DefaultConfig::kStages,
                                               false,  // SplitKSerial
                                               typename GemmType::Operator>;

        using DefaultMmaFromSmemN = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
            typename DefaultGemm::Mma,
            typename MatmulQK::AccumulatorSharedStorage,
            false>;  // kScaleOperandA
        using DefaultMmaFromSmemT = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
            typename DefaultGemm::Mma,
            typename MatmulDOIVJ::AccumulatorSharedStorage,
            false,      // kScaleOperandA
            kPreload>;  // kTransposeA
        using DefaultMmaFromSmem =
            typename cutlass::platform::conditional<DefaultMmaFromSmemT::kIsTransposedA,
                                                    DefaultMmaFromSmemT,
                                                    DefaultMmaFromSmemN>::type;
        using Mma = typename DefaultMmaFromSmem::Mma;
        using IteratorB = typename Mma::IteratorB;
        using WarpCount = typename Mma::WarpCount;

        // Epilogue
        using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp;
        using DefaultEpilogue = typename DefaultGemm::Epilogue;
        using OutputTileIterator =
            typename cutlass::epilogue::threadblock::MakePrefetchableIterator<
                typename DefaultEpilogue::OutputTileIterator>::Iterator;
        using AccumTileGmem = GmemTile<typename Mma::FragmentC, (int)kNumThreads>;
    };
    using broadcast_1 = Broadcast1_<typename MatmulQK::BiasLoader::ThreadMap,
                                    typename MatmulQK::BiasLoader::Shape,
                                    scalar_t>;
    using broadcast_2 = Broadcast2_<typename MatmulQK::BiasLoader::ThreadMap,
                                    typename MatmulQK::BiasLoader::Shape,
                                    scalar_t>;

    // shared storage for keeping Zij matrix. not needed if we aren't using
    // dropout, in which case we use an empty array to save shared memory
    using ZijSharedStorage = typename cutlass::platform::conditional<
        kApplyDropout,
        typename MatmulQK::AccumulatorSharedStorage,
        // dummy shared storage object that takes up no space.
        typename cutlass::gemm::threadblock::AccumulatorSharedStorage<
#ifdef _WIN32
            // windows builds throw the error:
            // "type containing an unknown-size array is not allowed"
            // if we try to make Zij shared storage zero-sized.
            // To get around this just make it sized 1 on windows.
            typename cutlass::gemm::GemmShape<1, 1, 0>,
#else
            typename cutlass::gemm::GemmShape<0, 0, 0>,
#endif
            typename MatmulQK::AccumulatorSharedStorage::Element,
            typename MatmulQK::AccumulatorSharedStorage::Layout,
            typename cutlass::MatrixShape<0, 0>>>::type;
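    // The two SharedStorage variants below overlay per-step shared-memory needs
    // with unions: buffers that are alive in different phases of the i/j block
    // loop (Q.K, dV, dO.V, dQ, dK and their epilogues) share the same bytes,
    // which is what keeps the kernel inside the SM's shared-memory budget.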
    struct SharedStoragePrologue {
        struct {
            cutlass::Array<accum_t, kBlockSizeI> di;  // (do_i * o_i).sum(-1)
            typename MatmulQK::Mma::SharedStorageA mm_qk_k;
        } persistent;
        union {
            struct {
                // part1 - after Q.K / dV / dO.V
                union {
                    // 1. efficient load of bias tile Bij, which is then applied to Pij
                    // typename MatmulQK::BiasLoader::SmemTile bias;
                    cutlass::AlignedBuffer<float, MatmulQK::BiasLoader::Shape::kCount> bias;
                    // 4. store Pij. it is needed:
                    // - in dVj += (Pij.T * Zij) @ dOi
                    // - in dSij = Pij * (dPij - Di)
                    // 6. dVj += (Pij.T * Zij) @ dOi
                    // 10. write to fragment
                    typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
                };
                // 5. store Zij. it is needed:
                // - to compute Pij_dropped = Pij * Zij on the fly as fragments of Pij
                //   are loaded for the computation of dVj.
                // - to compute dPij = (dOi @ Vj.T) * Zij
                // 6. used in dVj += (Pij.T * Zij) @ dOi
                // 9. used in dPij = dPij_dropped * Zij
                ZijSharedStorage zij;

                union {
                    // 2. prologue for dVj
                    // 6. workspace for dVj += (Pij.T * Zij) @ dOi
                    typename MatmulGradV::Mma::SharedStorage mm_gradV;
                    // 7. dVj epilogue
                    typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue;
                };

                // 3. prologue for dPij_dropped
                // 8. used in dPij_dropped = dOi @ Vj.T
                typename MatmulDOIVJ::Mma::SharedStorage mm_doivj;
            } part1;

            struct {
                // part2 - dQ
                union {
                    typename MatmulQK::AccumulatorSharedStorage
                        tmpT_shared_storage;  // (from part1)
                    typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
                };
                typename MatmulGradK::Mma::SharedStorage mm_gradK;  // (preload)
                typename MatmulGradQ::Mma::SharedStorage mm_gradQ;  // (preload)
                union {
                    // store dB = dSij to global memory
                    typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue;
                    typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue;
                };

            } part2;

            struct {
                // part3 - after last iteration on dQ's epilogue / dK
                union {
                    typename MatmulQK::AccumulatorSharedStorage
                        tmpT_shared_storage;  // (from part1)
                    typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
                };
                typename MatmulGradK::Mma::SharedStorage mm_gradK;  // (preload)
                typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue_lastIter;

                typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue;
            } part3;

            struct {
                // part4 - after last iteration on dK's epilogue / preload next K.Q_t
                typename MatmulQK::Mma::SharedStorageB mm_qk_q;

                // If we reach end of current key, dump RF->gmem with "final" epilogues
                typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue_final;
                typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue_final;
            } part4;
        };
        // ===========================================
#define FIELD(INSIDE_STRUCT, FIELDNAME) \
    CUTLASS_DEVICE auto& FIELDNAME() { return INSIDE_STRUCT.FIELDNAME; }

        FIELD(persistent, di)
        FIELD(persistent, mm_qk_k)
        FIELD(part1, bias)
        FIELD(part1, attn_shared_storage)
        FIELD(part1, zij)
        FIELD(part1, mm_gradV)
        FIELD(part1, gradV_epilogue)
        FIELD(part1, mm_doivj)
        FIELD(part2, mm_gradK)
        FIELD(part2, mm_gradQ)
        FIELD(part2, gradB_epilogue)
        FIELD(part2, gradQ_epilogue)
        FIELD(part2, tmp_shared_storage)
        FIELD(part3, tmpT_shared_storage)
        FIELD(part3, gradQ_epilogue_lastIter)
        FIELD(part3, gradK_epilogue)
        FIELD(part4, mm_qk_q)
        FIELD(part4, gradK_epilogue_final)
        FIELD(part4, gradV_epilogue_final)
    };
    struct SharedStorageNoPrologue {
        struct {
            cutlass::Array<accum_t, kBlockSizeI> di;  // (do_i * o_i).sum(-1)
        } persistent;
        union {
            struct {
                // part1 - Q.K matmul
                typename MatmulQK::Mma::SharedStorageA mm_qk_k;
                typename MatmulQK::Mma::SharedStorageB mm_qk_q;
            } part1;

            struct {
                // part2 - compute gradV
                union {
                    // 1. efficient load of bias tile Bij, which is then applied to Pij
                    cutlass::AlignedBuffer<float, MatmulQK::BiasLoader::Shape::kCount> bias;
                    // 2. store Pij to shared memory. it is needed:
                    // - in this step, where it is used in dVj += (Pij.T * Zij) @ dOi
                    // - in next step where it is used in dSij = Pij * (dPij - Di)
                    typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
                };
                // 3. store Zij. it is needed:
                // - in this step, where it is used to compute Pij_dropped = Pij * Zij
                //   on the fly as fragments of Pij are loaded for the computation of dVj.
                // - later to compute dPij = (dOi @ Vj.T) * Zij
                ZijSharedStorage zij;

                union {
                    typename MatmulGradV::Mma::SharedStorage mm_gradV;
                    typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue;
                };
            } part2;

            struct {
                // part3 - DO.V matmul
                union {
                    // first compute dPij = (dOi @ Vj.T) * Zij
                    // and dSij = Pij * (dPij - Di)
                    struct {
                        // (from part2) - Pij for computing dSij = Pij * (dPij - Di)
                        typename MatmulQK::AccumulatorSharedStorage attn_shared_storage;
                        // (from part2) - Zij for computing dPij = dPij_dropped * Zij
                        ZijSharedStorage zij;
                        // matmul to compute dOiVj
                        typename MatmulDOIVJ::Mma::SharedStorage mm_doivj;
                    };
                    // then store dB = dSij to global memory
                    typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue;
                };
            } part3;

            struct {
                // part4 - compute gradQ
                typename MatmulQK::AccumulatorSharedStorage tmpT_shared_storage;  // (from part2)
                typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
                union {
                    typename MatmulGradQ::Mma::SharedStorage mm_gradQ;
                    typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue;
                    typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue_lastIter;
                };
            } part4;

            struct {
                // part5 - compute gradK
                typename MatmulQK::AccumulatorSharedStorage tmpT_shared_storage;  // (from part2)
                typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage;
                union {
                    typename MatmulGradK::Mma::SharedStorage mm_gradK;
                    typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue;
                };
            } part5;

            struct {
                // part6 - store RF accumulated into gmem
                typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue_final;
                typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue_final;
            } part6;
        };
        // ===========================================
#define FIELD(INSIDE_STRUCT, FIELDNAME) \
    CUTLASS_DEVICE auto& FIELDNAME() { return INSIDE_STRUCT.FIELDNAME; }

        FIELD(persistent, di)
        FIELD(part1, mm_qk_k)
        FIELD(part1, mm_qk_q)
        FIELD(part2, bias)
        FIELD(part2, attn_shared_storage)
        FIELD(part2, zij)
        FIELD(part2, mm_gradV)
        FIELD(part2, gradV_epilogue)
        FIELD(part3, mm_doivj)
        FIELD(part3, gradB_epilogue)
        FIELD(part4, tmpT_shared_storage)
        FIELD(part4, tmp_shared_storage)
        FIELD(part4, mm_gradQ)
        FIELD(part4, gradQ_epilogue)
        FIELD(part4, gradQ_epilogue_lastIter)
        FIELD(part5, mm_gradK)
        FIELD(part5, gradK_epilogue)
        FIELD(part6, gradK_epilogue_final)
        FIELD(part6, gradV_epilogue_final)
    };
    using SharedStorage = typename cutlass::platform::
        conditional<kPreload, SharedStoragePrologue, SharedStorageNoPrologue>::type;

    struct OutputFragments {
        typename MatmulGradV::Mma::FragmentC gradV;
        typename MatmulGradK::Mma::FragmentC gradK;

        CUTLASS_DEVICE void clear()
        {
            gradV.clear();
            gradK.clear();
        }
    };
    static bool __host__ check_supported(Params const& p)
    {
        CHECK_ALIGNED_PTR(p.query_ptr, kMinimumAlignment);
        CHECK_ALIGNED_PTR(p.key_ptr, kMinimumAlignment);
        CHECK_ALIGNED_PTR(p.value_ptr, kMinimumAlignment);
        CHECK_ALIGNED_PTR(p.output_ptr, kMinimumAlignment);
        CHECK_ALIGNED_PTR(p.grad_output_ptr, kMinimumAlignment);
        EVOFORMER_CHECK(p.lse_strideH % 8 == 0, "LSE is not correctly aligned");
        EVOFORMER_CHECK(p.lse_strideB % 8 == 0, "LSE is not correctly aligned");
        EVOFORMER_CHECK(p.num_heads <= 1 || p.q_strideH % kMinimumAlignment == 0,
                        "query is not correctly aligned (strideH)");
        EVOFORMER_CHECK(p.num_heads <= 1 || p.k_strideH % kMinimumAlignment == 0,
                        "key is not correctly aligned (strideH)");
        EVOFORMER_CHECK(p.num_heads <= 1 || p.v_strideH % kMinimumAlignment == 0,
                        "value is not correctly aligned (strideH)");
        EVOFORMER_CHECK(p.num_batches <= 1 || p.q_strideB % kMinimumAlignment == 0,
                        "query is not correctly aligned (strideB)");
        EVOFORMER_CHECK(p.num_batches <= 1 || p.k_strideB % kMinimumAlignment == 0,
                        "key is not correctly aligned (strideB)");
        EVOFORMER_CHECK(p.num_batches <= 1 || p.v_strideB % kMinimumAlignment == 0,
                        "value is not correctly aligned (strideB)");
        EVOFORMER_CHECK(p.q_strideM % kMinimumAlignment == 0,
                        "query is not correctly aligned (strideM)");
        EVOFORMER_CHECK(p.k_strideM % kMinimumAlignment == 0,
                        "key is not correctly aligned (strideM)");
        EVOFORMER_CHECK(p.v_strideM % kMinimumAlignment == 0,
                        "value is not correctly aligned (strideM)");
        EVOFORMER_CHECK(p.dropout_prob <= 1.0f && p.dropout_prob >= 0.0f,
                        "Invalid value for `dropout_prob`");
        EVOFORMER_CHECK(kApplyDropout || p.dropout_prob == 0.0f,
                        "Set `kApplyDropout`=True to support `dropout_prob > 0`");
        EVOFORMER_CHECK(p.head_dim > 0, "Invalid value for `head_dim`");
        EVOFORMER_CHECK(p.head_dim_value > 0, "Invalid value for `head_dim_value`");
        EVOFORMER_CHECK(p.num_queries > 0, "Invalid value for `num_queries`");
        EVOFORMER_CHECK(p.num_keys > 0, "Invalid value for `num_keys`");
        EVOFORMER_CHECK(p.num_heads > 0, "Invalid value for `num_heads`");
        EVOFORMER_CHECK(p.num_batches > 0, "Invalid value for `num_batches`");
        EVOFORMER_CHECK(p.head_dim <= kMaxK, "kMaxK: Expected `head_dim < kMaxK`");
        EVOFORMER_CHECK(p.head_dim_value <= kMaxK, "kMaxK: Expected `head_dim_value < kMaxK`");
        return true;
    }
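    // attention_kernel: one threadblock handles one (batch, head) pair (see
    // getBlocksGrid / advance_to_block). The outer loop walks key blocks of
    // kBlockSizeJ; for each, the inner loop walks query blocks of kBlockSizeI,
    // accumulating dK/dV either in registers (kOutputInRF) or through the float
    // workspace. Partial edge blocks go through the bounds-checked path.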
    static CUTLASS_DEVICE void attention_kernel(Params p)
    {
        extern __shared__ char smem_buffer[];
        SharedStorage& shared_storage = *((SharedStorage*)smem_buffer);

        uint16_t thread_id = threadIdx.x;
        uint8_t warp_id = warp_uniform(thread_id / 32);
        uint8_t lane_id = thread_id % 32;

        if (kPrologueQK) {
            prologueQkNextIteration<true>(shared_storage, p, 0, 0, warp_id, lane_id);
        }

        // Computes (dO*out).sum(-1) and writes it to `p.delta_ptr`
        if (kKernelComputesDelta) {
            constexpr int kOptimalElements = 128 / cutlass::sizeof_bits<scalar_t>::value;
            if (p.head_dim_value % kOptimalElements == 0) {
                for (int query_start = 0; query_start < p.num_queries; query_start += kBlockSizeI) {
                    computeDelta<kOptimalElements>(p, query_start, warp_id, lane_id);
                }
            } else {
                for (int query_start = 0; query_start < p.num_queries; query_start += kBlockSizeI) {
                    computeDelta<1>(p, query_start, warp_id, lane_id);
                }
            }
            __syncthreads();
        }

        OutputFragments output_frags;

        int32_t key_start = 0;
        int32_t key_end = p.num_keys / kBlockSizeJ * kBlockSizeJ;
        for (; key_start < key_end; key_start += kBlockSizeJ) {
            output_frags.clear();
            int32_t query_start = getQueryStart(p, key_start);
            int32_t query_end =
                query_start + (p.num_queries - query_start) / kBlockSizeI * kBlockSizeI;
            for (; query_start < query_end; query_start += kBlockSizeI) {
                processBlockIJ<true>(
                    shared_storage, output_frags, p, query_start, key_start, warp_id, lane_id);
            }
            // last (partial) query
            if (query_start < p.num_queries) {
                processBlockIJ<false>(
                    shared_storage, output_frags, p, query_start, key_start, warp_id, lane_id);
            }
            if (kOutputInRF) {
                writeFragsToGmem<true>(
                    shared_storage, output_frags, p, key_start, warp_id, lane_id);
            } else if (getQueryStart(p, key_start) >= p.num_queries) {
                zfillGradKV<true>(p, key_start, warp_id, lane_id);
            }
            __syncthreads();
        }
        // Last (partial) key
        if (key_start != p.num_keys) {
            output_frags.clear();
            int32_t query_start = getQueryStart(p, key_start);
            for (; query_start < p.num_queries; query_start += kBlockSizeI) {
                warp_id = warp_uniform(warp_id);
                processBlockIJ<false>(
                    shared_storage, output_frags, p, query_start, key_start, warp_id, lane_id);
            }
            if (kOutputInRF) {
                writeFragsToGmem<false>(
                    shared_storage, output_frags, p, key_start, warp_id, lane_id);
            } else if (getQueryStart(p, key_start) >= p.num_queries) {
                zfillGradKV<false>(p, key_start, warp_id, lane_id);
            }
        }
    }
    static CUTLASS_DEVICE void loadDi(cutlass::Array<accum_t, kBlockSizeI>& di,
                                      Params const& p,
                                      int32_t query_start)
    {
        int32_t thread_id = threadIdx.x + threadIdx.y * blockDim.x;
        if (thread_id < kBlockSizeI) {
            accum_t di_rf = accum_t(0);
            if (query_start + thread_id < p.num_queries) {
                di_rf = p.delta_ptr[query_start + thread_id];
            }
            di[thread_id] = di_rf;
        }
    }
    template <bool skipBoundsChecks>
    static CUTLASS_DEVICE void zfillGradKV(Params const& p,
                                           int32_t key_start,
                                           uint8_t warp_id,
                                           uint8_t lane_id)
    {
        constexpr int kThreadsPerKey = 8;
        constexpr int kParallelKeys = kNumThreads / kThreadsPerKey;
        static_assert(kBlockSizeJ % kParallelKeys == 0, "");
        // This function is not really optimized, but should rarely be used
        // It's only used when some keys are "useless" and don't attend to
        // any query, due to causal masking
        int thread_id = 32 * warp_id + lane_id;
        int k_shift = lane_id % kThreadsPerKey;

        CUTLASS_PRAGMA_UNROLL
        for (int j = 0; j < kBlockSizeJ; j += kParallelKeys) {
            int key = key_start + j + (thread_id / kThreadsPerKey);
            if (!skipBoundsChecks && key >= p.num_keys) { continue; }
            auto gv_ptr = p.grad_value_ptr + key * p.gV_strideM();
            auto gk_ptr = p.grad_key_ptr + key * p.gK_strideM();

            for (int k = k_shift; k < p.head_dim_value; k += kThreadsPerKey) {
                gv_ptr[k] = scalar_t(0);
            }
            for (int k = k_shift; k < p.head_dim; k += kThreadsPerKey) { gk_ptr[k] = scalar_t(0); }
        }
    }
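    // processBlockIJ handles one (query block i, key block j) tile: it recomputes
    // Pij.T from Kj @ Qi.T and the saved logsumexp, then chains the dVj, dOi @ Vj.T,
    // dQi and dKj GEMMs, reusing shared memory between the steps as laid out in
    // SharedStorage above.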
    template <bool skipBoundsChecks>
    static CUTLASS_DEVICE void processBlockIJ(SharedStorage& shared_storage,
                                              OutputFragments& output_frags,
                                              Params& p,
                                              int32_t query_start,
                                              int32_t key_start,
                                              uint8_t warp_id,
                                              uint8_t lane_id)
    {
        cutlass::MatrixCoord no_offset{0, 0};
        accum_t scale = p.scale;
        int16_t thread_id = 32 * warp_id + lane_id;
        auto rematerializeThreadIds = [&]() {
            // Prevents `nvcc` from keeping values deduced from
            // `thread_id`, `warp_id`, ... in RF - to reduce register pressure
            warp_id = warp_uniform(thread_id / 32);
            lane_id = thread_id % 32;
            thread_id = 32 * warp_id + lane_id;
        };

        bool isFirstQuery = (query_start == getQueryStart(p, key_start));
        int32_t next_query, next_key;
        incrIteration(p, query_start, key_start, next_query, next_key);
        bool isLastQuery = next_key != key_start;
        __syncthreads();
        loadDi(shared_storage.di(), p, query_start);

        int32_t num_queries_in_block =
            skipBoundsChecks ? MatmulQK::Mma::Shape::kN
                             : warp_uniform(cutlass::fast_min((int32_t)MatmulQK::Mma::Shape::kN,
                                                              p.num_queries - query_start));
        int32_t num_keys_in_block =
            skipBoundsChecks ? MatmulQK::Mma::Shape::kM
                             : warp_uniform(cutlass::fast_min((int32_t)MatmulQK::Mma::Shape::kM,
                                                              p.num_keys - key_start));

        auto prologueGradV = [&](int col) {
            typename MatmulGradV::Mma::IteratorB iterator_dO(
                {int32_t(p.gO_strideM)},
                p.grad_output_ptr + query_start * p.gO_strideM + col,
                {num_queries_in_block, p.head_dim_value - col},
                thread_id,
                no_offset);
            MatmulGradV::Mma::prologue(
                shared_storage.mm_gradV(), iterator_dO, thread_id, num_queries_in_block);
        };
        auto prologueGradQ = [&](int col) {
            typename MatmulGradQ::Mma::IteratorB iterator_K(
                {int32_t(p.k_strideM)},
                p.key_ptr + key_start * p.k_strideM + col,
                {num_keys_in_block, p.head_dim - col},
                thread_id,
                no_offset);
            MatmulGradQ::Mma::prologue(
                shared_storage.mm_gradQ(), iterator_K, thread_id, num_keys_in_block);
        };
        auto prologueGradK = [&](int col) {
            typename MatmulGradK::Mma::IteratorB iterator_Q(
                {int32_t(p.q_strideM)},
                p.query_ptr + query_start * p.q_strideM + col,
                {num_queries_in_block, p.head_dim - col},
                thread_id,
                no_offset);
            MatmulGradK::Mma::prologue(
                shared_storage.mm_gradK(), iterator_Q, thread_id, num_queries_in_block);
        };
        auto prologueDOV = [&]() {
            typename MatmulDOIVJ::Mma::IteratorA iterator_A(
                {int32_t(p.gO_strideM)},
                p.grad_output_ptr + query_start * p.gO_strideM,
                {num_queries_in_block, p.head_dim_value},
                thread_id,
                no_offset);
            typename MatmulDOIVJ::Mma::IteratorB iterator_B({int32_t(p.v_strideM)},
                                                            p.value_ptr + key_start * p.v_strideM,
                                                            {p.head_dim_value, num_keys_in_block},
                                                            thread_id,
                                                            no_offset);
            MatmulDOIVJ::Mma::prologue(
                shared_storage.mm_doivj(), iterator_A, iterator_B, thread_id, p.head_dim_value);
        };
        /////////////////////////////////////////////////////////////////////////////////////////////////
        // MatmulQK
        /////////////////////////////////////////////////////////////////////////////////////////////////
        {
            using Mma = typename MatmulQK::Mma;

            cutlass::gemm::GemmCoord problem_size(num_keys_in_block,
                                                  num_queries_in_block,
                                                  p.head_dim  // k
            );

            // k_j
            typename Mma::IteratorA iterator_A({int32_t(p.k_strideM)},
                                               p.key_ptr + key_start * p.k_strideM,
                                               {problem_size.m(), problem_size.k()},
                                               thread_id,
                                               no_offset);

            // q_i.transpose(-2, -1)
            typename Mma::IteratorB iterator_B({int32_t(p.q_strideM)},
                                               p.query_ptr + query_start * p.q_strideM,
                                               {problem_size.k(), problem_size.n()},
                                               thread_id,
                                               no_offset);

            Mma mma(
                shared_storage.mm_qk_k(), shared_storage.mm_qk_q(), thread_id, warp_id, lane_id);

            typename Mma::FragmentC accum;

            accum.clear();

            auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;

            // Compute threadblock-scoped matrix multiply-add
            mma.set_prologue_done(kPrologueQK);
            mma.set_zero_outside_bounds(!skipBoundsChecks);
            mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);

            // Epilogue: add LSE + exp and store that to our shared memory buffer
            // shmem <- (matmul_result -
            // logsumexp[i_start:i_end].unsqueeze(1)).exp()
            int warp_idx_mn_0 = warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
            auto output_tile_coords = cutlass::MatrixCoord{
                warp_idx_mn_0 % Mma::Base::WarpCount::kM, warp_idx_mn_0 / Mma::Base::WarpCount::kM};

            if (broadcast_1::kEnable || broadcast_2::kEnable) {
                cutlass::TensorRef<float, cutlass::layout::RowMajor> bias_tensor_ref(
                    shared_storage.bias().data(),
                    cutlass::layout::RowMajor(MatmulQK::ThreadblockShape::kM));
                using Shape = cutlass::MatrixShape<MatmulQK::ThreadblockShape::kM,
                                                   MatmulQK::ThreadblockShape::kN>;
                AttentionBiasEpilogue<Shape,
                                      scalar_t,
                                      MatmulQK::MmaCore::kThreads,
                                      Broadcast1_,
                                      Broadcast2_>
                    bias_epilogue;
                bias_epilogue(bias_tensor_ref,
                              p.bias1_ptr + key_start,
                              p.bias2_ptr + query_start * p.num_keys + key_start,
                              thread_id,
                              {num_queries_in_block, num_keys_in_block},
                              p.num_keys);
                // Pij += Bij, Pij is in register fragment and Bij is in shared memory
                auto lane_offset = MatmulQK::AccumLambdaIterator::get_lane_offset(
                    lane_id, warp_id, output_tile_coords);
                MatmulQK::AccumLambdaIterator::iterateRows(
                    lane_offset,
                    [&](int accum_n) {},
                    [&](int accum_m, int accum_n, int idx) {
                        // remember we are transposed
                        accum[idx] = accum[idx] * scale + bias_tensor_ref.at({accum_n, accum_m});
                    },
                    [&](int accum_n) {});
            } else {
                accum = cutlass::multiplies<typename Mma::FragmentC>()(scale, accum);
            }

            __syncthreads();
            if (kPrologueGV) { prologueGradV(0); }
            if (kPrologueDOV) { prologueDOV(); }

            MatmulQK::B2bGemm::accumApplyLSEToSmem(shared_storage.attn_shared_storage(),
                                                   accum,
                                                   p.logsumexp_ptr + query_start,
                                                   problem_size.n(),
                                                   thread_id,
                                                   warp_id,
                                                   lane_id,
                                                   output_tile_coords);

            __syncthreads();
        }
        rematerializeThreadIds();
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1352 |
+
// GradV matmul
|
| 1353 |
+
//
|
| 1354 |
+
// grad_v[j_start:j_end] += attn_T @ do_i
|
| 1355 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1356 |
+
constexpr bool kSingleIterationGradV = kMaxK <= MatmulGradV::ThreadblockShape::kN;
|
| 1357 |
+
for (int col = 0; col < (kSingleIterationGradV ? 1 : p.head_dim_value);
|
| 1358 |
+
col += MatmulGradV::ThreadblockShape::kN) {
|
| 1359 |
+
using Mma = typename MatmulGradV::Mma;
|
| 1360 |
+
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
|
| 1361 |
+
|
| 1362 |
+
cutlass::gemm::GemmCoord problem_size(
|
| 1363 |
+
num_keys_in_block, p.head_dim_value - col, num_queries_in_block);
|
| 1364 |
+
auto createEpilogueIter = [&]() {
|
| 1365 |
+
return typename MatmulGradV::OutputTileIterator(
|
| 1366 |
+
typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()},
|
| 1367 |
+
p.grad_value_ptr + key_start * p.gV_strideM() + col,
|
| 1368 |
+
{num_keys_in_block, p.head_dim_value - col},
|
| 1369 |
+
thread_id);
|
| 1370 |
+
};
|
| 1371 |
+
typename Mma::IteratorB iterator_B({int32_t(p.gO_strideM)},
|
| 1372 |
+
p.grad_output_ptr + query_start * p.gO_strideM + col,
|
| 1373 |
+
{num_queries_in_block, p.head_dim_value - col},
|
| 1374 |
+
thread_id,
|
| 1375 |
+
no_offset);
|
| 1376 |
+
|
| 1377 |
+
// if dropout: dVj += (Pij.T * Zij) @ dOi
|
| 1378 |
+
// otherwise: dVj += Pij.T @ dOi
|
| 1379 |
+
Mma mma(shared_storage.mm_gradV(),
|
| 1380 |
+
// operand A: Pij
|
| 1381 |
+
typename MatmulGradV::WarpIteratorA(
|
| 1382 |
+
shared_storage.attn_shared_storage().accum_ref(), lane_id),
|
| 1383 |
+
// if we're using dropout, operand A is Pij_dropped = Pij * Zij
|
| 1384 |
+
// which is computed on the fly as fragments of Pij are loaded in
|
| 1385 |
+
typename Mma::WarpIteratorAScale(shared_storage.zij().accum_ref(), lane_id),
|
| 1386 |
+
thread_id,
|
| 1387 |
+
warp_id,
|
| 1388 |
+
lane_id);
|
| 1389 |
+
|
| 1390 |
+
int storage_id = col / MatmulGradV::ThreadblockShape::kN;
|
| 1391 |
+
AccumTileGmem gmem_tile{p.workspace_gv + storage_id * AccumTileGmem::kElementsStored};
|
| 1392 |
+
if (!kOutputInRF) {
|
| 1393 |
+
if (isFirstQuery || !kNeedsAccumGradV) {
|
| 1394 |
+
output_frags.gradV.clear();
|
| 1395 |
+
} else {
|
| 1396 |
+
gmem_tile.load(output_frags.gradV, thread_id);
|
| 1397 |
+
}
|
| 1398 |
+
}
|
| 1399 |
+
mma.set_prologue_done(kPrologueGV);
|
| 1400 |
+
|
| 1401 |
+
auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
|
| 1402 |
+
|
| 1403 |
+
// Compute threadblock-scoped matrix multiply-add
|
| 1404 |
+
__syncthreads();
|
| 1405 |
+
|
| 1406 |
+
mma(gemm_k_iterations, output_frags.gradV, iterator_B, output_frags.gradV);
|
| 1407 |
+
__syncthreads();
|
| 1408 |
+
if (kPrologueGV && !kSingleIterationGradV &&
|
| 1409 |
+
col + MatmulGradV::ThreadblockShape::kN < p.head_dim_value) {
|
| 1410 |
+
prologueGradV(col + MatmulGradV::ThreadblockShape::kN);
|
| 1411 |
+
}
|
| 1412 |
+
|
| 1413 |
+
if (!kOutputInRF) {
|
| 1414 |
+
if (kNeedsAccumGradV && !isLastQuery) {
|
| 1415 |
+
gmem_tile.store(output_frags.gradV, thread_id);
|
| 1416 |
+
} else {
|
| 1417 |
+
accumulateInGmem<MatmulGradV>(shared_storage.gradV_epilogue(),
|
| 1418 |
+
output_frags.gradV,
|
| 1419 |
+
createEpilogueIter(),
|
| 1420 |
+
isFirstQuery || kNeedsAccumGradV,
|
| 1421 |
+
warp_id,
|
| 1422 |
+
lane_id);
|
| 1423 |
+
}
|
| 1424 |
+
}
|
| 1425 |
+
}
|
| 1426 |
+
__syncthreads();
|
| 1427 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1428 |
+
// MatmulDOIVJ
|
| 1429 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1430 |
+
{
|
| 1431 |
+
using Mma = typename MatmulDOIVJ::Mma;
|
| 1432 |
+
// do_i
|
| 1433 |
+
typename Mma::IteratorA iterator_A({int32_t(p.gO_strideM)},
|
| 1434 |
+
p.grad_output_ptr + query_start * p.gO_strideM,
|
| 1435 |
+
{num_queries_in_block, p.head_dim_value},
|
| 1436 |
+
thread_id,
|
| 1437 |
+
no_offset);
|
| 1438 |
+
|
| 1439 |
+
// v_j.transpose(-2, -1)
|
| 1440 |
+
typename Mma::IteratorB iterator_B({int32_t(p.v_strideM)},
|
| 1441 |
+
p.value_ptr + key_start * p.v_strideM,
|
| 1442 |
+
{p.head_dim_value, num_keys_in_block},
|
| 1443 |
+
thread_id,
|
| 1444 |
+
no_offset);
|
| 1445 |
+
|
| 1446 |
+
Mma mma(shared_storage.mm_doivj(), thread_id, warp_id, lane_id);
|
| 1447 |
+
mma.set_prologue_done(kPrologueDOV);
|
| 1448 |
+
mma.set_zero_outside_bounds(!skipBoundsChecks);
|
| 1449 |
+
|
| 1450 |
+
typename Mma::FragmentC accum;
|
| 1451 |
+
|
| 1452 |
+
accum.clear();
|
| 1453 |
+
|
| 1454 |
+
auto gemm_k_iterations = (p.head_dim_value + Mma::Shape::kK - 1) / Mma::Shape::kK;
|
| 1455 |
+
|
| 1456 |
+
// Compute threadblock-scoped matrix multiply-add
|
| 1457 |
+
mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
|
| 1458 |
+
__syncthreads();
|
| 1459 |
+
if (kPrologueGQ) { prologueGradQ(0); }
|
| 1460 |
+
if (kPrologueGK) { prologueGradK(0); }
|
| 1461 |
+
|
| 1462 |
+
int warp_idx_mn_0 = warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN);
|
| 1463 |
+
auto output_tile_coords = cutlass::MatrixCoord{
|
| 1464 |
+
warp_idx_mn_0 % Mma::Base::WarpCount::kM, warp_idx_mn_0 / Mma::Base::WarpCount::kM};
|
| 1465 |
+
// TODO: This must be terribly inefficient. There must be a better way
|
| 1466 |
+
// tmp [RF] <- (accum [RF] - Di [smem] ) * attn_T.T [smem]
|
| 1467 |
+
// attn_shared_storage [smem] <- tmp.T
|
| 1468 |
+
// tmp_shared_storage [smem] <- tmp
|
| 1469 |
+
{
|
| 1470 |
+
using LambdaIterator =
|
| 1471 |
+
typename DefaultMmaAccumLambdaIterator<typename Mma::Operator::IteratorC,
|
| 1472 |
+
typename MatmulDOIVJ::ElementAccum,
|
| 1473 |
+
kWarpSize>::Iterator;
|
| 1474 |
+
auto lane_offset =
|
| 1475 |
+
LambdaIterator::get_lane_offset(lane_id, warp_id, output_tile_coords);
|
| 1476 |
+
|
| 1477 |
+
auto attn_T = shared_storage.attn_shared_storage().accum_ref();
|
| 1478 |
+
accum_t current_di;
|
| 1479 |
+
// dSij = (dPij - Di) * Pij
|
| 1480 |
+
LambdaIterator::iterateRows(
|
| 1481 |
+
lane_offset,
|
| 1482 |
+
[&](int accum_m) { current_di = shared_storage.di()[accum_m]; },
|
| 1483 |
+
[&](int accum_m, int accum_n, int idx) {
|
| 1484 |
+
if (skipBoundsChecks ||
|
| 1485 |
+
(accum_m < num_queries_in_block && accum_n < num_keys_in_block)) {
|
| 1486 |
+
accum_t attn = attn_T.at({accum_n, accum_m});
|
| 1487 |
+
accum[idx] = (accum[idx] - current_di) * attn;
|
| 1488 |
+
} else {
|
| 1489 |
+
accum[idx] = 0;
|
| 1490 |
+
}
|
| 1491 |
+
},
|
| 1492 |
+
[&](int accum_m) {
|
| 1493 |
+
|
| 1494 |
+
});
|
| 1495 |
+
|
| 1496 |
+
using DefaultGemm = typename MatmulDOIVJ::DefaultGemm;
|
| 1497 |
+
using OutputOp = typename MatmulDOIVJ::BiasGradEpilogueOutputOp;
|
| 1498 |
+
if (broadcast_1::kEnable && p.grad_bias1_ptr) {
|
| 1499 |
+
using Epilogue =
|
| 1500 |
+
typename BiasGradEpilogueAffineRankN<ArchTag,
|
| 1501 |
+
2,
|
| 1502 |
+
typename MatmulDOIVJ::ThreadblockShape,
|
| 1503 |
+
typename DefaultGemm::Mma::Operator,
|
| 1504 |
+
DefaultGemm::kPartitionsK,
|
| 1505 |
+
OutputOp,
|
| 1506 |
+
OutputOp::kCount>::Epilogue;
|
| 1507 |
+
cutlass::layout::AffineRankN<2> layout({0, 1});
|
| 1508 |
+
auto dst_ptr = p.grad_bias1_ptr + key_start;
|
| 1509 |
+
typename Epilogue::OutputTileIterator output_iter(
|
| 1510 |
+
{layout},
|
| 1511 |
+
dst_ptr,
|
| 1512 |
+
{num_queries_in_block, num_keys_in_block},
|
| 1513 |
+
(int)thread_id);
|
| 1514 |
+
Epilogue epilogue(shared_storage.gradB_epilogue(),
|
| 1515 |
+
(int)thread_id,
|
| 1516 |
+
(int)warp_id,
|
| 1517 |
+
(int)lane_id);
|
| 1518 |
+
epilogue(OutputOp(1), output_iter, accum);
|
| 1519 |
+
}
|
| 1520 |
+
|
| 1521 |
+
if (broadcast_2::kEnable && p.grad_bias2_ptr) {
|
| 1522 |
+
if (broadcast_1::kEnable) { __syncthreads(); }
|
| 1523 |
+
using Epilogue =
|
| 1524 |
+
typename BiasGradEpilogue<ArchTag,
|
| 1525 |
+
typename MatmulDOIVJ::ThreadblockShape,
|
| 1526 |
+
typename DefaultGemm::Mma::Operator,
|
| 1527 |
+
DefaultGemm::kPartitionsK,
|
| 1528 |
+
OutputOp,
|
| 1529 |
+
OutputOp::kCount>::Epilogue;
|
| 1530 |
+
typename Epilogue::OutputTileIterator::Params params{p.num_keys};
|
| 1531 |
+
auto dst_ptr = p.grad_bias2_ptr + query_start * p.num_keys + key_start;
|
| 1532 |
+
typename Epilogue::OutputTileIterator output_iter(
|
| 1533 |
+
params, dst_ptr, {num_queries_in_block, num_keys_in_block}, (int)thread_id);
|
| 1534 |
+
Epilogue epilogue(shared_storage.gradB_epilogue(),
|
| 1535 |
+
(int)thread_id,
|
| 1536 |
+
(int)warp_id,
|
| 1537 |
+
(int)lane_id);
|
| 1538 |
+
epilogue(OutputOp(1), output_iter, accum);
|
| 1539 |
+
}
|
| 1540 |
+
|
| 1541 |
+
accum = accum * scale;
|
| 1542 |
+
|
| 1543 |
+
__syncthreads();
|
| 1544 |
+
if (!MatmulGradK::DefaultMmaFromSmem::kIsTransposedA) {
|
| 1545 |
+
auto tmpT = shared_storage.tmpT_shared_storage().accum_ref();
|
| 1546 |
+
// attn <- attn_T.T
|
| 1547 |
+
LambdaIterator::iterateRows(
|
| 1548 |
+
lane_offset,
|
| 1549 |
+
[&](int accum_m) {},
|
| 1550 |
+
[&](int accum_m, int accum_n, int idx) {
|
| 1551 |
+
tmpT.at({accum_n, accum_m}) = scalar_t(accum[idx]);
|
| 1552 |
+
},
|
| 1553 |
+
[&](int accum_m) {});
|
| 1554 |
+
}
|
| 1555 |
+
}
|
| 1556 |
+
|
| 1557 |
+
MatmulDOIVJ::B2bGemm::accumToSmem(
|
| 1558 |
+
shared_storage.tmp_shared_storage(), accum, lane_id, output_tile_coords);
|
| 1559 |
+
__syncthreads();
|
| 1560 |
+
}
|
| 1561 |
+
p.head_dim = warp_uniform(p.head_dim);
|
| 1562 |
+
p.k_strideM = warp_uniform(p.k_strideM);
|
| 1563 |
+
rematerializeThreadIds();
|
| 1564 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1565 |
+
// GradQ matmul
|
| 1566 |
+
//
|
| 1567 |
+
// grad_q[i_start:i_end] += tmp @ k_j
|
| 1568 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1569 |
+
// Skip the loop & associated branches if we know at compile time the number
|
| 1570 |
+
// of iterations
|
| 1571 |
+
constexpr bool kSingleIterationGradQ = kMaxK <= MatmulGradQ::ThreadblockShape::kN;
|
| 1572 |
+
for (int col = 0; col < (kSingleIterationGradQ ? 1 : p.head_dim);
|
| 1573 |
+
col += MatmulGradQ::ThreadblockShape::kN) {
|
| 1574 |
+
using Mma = typename MatmulGradQ::Mma;
|
| 1575 |
+
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
|
| 1576 |
+
|
| 1577 |
+
cutlass::gemm::GemmCoord problem_size(
|
| 1578 |
+
num_queries_in_block,
|
| 1579 |
+
false ? MatmulGradQ::ThreadblockShape::kN : p.head_dim - col,
|
| 1580 |
+
num_keys_in_block);
|
| 1581 |
+
|
| 1582 |
+
// k_j
|
| 1583 |
+
typename Mma::IteratorB iterator_B({int32_t(p.k_strideM)},
|
| 1584 |
+
p.key_ptr + key_start * p.k_strideM + col,
|
| 1585 |
+
{problem_size.k(), problem_size.n()},
|
| 1586 |
+
thread_id,
|
| 1587 |
+
no_offset);
|
| 1588 |
+
|
| 1589 |
+
auto a = shared_storage.tmp_shared_storage().accum_ref();
|
| 1590 |
+
Mma mma(shared_storage.mm_gradQ(),
|
| 1591 |
+
shared_storage.tmp_shared_storage(),
|
| 1592 |
+
thread_id,
|
| 1593 |
+
warp_id,
|
| 1594 |
+
lane_id,
|
| 1595 |
+
problem_size.k());
|
| 1596 |
+
|
| 1597 |
+
typename Mma::FragmentC accum;
|
| 1598 |
+
|
| 1599 |
+
bool isFirst = key_start == 0;
|
| 1600 |
+
int col_id = col / MatmulGradQ::ThreadblockShape::kN;
|
| 1601 |
+
int num_cols =
|
| 1602 |
+
kSingleIterationGradQ ? 1 : ceil_div(p.head_dim, MatmulGradQ::ThreadblockShape::kN);
|
| 1603 |
+
int storage_id = (col_id + query_start / kBlockSizeI * num_cols);
|
| 1604 |
+
AccumTileGmem gmem_tile{p.workspace_gq + storage_id * AccumTileGmem::kElementsStored};
|
| 1605 |
+
if (isFirst || !kNeedsAccumGradQ) {
|
| 1606 |
+
accum.clear();
|
| 1607 |
+
} else {
|
| 1608 |
+
gmem_tile.load(accum, thread_id);
|
| 1609 |
+
}
|
| 1610 |
+
|
| 1611 |
+
auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
|
| 1612 |
+
|
| 1613 |
+
// Compute threadblock-scoped matrix multiply-add
|
| 1614 |
+
__syncthreads();
|
| 1615 |
+
mma.set_prologue_done(kPrologueGQ);
|
| 1616 |
+
mma(gemm_k_iterations, accum, iterator_B, accum);
|
| 1617 |
+
__syncthreads();
|
| 1618 |
+
bool isLastColumn = kSingleIterationGradQ ||
|
| 1619 |
+
(col + MatmulGradQ::ThreadblockShape::kN >= p.head_dim);
|
| 1620 |
+
if (kPrologueGQ && !isLastColumn) {
|
| 1621 |
+
prologueGradQ(col + MatmulGradQ::ThreadblockShape::kN);
|
| 1622 |
+
}
|
| 1623 |
+
|
| 1624 |
+
// Output results
|
| 1625 |
+
int32_t next_query, next_key;
|
| 1626 |
+
incrIteration(p, p.num_queries, key_start, next_query, next_key);
|
| 1627 |
+
bool isLast = next_query > query_start || next_key >= p.num_keys;
|
| 1628 |
+
if (kNeedsAccumGradQ && !isLast) {
|
| 1629 |
+
gmem_tile.store(accum, thread_id);
|
| 1630 |
+
} else {
|
| 1631 |
+
typename MatmulGradQ::OutputTileIterator output_it(
|
| 1632 |
+
typename MatmulGradQ::OutputTileIterator::Params{p.gQ_strideM()},
|
| 1633 |
+
p.grad_query_ptr + query_start * p.gQ_strideM() + col,
|
| 1634 |
+
{problem_size.m(), problem_size.n()},
|
| 1635 |
+
thread_id);
|
| 1636 |
+
accumulateInGmem<MatmulGradQ>(isLastColumn
|
| 1637 |
+
? shared_storage.gradQ_epilogue_lastIter()
|
| 1638 |
+
: shared_storage.gradQ_epilogue(),
|
| 1639 |
+
accum,
|
| 1640 |
+
output_it,
|
| 1641 |
+
isFirst || kNeedsAccumGradQ,
|
| 1642 |
+
warp_id,
|
| 1643 |
+
lane_id);
|
| 1644 |
+
}
|
| 1645 |
+
}
|
| 1646 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1647 |
+
// GradK matmul
|
| 1648 |
+
//
|
| 1649 |
+
// grad_k[i_start:i_end] += tmp.transpose(-2, -1) @ q_i
|
| 1650 |
+
/////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1651 |
+
rematerializeThreadIds();
|
| 1652 |
+
|
| 1653 |
+
constexpr bool kSingleIterationGradK = kMaxK <= MatmulGradK::ThreadblockShape::kN;
|
| 1654 |
+
for (int col = 0; col < (kSingleIterationGradK ? 1 : p.head_dim);
|
| 1655 |
+
col += MatmulGradK::ThreadblockShape::kN) {
|
| 1656 |
+
using Mma = typename MatmulGradK::Mma;
|
| 1657 |
+
using AccumTileGmem = typename MatmulGradQ::AccumTileGmem;
|
| 1658 |
+
|
| 1659 |
+
cutlass::gemm::GemmCoord problem_size(
|
| 1660 |
+
num_keys_in_block,
|
| 1661 |
+
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col,
|
| 1662 |
+
num_queries_in_block);
|
| 1663 |
+
auto createEpilogueIter = [&]() {
|
| 1664 |
+
return typename MatmulGradK::OutputTileIterator(
|
| 1665 |
+
typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()},
|
| 1666 |
+
p.grad_key_ptr + key_start * p.gK_strideM() + col,
|
| 1667 |
+
{num_keys_in_block,
|
| 1668 |
+
false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col},
|
| 1669 |
+
thread_id);
|
| 1670 |
+
};
|
| 1671 |
+
|
| 1672 |
+
// q_i
|
| 1673 |
+
typename Mma::IteratorB iterator_B({int32_t(p.q_strideM)},
|
| 1674 |
+
p.query_ptr + query_start * p.q_strideM + col,
|
| 1675 |
+
{problem_size.k(), problem_size.n()},
|
| 1676 |
+
thread_id,
|
| 1677 |
+
no_offset);
|
| 1678 |
+
|
| 1679 |
+
auto getTmp = [&](int) { return &shared_storage.tmp_shared_storage(); };
|
| 1680 |
+
auto getTmpT = [&](int) { return &shared_storage.tmpT_shared_storage(); };
|
| 1681 |
+
// this is basically:
|
| 1682 |
+
// opA = kIsTransposedA ? getTmp() : getTmpT();
|
| 1683 |
+
bool constexpr kIsTransposedA = MatmulGradK::DefaultMmaFromSmem::kIsTransposedA;
|
| 1684 |
+
auto& opA =
|
| 1685 |
+
*call_conditional<kIsTransposedA, decltype(getTmp), decltype(getTmpT)>::apply(
|
| 1686 |
+
getTmp, getTmpT, 0);
|
| 1687 |
+
Mma mma(shared_storage.mm_gradK(), opA, thread_id, warp_id, lane_id, problem_size.k());
|
| 1688 |
+
|
| 1689 |
+
int storage_id = col / MatmulGradK::ThreadblockShape::kN;
|
| 1690 |
+
AccumTileGmem gmem_tile{p.workspace_gk + storage_id * AccumTileGmem::kElementsStored};
|
| 1691 |
+
if (!kOutputInRF) {
|
| 1692 |
+
if (isFirstQuery || !kNeedsAccumGradK) {
|
| 1693 |
+
output_frags.gradK.clear();
|
| 1694 |
+
} else {
|
| 1695 |
+
gmem_tile.load(output_frags.gradK, thread_id);
|
| 1696 |
+
}
|
| 1697 |
+
}
|
| 1698 |
+
mma.set_prologue_done(kPrologueGK);
|
| 1699 |
+
|
| 1700 |
+
auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
|
| 1701 |
+
|
| 1702 |
+
// Compute threadblock-scoped matrix multiply-add
|
| 1703 |
+
__syncthreads();
|
| 1704 |
+
|
| 1705 |
+
mma(gemm_k_iterations, output_frags.gradK, iterator_B, output_frags.gradK);
|
| 1706 |
+
__syncthreads();
|
| 1707 |
+
bool isLastColumn = kSingleIterationGradK ||
|
| 1708 |
+
col + MatmulGradK::ThreadblockShape::kN >= p.head_dim;
|
| 1709 |
+
if (kPrologueGK && !isLastColumn) {
|
| 1710 |
+
prologueGradK(col + MatmulGradK::ThreadblockShape::kN);
|
| 1711 |
+
}
|
| 1712 |
+
|
| 1713 |
+
if (kPrologueQK && isLastColumn) {
|
| 1714 |
+
int32_t next_query, next_key;
|
| 1715 |
+
incrIteration(p, query_start, key_start, next_query, next_key);
|
| 1716 |
+
DISPATCH_BOOL(next_key != key_start, kForceReloadK, ([&]() {
|
| 1717 |
+
prologueQkNextIteration<kForceReloadK>(
|
| 1718 |
+
shared_storage, p, next_query, next_key, warp_id, lane_id);
|
| 1719 |
+
}));
|
| 1720 |
+
}
|
| 1721 |
+
|
| 1722 |
+
// Output results
|
| 1723 |
+
if (!kOutputInRF) {
|
| 1724 |
+
if (kNeedsAccumGradK && !isLastQuery) {
|
| 1725 |
+
gmem_tile.store(output_frags.gradK, thread_id);
|
| 1726 |
+
} else {
|
| 1727 |
+
accumulateInGmem<MatmulGradK>(isLastColumn
|
| 1728 |
+
? shared_storage.gradK_epilogue_final()
|
| 1729 |
+
: shared_storage.gradK_epilogue(),
|
| 1730 |
+
output_frags.gradK,
|
| 1731 |
+
createEpilogueIter(),
|
| 1732 |
+
isFirstQuery || kNeedsAccumGradK,
|
| 1733 |
+
warp_id,
|
| 1734 |
+
lane_id);
|
| 1735 |
+
__syncthreads();
|
| 1736 |
+
}
|
| 1737 |
+
}
|
| 1738 |
+
}
|
| 1739 |
+
}
|
| 1740 |
+
|
| 1741 |
+
static CUTLASS_DEVICE int32_t getQueryStart(Params const& p, int32_t key_start) { return 0; };
|
| 1742 |
+
|
| 1743 |
+
static CUTLASS_DEVICE void incrIteration(Params const& p,
|
| 1744 |
+
int32_t query_start,
|
| 1745 |
+
int32_t key_start,
|
| 1746 |
+
int32_t& next_query,
|
| 1747 |
+
int32_t& next_key)
|
| 1748 |
+
{
|
| 1749 |
+
next_query = query_start + kBlockSizeI;
|
| 1750 |
+
next_key = key_start;
|
| 1751 |
+
if (next_query >= p.num_queries) {
|
| 1752 |
+
next_key = key_start + kBlockSizeJ;
|
| 1753 |
+
next_query = getQueryStart(p, next_key);
|
| 1754 |
+
}
|
| 1755 |
+
}
|
| 1756 |
+
|
| 1757 |
+
template <bool kForceReloadK>
|
| 1758 |
+
static CUTLASS_DEVICE void prologueQkNextIteration(SharedStorage& shared_storage,
|
| 1759 |
+
Params const& p,
|
| 1760 |
+
int32_t query_start,
|
| 1761 |
+
int32_t key_start,
|
| 1762 |
+
uint8_t warp_id,
|
| 1763 |
+
uint8_t lane_id)
|
| 1764 |
+
{
|
| 1765 |
+
if (query_start >= p.num_queries || key_start >= p.num_keys) { return; }
|
| 1766 |
+
|
| 1767 |
+
static constexpr bool kReloadK = kForceReloadK || !MatmulQK::Mma::kSmemContainsEntireMat;
|
| 1768 |
+
int thread_id = 32 * warp_id + lane_id;
|
| 1769 |
+
typename MatmulQK::Mma::IteratorA iterator_A({int32_t(p.k_strideM)},
|
| 1770 |
+
p.key_ptr + key_start * p.k_strideM,
|
| 1771 |
+
{p.num_keys - key_start, p.head_dim},
|
| 1772 |
+
thread_id,
|
| 1773 |
+
cutlass::MatrixCoord{0, 0});
|
| 1774 |
+
|
| 1775 |
+
typename MatmulQK::Mma::IteratorB iterator_B({int32_t(p.q_strideM)},
|
| 1776 |
+
p.query_ptr + query_start * p.q_strideM,
|
| 1777 |
+
{p.head_dim, p.num_queries - query_start},
|
| 1778 |
+
thread_id,
|
| 1779 |
+
cutlass::MatrixCoord{0, 0});
|
| 1780 |
+
|
| 1781 |
+
MatmulQK::Mma::prologue<kReloadK, true>(shared_storage.mm_qk_k(),
|
| 1782 |
+
shared_storage.mm_qk_q(),
|
| 1783 |
+
iterator_A,
|
| 1784 |
+
iterator_B,
|
| 1785 |
+
thread_id,
|
| 1786 |
+
p.head_dim);
|
| 1787 |
+
}
|
| 1788 |
+
|
| 1789 |
+
template <bool skipBoundsChecks>
|
| 1790 |
+
static CUTLASS_DEVICE void writeFragsToGmem(SharedStorage& shared_storage,
|
| 1791 |
+
OutputFragments& output_frags,
|
| 1792 |
+
Params const& p,
|
| 1793 |
+
int32_t key_start,
|
| 1794 |
+
uint8_t warp_id,
|
| 1795 |
+
uint8_t lane_id)
|
| 1796 |
+
{
|
| 1797 |
+
uint16_t thread_id = 32 * warp_id + lane_id;
|
| 1798 |
+
int32_t num_keys_in_block =
|
| 1799 |
+
skipBoundsChecks
|
| 1800 |
+
? MatmulQK::Mma::Shape::kM
|
| 1801 |
+
: cutlass::fast_min((int32_t)MatmulQK::Mma::Shape::kM, p.num_keys - key_start);
|
| 1802 |
+
typename MatmulGradV::OutputTileIterator outputV_it(
|
| 1803 |
+
typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()},
|
| 1804 |
+
p.grad_value_ptr + key_start * p.gV_strideM(),
|
| 1805 |
+
{num_keys_in_block, p.head_dim_value},
|
| 1806 |
+
thread_id);
|
| 1807 |
+
accumulateInGmem<MatmulGradV>(shared_storage.gradV_epilogue_final(),
|
| 1808 |
+
output_frags.gradV,
|
| 1809 |
+
outputV_it,
|
| 1810 |
+
true,
|
| 1811 |
+
warp_id,
|
| 1812 |
+
lane_id);
|
| 1813 |
+
|
| 1814 |
+
typename MatmulGradK::OutputTileIterator outputK_it(
|
| 1815 |
+
typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()},
|
| 1816 |
+
p.grad_key_ptr + key_start * p.gK_strideM(),
|
| 1817 |
+
{num_keys_in_block, false ? MatmulGradK::ThreadblockShape::kN : p.head_dim},
|
| 1818 |
+
thread_id);
|
| 1819 |
+
accumulateInGmem<MatmulGradK>(shared_storage.gradK_epilogue_final(),
|
| 1820 |
+
output_frags.gradK,
|
| 1821 |
+
outputK_it,
|
| 1822 |
+
true,
|
| 1823 |
+
warp_id,
|
| 1824 |
+
lane_id);
|
| 1825 |
+
}
|
| 1826 |
+
|
| 1827 |
+
template <typename MatmulT>
|
| 1828 |
+
static CUTLASS_DEVICE void accumulateInGmem(
|
| 1829 |
+
typename MatmulT::DefaultEpilogue::SharedStorage& epilogue_smem,
|
| 1830 |
+
typename MatmulT::Mma::FragmentC const& accum,
|
| 1831 |
+
typename MatmulT::OutputTileIterator output_it,
|
| 1832 |
+
bool first,
|
| 1833 |
+
uint8_t warp_id,
|
| 1834 |
+
uint8_t lane_id)
|
| 1835 |
+
{
|
| 1836 |
+
using DefaultEpilogue = typename MatmulT::DefaultEpilogue;
|
| 1837 |
+
using DefaultOutputOp = typename MatmulT::DefaultOutputOp;
|
| 1838 |
+
using Mma = typename MatmulT::Mma;
|
| 1839 |
+
int thread_id = 32 * warp_id + lane_id;
|
| 1840 |
+
DISPATCH_BOOL(
|
| 1841 |
+
first, kIsFirst, ([&]() {
|
| 1842 |
+
static constexpr auto ScaleType =
|
| 1843 |
+
kIsFirst ? cutlass::epilogue::thread::ScaleType::Nothing
|
| 1844 |
+
: cutlass::epilogue::thread::ScaleType::NoBetaScaling;
|
| 1845 |
+
using EpilogueOutputOp = typename cutlass::epilogue::thread::LinearCombination<
|
| 1846 |
+
typename DefaultOutputOp::ElementOutput,
|
| 1847 |
+
DefaultOutputOp::kCount,
|
| 1848 |
+
typename DefaultOutputOp::ElementAccumulator,
|
| 1849 |
+
typename DefaultOutputOp::ElementCompute,
|
| 1850 |
+
ScaleType>;
|
| 1851 |
+
using Epilogue = typename cutlass::epilogue::threadblock::EpiloguePipelined<
|
| 1852 |
+
typename DefaultEpilogue::Shape,
|
| 1853 |
+
typename Mma::Operator,
|
| 1854 |
+
DefaultEpilogue::kPartitionsK,
|
| 1855 |
+
typename MatmulT::OutputTileIterator,
|
| 1856 |
+
typename DefaultEpilogue::AccumulatorFragmentIterator,
|
| 1857 |
+
typename DefaultEpilogue::WarpTileIterator,
|
| 1858 |
+
typename DefaultEpilogue::SharedLoadIterator,
|
| 1859 |
+
EpilogueOutputOp,
|
| 1860 |
+
typename DefaultEpilogue::Padding,
|
| 1861 |
+
DefaultEpilogue::kFragmentsPerIteration,
|
| 1862 |
+
true // IterationsUnroll
|
| 1863 |
+
>;
|
| 1864 |
+
EpilogueOutputOp rescale({1, 1});
|
| 1865 |
+
Epilogue epilogue(epilogue_smem, thread_id, warp_id, lane_id);
|
| 1866 |
+
epilogue(rescale, output_it, accum, output_it);
|
| 1867 |
+
}));
|
| 1868 |
+
}
|
| 1869 |
+
|
| 1870 |
+
template <int kElementsPerAccess>
|
| 1871 |
+
static CUTLASS_DEVICE void computeDelta(Params const& p,
|
| 1872 |
+
int32_t query_start,
|
| 1873 |
+
uint8_t warp_id,
|
| 1874 |
+
uint8_t lane_id)
|
| 1875 |
+
{
|
| 1876 |
+
// Each thread computes one value for Delta
|
| 1877 |
+
// Depending on warp configuration, we might have multiple
|
| 1878 |
+
// threads of the same warp working on the same row
|
| 1879 |
+
using AccessType = cutlass::Array<scalar_t, kElementsPerAccess>;
|
| 1880 |
+
static_assert(kNumThreads >= kBlockSizeI, "");
|
| 1881 |
+
static constexpr int kNumThreadsPerLine = kNumThreads / kBlockSizeI;
|
| 1882 |
+
int16_t thread_id = 32 * warp_id + lane_id;
|
| 1883 |
+
|
| 1884 |
+
int16_t laneFirstCol = kElementsPerAccess * (lane_id % kNumThreadsPerLine);
|
| 1885 |
+
int16_t laneRow = thread_id / kNumThreadsPerLine;
|
| 1886 |
+
bool rowPred = (query_start + laneRow) < p.num_queries;
|
| 1887 |
+
bool pred = rowPred;
|
| 1888 |
+
|
| 1889 |
+
// on windows, previous syntax __restrict__ AccessType*
|
| 1890 |
+
// resulted in error: "restrict" is not allowed
|
| 1891 |
+
const AccessType* __restrict__ grad_output_ptr = reinterpret_cast<const AccessType*>(
|
| 1892 |
+
p.grad_output_ptr + (query_start + laneRow) * p.gO_strideM + laneFirstCol);
|
| 1893 |
+
const AccessType* __restrict__ output_ptr = reinterpret_cast<const AccessType*>(
|
| 1894 |
+
p.output_ptr + (query_start + laneRow) * p.o_strideM() + laneFirstCol);
|
| 1895 |
+
|
| 1896 |
+
static constexpr int64_t kMaxIters = kMaxK / (kElementsPerAccess * kNumThreadsPerLine);
|
| 1897 |
+
constexpr int kPipelineStages = 2;
|
| 1898 |
+
accum_t delta_value = accum_t(0);
|
| 1899 |
+
using GlobalLoad = cutlass::arch::global_load<AccessType, sizeof(AccessType)>;
|
| 1900 |
+
AccessType frag_grad_output[kPipelineStages];
|
| 1901 |
+
AccessType frag_output[kPipelineStages];
|
| 1902 |
+
|
| 1903 |
+
auto loadAndIncrement = [&](int ld_pos, bool is_valid) {
|
| 1904 |
+
frag_grad_output[ld_pos].clear();
|
| 1905 |
+
frag_output[ld_pos].clear();
|
| 1906 |
+
GlobalLoad(frag_grad_output[ld_pos], grad_output_ptr, is_valid);
|
| 1907 |
+
GlobalLoad(frag_output[ld_pos], output_ptr, is_valid);
|
| 1908 |
+
grad_output_ptr += kNumThreadsPerLine;
|
| 1909 |
+
output_ptr += kNumThreadsPerLine;
|
| 1910 |
+
};
|
| 1911 |
+
|
| 1912 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1913 |
+
for (int iter = 0; iter < kPipelineStages - 1; ++iter) {
|
| 1914 |
+
int ld_pos = iter % kPipelineStages;
|
| 1915 |
+
pred = pred && (laneFirstCol + iter * kElementsPerAccess * kNumThreadsPerLine) <
|
| 1916 |
+
p.head_dim_value;
|
| 1917 |
+
loadAndIncrement(ld_pos, pred);
|
| 1918 |
+
}
|
| 1919 |
+
auto columnIteration = [&](int iter) {
|
| 1920 |
+
// Load for next iter
|
| 1921 |
+
int ld_pos = (iter + kPipelineStages - 1) % kPipelineStages;
|
| 1922 |
+
pred = pred && (laneFirstCol + (iter + kPipelineStages - 1) * kElementsPerAccess *
|
| 1923 |
+
kNumThreadsPerLine) < p.head_dim_value;
|
| 1924 |
+
loadAndIncrement(ld_pos, pred);
|
| 1925 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1926 |
+
for (int i = 0; i < AccessType::kElements; ++i) {
|
| 1927 |
+
delta_value += accum_t(frag_output[iter % kPipelineStages][i]) *
|
| 1928 |
+
accum_t(frag_grad_output[iter % kPipelineStages][i]);
|
| 1929 |
+
}
|
| 1930 |
+
};
|
| 1931 |
+
|
| 1932 |
+
// If we have a small lower-bound for K, we can unroll the loop
|
| 1933 |
+
if (kMaxK <= 256) {
|
| 1934 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1935 |
+
for (int iter = 0; iter < kMaxIters; ++iter) { columnIteration(iter); }
|
| 1936 |
+
} else {
|
| 1937 |
+
int num_iters = ceil_div(p.head_dim_value, kElementsPerAccess * kNumThreadsPerLine) *
|
| 1938 |
+
(kElementsPerAccess * kNumThreadsPerLine);
|
| 1939 |
+
for (int iter = 0; iter < num_iters; ++iter) { columnIteration(iter); }
|
| 1940 |
+
}
|
| 1941 |
+
|
| 1942 |
+
// Reduce between workers
|
| 1943 |
+
static_assert(kNumThreadsPerLine == 1 || kNumThreadsPerLine == 2 || kNumThreadsPerLine == 4,
|
| 1944 |
+
"");
|
| 1945 |
+
CUTLASS_PRAGMA_UNROLL
|
| 1946 |
+
for (int i = 1; i < kNumThreadsPerLine; i *= 2) {
|
| 1947 |
+
delta_value = delta_value + __shfl_xor_sync(0xffffffff, delta_value, i);
|
| 1948 |
+
}
|
| 1949 |
+
|
| 1950 |
+
// Store in gmem
|
| 1951 |
+
if (rowPred) { p.delta_ptr[query_start + laneRow] = delta_value; }
|
| 1952 |
+
}
|
| 1953 |
+
};
|
| 1954 |
+
|
| 1955 |
+
template <typename AK>
|
| 1956 |
+
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
|
| 1957 |
+
attention_kernel_backward_batched_impl(typename AK::Params p)
|
| 1958 |
+
{
|
| 1959 |
+
if (!p.advance_to_block()) { return; }
|
| 1960 |
+
AK::attention_kernel(p);
|
| 1961 |
+
}
|
| 1962 |
+
|
| 1963 |
+
template <typename AK>
|
| 1964 |
+
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
|
| 1965 |
+
attention_kernel_backward_batched(typename AK::Params params);
|
parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_forward.h
ADDED
|
@@ -0,0 +1,986 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/***************************************************************************************************
|
| 2 |
+
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 3 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 4 |
+
*
|
| 5 |
+
* Redistribution and use in source and binary forms, with or without
|
| 6 |
+
* modification, are permitted provided that the following conditions are met:
|
| 7 |
+
*
|
| 8 |
+
* 1. Redistributions of source code must retain the above copyright notice, this
|
| 9 |
+
* list of conditions and the following disclaimer.
|
| 10 |
+
*
|
| 11 |
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 12 |
+
* this list of conditions and the following disclaimer in the documentation
|
| 13 |
+
* and/or other materials provided with the distribution.
|
| 14 |
+
*
|
| 15 |
+
* 3. Neither the name of the copyright holdvr nor the names of its
|
| 16 |
+
* contributors may be used to endorse or promote products derived from
|
| 17 |
+
* this software without specific prior written permission.
|
| 18 |
+
*
|
| 19 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 20 |
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 21 |
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 22 |
+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 23 |
+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 24 |
+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 25 |
+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 26 |
+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 27 |
+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 28 |
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 29 |
+
*
|
| 30 |
+
**************************************************************************************************/
|
| 31 |
+
|
| 32 |
+
// Copyright (c) Microsoft Corporation.
|
| 33 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 34 |
+
|
| 35 |
+
// DeepSpeed Team
|
| 36 |
+
|
| 37 |
+
#pragma once
|
| 38 |
+
|
| 39 |
+
#include <curand_kernel.h>
|
| 40 |
+
#include <cmath>
|
| 41 |
+
#include <vector>
|
| 42 |
+
|
| 43 |
+
#include "cutlass/bfloat16.h"
|
| 44 |
+
#include "cutlass/fast_math.h"
|
| 45 |
+
#include "cutlass/gemm/gemm.h"
|
| 46 |
+
#include "cutlass/layout/matrix.h"
|
| 47 |
+
#include "cutlass/layout/vector.h"
|
| 48 |
+
#include "cutlass/matrix.h"
|
| 49 |
+
#include "cutlass/numeric_types.h"
|
| 50 |
+
#include "cutlass/tensor_ref.h"
|
| 51 |
+
|
| 52 |
+
#include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
|
| 53 |
+
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
|
| 54 |
+
#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
|
| 55 |
+
#include "cutlass/gemm/device/default_gemm_configuration.h"
|
| 56 |
+
#include "cutlass/gemm/kernel/default_gemm.h"
|
| 57 |
+
#include "cutlass/gemm/threadblock/default_mma.h"
|
| 58 |
+
#include "cutlass/gemm/threadblock/default_mma_core_simt.h"
|
| 59 |
+
#include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
|
| 60 |
+
#include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
|
| 61 |
+
#include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
|
| 62 |
+
#include "cutlass/gemm/threadblock/threadblock_swizzle.h"
|
| 63 |
+
#include "cutlass/matrix_shape.h"
|
| 64 |
+
#include "cutlass/platform/platform.h"
|
| 65 |
+
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
|
| 66 |
+
#include "epilogue/epilogue_pipelined.h"
|
| 67 |
+
#include "epilogue/epilogue_rescale_output.h"
|
| 68 |
+
#include "gemm/find_default_mma.h"
|
| 69 |
+
#include "gemm/mma_from_smem.h"
|
| 70 |
+
#include "gemm_kernel_utils.h"
|
| 71 |
+
#include "transform/bias_broadcast.h"
|
| 72 |
+
#include "transform/tile_smem_loader.h"
|
| 73 |
+
|
| 74 |
+
#include <inttypes.h>
|
| 75 |
+
|
| 76 |
+
using namespace gemm_kernel_utils;
|
| 77 |
+
|
| 78 |
+
namespace {
|
| 79 |
+
template <typename scalar_t, typename Arch>
|
| 80 |
+
constexpr int getWarpsPerSm()
|
| 81 |
+
{
|
| 82 |
+
return (Arch::kMinComputeCapability >= 80 && !cutlass::platform::is_same<scalar_t, float>::value
|
| 83 |
+
? 16
|
| 84 |
+
: 12);
|
| 85 |
+
}
|
| 86 |
+
static CUTLASS_DEVICE float atomicMaxFloat(float* addr, float value)
|
| 87 |
+
{
|
| 88 |
+
// source: https://stackoverflow.com/a/51549250
|
| 89 |
+
return (value >= 0) ? __int_as_float(atomicMax((int*)addr, __float_as_int(value)))
|
| 90 |
+
: __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
|
| 91 |
+
}
|
| 92 |
+
} // namespace
|
| 93 |
+
|
| 94 |
+
template <
|
| 95 |
+
// The datatype of Q/K/V
|
| 96 |
+
typename scalar_t_,
|
| 97 |
+
// Architecture we are targeting (eg `cutlass::arch::Sm80`)
|
| 98 |
+
typename ArchTag,
|
| 99 |
+
// If Q/K/V are correctly aligned in memory and we can run a fast kernel
|
| 100 |
+
bool isAligned_,
|
| 101 |
+
int kQueriesPerBlock,
|
| 102 |
+
int kKeysPerBlock_,
|
| 103 |
+
bool kSingleValueIteration_, // = `value.shape[-1] <= kKeysPerBlock`
|
| 104 |
+
// This is quite slower on V100 for some reason
|
| 105 |
+
// Set to false if you know at compile-time you will never need dropout
|
| 106 |
+
bool kSupportsBias_ = false,
|
| 107 |
+
template <typename, typename, typename> class Broadcast1_ = BroadcastNoLoad,
|
| 108 |
+
template <typename, typename, typename> class Broadcast2_ = BroadcastNoLoad>
|
| 109 |
+
struct AttentionKernel {
|
| 110 |
+
using scalar_t = scalar_t_;
|
| 111 |
+
using accum_t = float;
|
| 112 |
+
using lse_scalar_t = float;
|
| 113 |
+
using output_t = scalar_t;
|
| 114 |
+
// Accumulator between 2 iterations
|
| 115 |
+
// Using `accum_t` improves perf on f16 at the cost of
|
| 116 |
+
// numerical errors
|
| 117 |
+
using output_accum_t = accum_t;
|
| 118 |
+
static constexpr bool kSupportsBias = kSupportsBias_;
|
| 119 |
+
static constexpr int kKeysPerBlock = kKeysPerBlock_;
|
| 120 |
+
static constexpr bool kIsAligned = isAligned_;
|
| 121 |
+
static constexpr bool kSingleValueIteration = kSingleValueIteration_;
|
| 122 |
+
static constexpr int32_t kAlignLSE = 32; // block size of backward
|
| 123 |
+
static constexpr bool kPreloadV =
|
| 124 |
+
ArchTag::kMinComputeCapability >= 80 && cutlass::sizeof_bits<scalar_t>::value == 16;
|
| 125 |
+
static constexpr bool kKeepOutputInRF = kSingleValueIteration;
|
| 126 |
+
static constexpr bool kNeedsOutputAccumulatorBuffer =
|
| 127 |
+
!kKeepOutputInRF && !cutlass::platform::is_same<output_accum_t, output_t>::value;
|
| 128 |
+
|
| 129 |
+
static_assert(kQueriesPerBlock % 32 == 0, "");
|
| 130 |
+
static_assert(kKeysPerBlock % 32 == 0, "");
|
| 131 |
+
static constexpr int kNumWarpsPerBlock = kQueriesPerBlock * kKeysPerBlock / (32 * 32);
|
| 132 |
+
static constexpr int kWarpSize = 32;
|
| 133 |
+
|
| 134 |
+
// Launch bounds
|
| 135 |
+
static constexpr int kNumThreads = kWarpSize * kNumWarpsPerBlock;
|
| 136 |
+
static constexpr int kMinBlocksPerSm = getWarpsPerSm<scalar_t, ArchTag>() / kNumWarpsPerBlock;
|
| 137 |
+
|
| 138 |
+
struct Params {
|
| 139 |
+
// Input tensors
|
| 140 |
+
scalar_t* query_ptr; // [num_queries, num_heads, head_dim]
|
| 141 |
+
scalar_t* key_ptr; // [num_keys, num_heads, head_dim]
|
| 142 |
+
scalar_t* value_ptr; // [num_keys, num_heads, head_dim_value]
|
| 143 |
+
|
| 144 |
+
// Output tensors
|
| 145 |
+
output_t* output_ptr; // [num_queries, num_heads, head_dim_value]
|
| 146 |
+
output_accum_t* output_accum_ptr; // [num_queries, num_heads, head_dim_value]
|
| 147 |
+
lse_scalar_t* logsumexp_ptr; // [num_heads, num_queries] - can be null
|
| 148 |
+
|
| 149 |
+
// Scale
|
| 150 |
+
accum_t scale;
|
| 151 |
+
|
| 152 |
+
// Dimensions/strides
|
| 153 |
+
int32_t head_dim;
|
| 154 |
+
int32_t head_dim_value;
|
| 155 |
+
int32_t num_queries;
|
| 156 |
+
int32_t num_keys;
|
| 157 |
+
|
| 158 |
+
int32_t q_strideM;
|
| 159 |
+
int32_t k_strideM;
|
| 160 |
+
int32_t v_strideM;
|
| 161 |
+
// int32_t bias_strideM = 0;
|
| 162 |
+
|
| 163 |
+
int32_t o_strideM = 0;
|
| 164 |
+
|
| 165 |
+
// Everything below is only used in `advance_to_block`
|
| 166 |
+
// and shouldn't use registers
|
| 167 |
+
int32_t q_strideH;
|
| 168 |
+
int32_t k_strideH;
|
| 169 |
+
int32_t v_strideH;
|
| 170 |
+
// int32_t bias_strideH = 0;
|
| 171 |
+
|
| 172 |
+
int64_t q_strideB;
|
| 173 |
+
int64_t k_strideB;
|
| 174 |
+
int64_t v_strideB;
|
| 175 |
+
// int32_t bias_strideB = 0;
|
| 176 |
+
|
| 177 |
+
int32_t num_batches;
|
| 178 |
+
int32_t num_heads;
|
| 179 |
+
|
| 180 |
+
// Parameters for biases
|
| 181 |
+
scalar_t* bias1_ptr = nullptr;
|
| 182 |
+
scalar_t* bias2_ptr = nullptr;
|
| 183 |
+
int32_t B = 0;
|
| 184 |
+
int32_t N = 0;
|
| 185 |
+
|
| 186 |
+
// Moves pointers to what we should process
|
| 187 |
+
// Returns "false" if there is no work to do
|
| 188 |
+
CUTLASS_DEVICE bool advance_to_block()
|
| 189 |
+
{
|
| 190 |
+
auto batch_id = blockIdx.z;
|
| 191 |
+
auto head_id = blockIdx.y;
|
| 192 |
+
auto query_start = blockIdx.x * kQueriesPerBlock;
|
| 193 |
+
|
| 194 |
+
auto lse_dim = ceil_div((int32_t)num_queries, kAlignLSE) * kAlignLSE;
|
| 195 |
+
|
| 196 |
+
query_ptr += batch_id * q_strideB;
|
| 197 |
+
key_ptr += batch_id * k_strideB;
|
| 198 |
+
value_ptr += batch_id * v_strideB;
|
| 199 |
+
output_ptr += int64_t(batch_id * num_queries) * o_strideM;
|
| 200 |
+
if (output_accum_ptr != nullptr) {
|
| 201 |
+
output_accum_ptr += int64_t(batch_id * num_queries) * (head_dim_value * num_heads);
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
int64_t q_start = 0, k_start = 0;
|
| 205 |
+
// Advance to the current batch / head / query_start
|
| 206 |
+
query_ptr += (q_start + query_start) * q_strideM + head_id * q_strideH;
|
| 207 |
+
key_ptr += k_start * k_strideM + head_id * k_strideH;
|
| 208 |
+
|
| 209 |
+
value_ptr += k_start * v_strideM + head_id * v_strideH;
|
| 210 |
+
output_ptr += int64_t(q_start + query_start) * o_strideM + head_id * head_dim_value;
|
| 211 |
+
|
| 212 |
+
if (output_accum_ptr != nullptr) {
|
| 213 |
+
output_accum_ptr += int64_t(q_start + query_start) * (head_dim_value * num_heads) +
|
| 214 |
+
head_id * head_dim_value;
|
| 215 |
+
} else {
|
| 216 |
+
// Accumulate directly in the destination buffer (eg for f32)
|
| 217 |
+
output_accum_ptr = (accum_t*)output_ptr;
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
if (logsumexp_ptr != nullptr) {
|
| 221 |
+
// lse[batch_id, head_id, query_start]
|
| 222 |
+
logsumexp_ptr += batch_id * lse_dim * num_heads + head_id * lse_dim + query_start;
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
using broadcast_1 = Broadcast1_<typename MM0::BiasLoader::ThreadMap,
|
| 226 |
+
typename MM0::BiasLoader::Shape,
|
| 227 |
+
scalar_t>;
|
| 228 |
+
if (kSupportsBias && broadcast_1::kEnable && bias1_ptr) {
|
| 229 |
+
bias1_ptr = broadcast_1::advance(bias1_ptr,
|
| 230 |
+
batch_id / N,
|
| 231 |
+
batch_id % N,
|
| 232 |
+
head_id,
|
| 233 |
+
num_queries * N,
|
| 234 |
+
num_queries,
|
| 235 |
+
0);
|
| 236 |
+
}
|
| 237 |
+
using broadcast_2 = Broadcast2_<typename MM0::BiasLoader::ThreadMap,
|
| 238 |
+
typename MM0::BiasLoader::Shape,
|
| 239 |
+
scalar_t>;
|
| 240 |
+
if (kSupportsBias && broadcast_2::kEnable && bias2_ptr) {
|
| 241 |
+
auto strideB = num_heads * num_queries * num_keys;
|
| 242 |
+
auto strideH = num_queries * num_keys;
|
| 243 |
+
bias2_ptr = broadcast_2::advance(
|
| 244 |
+
bias2_ptr, batch_id / N, batch_id % N, head_id, strideB, 0, strideH);
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
num_queries -= query_start;
|
| 248 |
+
num_batches = 0; // no longer used after
|
| 249 |
+
|
| 250 |
+
// If num_queries == 1, and there is only one key head we're wasting
|
| 251 |
+
// 15/16th of tensor core compute In that case :
|
| 252 |
+
// - we only launch kernels for head_id % kQueriesPerBlock == 0
|
| 253 |
+
// - we iterate over heads instead of queries (strideM = strideH)
|
| 254 |
+
if (num_queries == 1 && k_strideH == 0 && v_strideH == 0) {
|
| 255 |
+
if (head_id % kQueriesPerBlock != 0) return false;
|
| 256 |
+
q_strideM = q_strideH;
|
| 257 |
+
num_queries = num_heads;
|
| 258 |
+
num_heads = 1; // unused but here for intent
|
| 259 |
+
o_strideM = head_dim_value;
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
// Make sure the compiler knows these variables are the same on all
|
| 263 |
+
// the threads of the warp.
|
| 264 |
+
query_ptr = warp_uniform(query_ptr);
|
| 265 |
+
key_ptr = warp_uniform(key_ptr);
|
| 266 |
+
value_ptr = warp_uniform(value_ptr);
|
| 267 |
+
output_ptr = warp_uniform(output_ptr);
|
| 268 |
+
output_accum_ptr = warp_uniform(output_accum_ptr);
|
| 269 |
+
logsumexp_ptr = warp_uniform(logsumexp_ptr);
|
| 270 |
+
num_queries = warp_uniform(num_queries);
|
| 271 |
+
num_keys = warp_uniform(num_keys);
|
| 272 |
+
num_heads = warp_uniform(num_heads);
|
| 273 |
+
head_dim = warp_uniform(head_dim);
|
| 274 |
+
head_dim_value = warp_uniform(head_dim_value);
|
| 275 |
+
o_strideM = warp_uniform(o_strideM);
|
| 276 |
+
if (kSupportsBias && broadcast_1::kEnable) { bias1_ptr = warp_uniform(bias1_ptr); }
|
| 277 |
+
if (kSupportsBias && broadcast_2::kEnable) { bias2_ptr = warp_uniform(bias2_ptr); }
|
| 278 |
+
return true;
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
__host__ dim3 getBlocksGrid() const
|
| 282 |
+
{
|
| 283 |
+
return dim3(ceil_div(num_queries, (int32_t)kQueriesPerBlock), num_heads, num_batches);
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
__host__ dim3 getThreadsGrid() const { return dim3(kWarpSize, kNumWarpsPerBlock, 1); }
|
| 287 |
+
};
|
| 288 |
+
|
| 289 |
+
struct MM0 {
|
| 290 |
+
/*
|
| 291 |
+
In this first matmul, we compute a block of `Q @ K.T`.
|
| 292 |
+
While the calculation result is still hot in registers, we update
|
| 293 |
+
`mi`, `m_prime`, `s_prime` in shared-memory, and then store this value
|
| 294 |
+
into a shared-memory ("AccumulatorSharedStorage") that is used later as
|
| 295 |
+
operand A for the second matmul (see MM1)
|
| 296 |
+
*/
|
| 297 |
+
using GemmType = DefaultGemmType<ArchTag, scalar_t>;
|
| 298 |
+
|
| 299 |
+
using OpClass = typename GemmType::OpClass;
|
| 300 |
+
using DefaultConfig =
|
| 301 |
+
typename cutlass::gemm::device::DefaultGemmConfiguration<OpClass,
|
| 302 |
+
ArchTag,
|
| 303 |
+
scalar_t,
|
| 304 |
+
scalar_t,
|
| 305 |
+
scalar_t, // ElementC
|
| 306 |
+
accum_t // ElementAccumulator
|
| 307 |
+
>;
|
| 308 |
+
static constexpr int kAlignmentA = kIsAligned ? DefaultConfig::kAlignmentA
|
| 309 |
+
: GemmType::kMinimumAlignment;
|
| 310 |
+
static constexpr int kAlignmentB = kIsAligned ? DefaultConfig::kAlignmentB
|
| 311 |
+
: GemmType::kMinimumAlignment;
|
| 312 |
+
using ThreadblockShape =
|
| 313 |
+
cutlass::gemm::GemmShape<kQueriesPerBlock, kKeysPerBlock, GemmType::ThreadK>;
|
| 314 |
+
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
|
| 315 |
+
using DefaultMma = typename cutlass::gemm::threadblock::FindDefaultMma<
|
| 316 |
+
scalar_t, // ElementA,
|
| 317 |
+
cutlass::layout::RowMajor, // LayoutA,
|
| 318 |
+
kAlignmentA,
|
| 319 |
+
scalar_t, // ElementB,
|
| 320 |
+
cutlass::layout::ColumnMajor, // LayoutB,
|
| 321 |
+
kAlignmentB,
|
| 322 |
+
accum_t,
|
| 323 |
+
cutlass::layout::RowMajor, // LayoutC,
|
| 324 |
+
OpClass,
|
| 325 |
+
ArchTag, // ArchTag
|
| 326 |
+
ThreadblockShape, // ThreadblockShape
|
| 327 |
+
WarpShape, // WarpShape
|
| 328 |
+
typename GemmType::InstructionShape, // InstructionShape
|
| 329 |
+
DefaultConfig::kStages, // Should use `DefaultConfig::kStages`, but that
|
| 330 |
+
// uses too much smem
|
| 331 |
+
typename GemmType::Operator // Operator
|
| 332 |
+
>::DefaultMma;
|
| 333 |
+
using MmaCore = typename DefaultMma::MmaCore;
|
| 334 |
+
using IteratorA = typename DefaultMma::IteratorA;
|
| 335 |
+
using IteratorB = typename DefaultMma::IteratorB;
|
| 336 |
+
using Mma = typename DefaultMma::ThreadblockMma;
|
| 337 |
+
using AccumLambdaIterator =
|
| 338 |
+
typename DefaultMmaAccumLambdaIterator<typename Mma::Operator::IteratorC,
|
| 339 |
+
accum_t,
|
| 340 |
+
kWarpSize>::Iterator;
|
| 341 |
+
static_assert(MmaCore::WarpCount::kM * MmaCore::WarpCount::kN * MmaCore::WarpCount::kK ==
|
| 342 |
+
kNumWarpsPerBlock,
|
| 343 |
+
"");
|
| 344 |
+
|
| 345 |
+
// used for efficient load of bias tile Bij from global to shared memory
|
| 346 |
+
using BiasLoader =
|
| 347 |
+
TileSmemLoader<scalar_t,
|
| 348 |
+
cutlass::MatrixShape<kQueriesPerBlock, kKeysPerBlock>,
|
| 349 |
+
MmaCore::kThreads,
|
| 350 |
+
// input restriction: kv_len has to be a multiple of this value
|
| 351 |
+
128 / cutlass::sizeof_bits<scalar_t>::value>;
|
| 352 |
+
|
| 353 |
+
// Epilogue to store to shared-memory in a format that we can use later for
|
| 354 |
+
// the second matmul
|
| 355 |
+
using B2bGemm =
|
| 356 |
+
typename cutlass::gemm::threadblock::B2bGemm<typename Mma::Operator::IteratorC,
|
| 357 |
+
typename Mma::Operator,
|
| 358 |
+
scalar_t,
|
| 359 |
+
WarpShape,
|
| 360 |
+
ThreadblockShape>;
|
| 361 |
+
using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage;
|
| 362 |
+
};
|
| 363 |
+
|
| 364 |
+
struct MM1 {
|
| 365 |
+
/**
|
| 366 |
+
Second matmul: perform `attn @ V` where `attn` is the attention (not
|
| 367 |
+
normalized) and stored in shared memory
|
| 368 |
+
*/
|
| 369 |
+
using GemmType = DefaultGemmType<ArchTag, scalar_t>;
|
| 370 |
+
|
| 371 |
+
using OpClass = typename GemmType::OpClass;
|
| 372 |
+
using DefaultConfig =
|
| 373 |
+
typename cutlass::gemm::device::DefaultGemmConfiguration<OpClass,
|
| 374 |
+
ArchTag,
|
| 375 |
+
scalar_t,
|
| 376 |
+
scalar_t,
|
| 377 |
+
output_accum_t, // ElementC
|
| 378 |
+
accum_t // ElementAccumulator
|
| 379 |
+
>;
|
| 380 |
+
static constexpr int kAlignmentA = DefaultConfig::kAlignmentA; // from smem
|
| 381 |
+
static constexpr int kAlignmentB = kIsAligned ? DefaultConfig::kAlignmentB
|
| 382 |
+
: GemmType::kMinimumAlignment;
|
| 383 |
+
using ThreadblockShape =
|
| 384 |
+
cutlass::gemm::GemmShape<kQueriesPerBlock, kKeysPerBlock, GemmType::ThreadK>;
|
| 385 |
+
using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>;
|
| 386 |
+
using InstructionShape = typename GemmType::InstructionShape;
|
| 387 |
+
|
| 388 |
+
using LayoutB = cutlass::layout::RowMajor;
|
| 389 |
+
        using DefaultGemm =
            cutlass::gemm::kernel::DefaultGemm<scalar_t,                  // ElementA,
                                               cutlass::layout::RowMajor, // LayoutA,
                                               kAlignmentA,
                                               scalar_t, // ElementB,
                                               LayoutB,  // LayoutB,
                                               kAlignmentB,
                                               output_accum_t,
                                               cutlass::layout::RowMajor, // LayoutC,
                                               accum_t,
                                               OpClass,
                                               ArchTag,
                                               ThreadblockShape,
                                               WarpShape,
                                               typename GemmType::InstructionShape,
                                               typename DefaultConfig::EpilogueOutputOp,
                                               void, // ThreadblockSwizzle - not used
                                               DefaultConfig::kStages,
                                               false, // SplitKSerial
                                               typename GemmType::Operator>;

        using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory<
            typename DefaultGemm::Mma,
            typename MM0::AccumulatorSharedStorage,
            false>; // kScaleOperandA
        using Mma = typename DefaultMmaFromSmem::Mma;
        using IteratorB = typename Mma::IteratorB;
        using WarpCount = typename Mma::WarpCount;
        static_assert(WarpCount::kM * WarpCount::kN * WarpCount::kK == kNumWarpsPerBlock, "");

        using DefaultEpilogue = typename DefaultGemm::Epilogue;
        using OutputTileIterator = typename cutlass::epilogue::threadblock::PredicatedTileIterator<
            typename DefaultEpilogue::OutputTileIterator::ThreadMap,
            output_t>;
        using OutputTileIteratorAccum =
            typename cutlass::epilogue::threadblock::PredicatedTileIterator<
                typename DefaultEpilogue::OutputTileIterator::ThreadMap,
                output_accum_t>;

        struct SharedStorageMM1 {
            typename Mma::SharedStorage mm;
        };
    };

    static constexpr int64_t kAlignmentQ = MM0::kAlignmentA;
    static constexpr int64_t kAlignmentK = MM0::kAlignmentB;
    static constexpr int64_t kAlignmentV = 1;

    // Shared storage - depends on kernel params
    struct ScalingCoefs {
        cutlass::Array<accum_t, kQueriesPerBlock> m_prime;
        cutlass::Array<accum_t, kQueriesPerBlock> s_prime;
        cutlass::Array<accum_t, kQueriesPerBlock> mi;
    };

    struct SharedStorageEpilogueAtEnd : ScalingCoefs {
        struct SharedStorageAfterMM0 {
            // Everything here might be overwritten during MM0
            union {
                // typename MM0::BiasLoader::SmemTile bias;
                cutlass::AlignedBuffer<float, MM0::BiasLoader::Shape::kCount> bias;
                typename MM0::AccumulatorSharedStorage si;
            };
            typename MM1::SharedStorageMM1 mm1;
        };

        union {
            typename MM0::Mma::SharedStorage mm0;
            SharedStorageAfterMM0 after_mm0;
            typename MM1::DefaultEpilogue::SharedStorage epilogue;
        };

        CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage& epilogue_shared_storage()
        {
            return epilogue;
        }
    };

    struct SharedStorageEpilogueInLoop : ScalingCoefs {
        struct SharedStorageAfterMM0 {
            // Everything here might be overwritten during MM0
            union {
                // typename MM0::BiasLoader::SmemTile bias;
                cutlass::AlignedBuffer<float, MM0::BiasLoader::Shape::kCount> bias;
                typename MM0::AccumulatorSharedStorage si;
            };
            typename MM1::SharedStorageMM1 mm1;
            typename MM1::DefaultEpilogue::SharedStorage epilogue;
        };

        union {
            typename MM0::Mma::SharedStorage mm0;
            SharedStorageAfterMM0 after_mm0;
        };

        CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage& epilogue_shared_storage()
        {
            return after_mm0.epilogue;
        }
    };

    using SharedStorage =
        typename cutlass::platform::conditional<kSingleValueIteration || kKeepOutputInRF,
                                                SharedStorageEpilogueAtEnd,
                                                SharedStorageEpilogueInLoop>::type;

    static bool __host__ check_supported(Params const& p)
    {
        CHECK_ALIGNED_PTR(p.query_ptr, kAlignmentQ);
        CHECK_ALIGNED_PTR(p.key_ptr, kAlignmentK);
        CHECK_ALIGNED_PTR(p.value_ptr, kAlignmentV);
        EVOFORMER_CHECK(p.q_strideM % kAlignmentQ == 0, "query is not correctly aligned (strideM)");
        EVOFORMER_CHECK(p.k_strideM % kAlignmentK == 0, "key is not correctly aligned (strideM)");
        EVOFORMER_CHECK(p.v_strideM % kAlignmentV == 0, "value is not correctly aligned (strideM)");
        EVOFORMER_CHECK(p.num_heads <= 1 || p.q_strideH % kAlignmentQ == 0,
                        "query is not correctly aligned (strideH)");
        EVOFORMER_CHECK(p.num_heads <= 1 || p.k_strideH % kAlignmentK == 0,
                        "key is not correctly aligned (strideH)");
        EVOFORMER_CHECK(p.num_heads <= 1 || p.v_strideH % kAlignmentV == 0,
                        "value is not correctly aligned (strideH)");
        return true;
    }

    static void CUTLASS_DEVICE attention_kernel(Params& p)
    {
        // In this block, we will only ever:
        // - read query[query_start:query_end, :]
        // - write to output[query_start:query_end, :]

        extern __shared__ char smem_buffer[];
        SharedStorage& shared_storage = *((SharedStorage*)smem_buffer);
        auto& m_prime = shared_storage.m_prime;
        auto& s_prime = shared_storage.s_prime;
        auto& mi = shared_storage.mi;
        const uint32_t query_start = blockIdx.x * kQueriesPerBlock;

        static_assert(kQueriesPerBlock < kNumWarpsPerBlock * kWarpSize, "");
        if (thread_id() < kQueriesPerBlock) {
            s_prime[thread_id()] = accum_t(0);
            m_prime[thread_id()] = -cutlass::platform::numeric_limits<accum_t>::infinity();
            mi[thread_id()] = -cutlass::platform::numeric_limits<accum_t>::infinity();
        }
        typename MM1::Mma::FragmentC accum_o;
        accum_o.clear();

        auto createOutputIter = [&](int col) -> typename MM1::OutputTileIterator {
            using OutputTileIterator = typename MM1::OutputTileIterator;
            return OutputTileIterator(
                typename OutputTileIterator::Params{(int32_t)p.o_strideM},
                p.output_ptr,
                typename OutputTileIterator::TensorCoord{p.num_queries, p.head_dim_value},
                thread_id(),
                {0, col});
        };

        auto createOutputAccumIter = [&](int col) -> typename MM1::OutputTileIteratorAccum {
            using OutputTileIteratorAccum = typename MM1::OutputTileIteratorAccum;
            return OutputTileIteratorAccum(
                typename OutputTileIteratorAccum::Params{(int32_t)(p.head_dim_value * p.num_heads)},
                p.output_accum_ptr,
                typename OutputTileIteratorAccum::TensorCoord{p.num_queries, p.head_dim_value},
                thread_id(),
                {0, col});
        };

        // Iterate through keys
        for (int32_t iter_key_start = 0; iter_key_start < p.num_keys;
             iter_key_start += kKeysPerBlock) {
            int32_t problem_size_0_m = cutlass::fast_min((int32_t)kQueriesPerBlock, p.num_queries);
            int32_t problem_size_0_n =
                cutlass::fast_min(int32_t(kKeysPerBlock), p.num_keys - iter_key_start);
            int32_t const& problem_size_0_k = p.head_dim;
            int32_t const& problem_size_1_n = p.head_dim_value;
            int32_t const& problem_size_1_k = problem_size_0_n;

            auto prologueV = [&](int blockN) {
                typename MM1::Mma::IteratorB iterator_V(
                    typename MM1::IteratorB::Params{MM1::LayoutB(p.v_strideM)},
                    p.value_ptr + iter_key_start * p.v_strideM,
                    {problem_size_1_k, problem_size_1_n},
                    thread_id(),
                    cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
                MM1::Mma::prologue(
                    shared_storage.after_mm0.mm1.mm, iterator_V, thread_id(), problem_size_1_k);
            };

            __syncthreads();  // Need to have shared memory initialized, and `m_prime`
                              // updated from end of prev iter
            //
            // MATMUL: Q.K_t
            //
            // Computes the block-matrix product of:
            // (a) query[query_start:query_end, :]
            //     with
            // (b) key[iter_key_start:iter_key_start + kKeysPerBlock]
            // and stores that into `shared_storage.si`
            //

            // Compute threadblock location
            cutlass::gemm::GemmCoord tb_tile_offset = {0, 0, 0};

            cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * MM0::Mma::Shape::kM,
                                             tb_tile_offset.k()};

            cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
                                             tb_tile_offset.n() * MM0::Mma::Shape::kN};

            // Construct iterators to A and B operands
            typename MM0::IteratorA iterator_A(
                typename MM0::IteratorA::Params(typename MM0::MmaCore::LayoutA(p.q_strideM)),
                p.query_ptr,
                {problem_size_0_m, problem_size_0_k},
                thread_id(),
                tb_offset_A);

            typename MM0::IteratorB iterator_B(
                typename MM0::IteratorB::Params(typename MM0::MmaCore::LayoutB(p.k_strideM)),
                p.key_ptr + iter_key_start * p.k_strideM,
                {problem_size_0_k, problem_size_0_n},
                thread_id(),
                tb_offset_B);

            auto my_warp_id = warp_id();
            auto my_lane_id = lane_id();

            // Construct thread-scoped matrix multiply
            typename MM0::Mma mma(shared_storage.mm0, thread_id(), my_warp_id, my_lane_id);

            typename MM0::Mma::FragmentC accum;

            accum.clear();

            auto gemm_k_iterations =
                (problem_size_0_k + MM0::Mma::Shape::kK - 1) / MM0::Mma::Shape::kK;

            // Compute threadblock-scoped matrix multiply-add
            mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);
            __syncthreads();

            if (kPreloadV) {
                prologueV(0);
            } else {
                MM1::Mma::drain_cp_asyncs();
            }

            typename MM0::Mma::Operator::IteratorC::TensorCoord iteratorC_tile_offset = {
                (tb_tile_offset.m() * MM0::Mma::WarpCount::kM) +
                    (my_warp_id % MM0::Mma::WarpCount::kM),
                (tb_tile_offset.n() * MM0::Mma::WarpCount::kN) +
                    (my_warp_id / MM0::Mma::WarpCount::kM)};

            // multiply by scaling factor
            // if (kSupportsBias) {
            //     accum =
            //         cutlass::multiplies<typename MM0::Mma::FragmentC>()(p.scale,
            //         accum);
            // }

            if (kSupportsBias) {
                cutlass::TensorRef<float, cutlass::layout::RowMajor> bias_tensor_ref(
                    shared_storage.after_mm0.bias.data(),
                    cutlass::layout::RowMajor(MM0::ThreadblockShape::kN));
                using Shape =
                    cutlass::MatrixShape<MM0::ThreadblockShape::kM, MM0::ThreadblockShape::kN>;
                AttentionBiasEpilogue<Shape,
                                      scalar_t,
                                      MM0::MmaCore::kThreads,
                                      Broadcast1_,
                                      Broadcast2_>
                    bias_epilogue;
                bias_epilogue(bias_tensor_ref,
                              p.bias1_ptr + iter_key_start,
                              p.bias2_ptr + query_start * p.num_keys + iter_key_start,
                              thread_id(),
                              {problem_size_0_m, problem_size_0_n},
                              p.num_keys);
                // Pij += Bij, Pij is in register fragment and Bij is in shared memory
                auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset(
                    lane_id(), warp_id(), iteratorC_tile_offset);
                MM0::AccumLambdaIterator::iterateRows(
                    lane_offset,
                    [&](int accum_m) {},
                    [&](int accum_m, int accum_n, int idx) {
                        if (accum_m < problem_size_0_m && accum_n < problem_size_0_n) {
                            accum[idx] =
                                accum[idx] * p.scale + bias_tensor_ref.at({accum_m, accum_n});
                        }
                    },
                    [&](int accum_m) {});
            }

            DISPATCH_BOOL(iter_key_start == 0, kIsFirst, ([&] {
                DISPATCH_BOOL(
                    p.num_keys - iter_key_start >= kKeysPerBlock, kFullColumns, ([&] {
                        // Update `mi` from accum stored in registers
                        // Also does accum[i] <- exp(accum[i] - mi)
                        iterative_softmax<typename MM0::Mma::Operator::IteratorC,
                                          kFullColumns,
                                          kIsFirst>(accum_o,
                                                    accum,
                                                    mi,
                                                    m_prime,
                                                    s_prime,
                                                    lane_id(),
                                                    thread_id(),
                                                    warp_id(),
                                                    p.num_keys - iter_key_start,
                                                    iteratorC_tile_offset,
                                                    kSupportsBias ? 1.0f : p.scale);
                    }));
            }));

            // Output results to shared-memory
            int warp_idx_mn_0 =
                my_warp_id % (MM0::Mma::Base::WarpCount::kM * MM0::Mma::Base::WarpCount::kN);
            auto output_tile_coords =
                cutlass::MatrixCoord{warp_idx_mn_0 % MM0::Mma::Base::WarpCount::kM,
                                     warp_idx_mn_0 / MM0::Mma::Base::WarpCount::kM};

            MM0::B2bGemm::accumToSmem(
                shared_storage.after_mm0.si, accum, my_lane_id, output_tile_coords);

            __syncthreads();

            //
            // MATMUL: Attn . V
            // Run the matmul `attn @ V` for a block of attn and V.
            // `attn` is read from shared memory (in `shared_storage_si`)
            // `V` is read from global memory (with iterator_B)
            //

            const int64_t nBlockN =
                kSingleValueIteration
                    ? 1
                    : ceil_div((int64_t)problem_size_1_n, int64_t(MM1::ThreadblockShape::kN));
            for (int blockN = 0; blockN < nBlockN; ++blockN) {
                int gemm_k_iterations =
                    (problem_size_1_k + MM1::Mma::Shape::kK - 1) / MM1::Mma::Shape::kK;

                // Compute threadblock-scoped matrix multiply-add and store it in accum
                // (in registers)
                if (!kPreloadV) {
                    __syncthreads();  // we share shmem between mma and epilogue
                }

                typename MM1::Mma::IteratorB iterator_V(
                    typename MM1::IteratorB::Params{MM1::LayoutB(p.v_strideM)},
                    p.value_ptr + iter_key_start * p.v_strideM,
                    {problem_size_1_k, problem_size_1_n},
                    thread_id(),
                    cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN});
                typename MM1::Mma mma_pv(shared_storage.after_mm0.mm1.mm,
                                         shared_storage.after_mm0.si,
                                         (int)thread_id(),
                                         (int)warp_id(),
                                         (int)lane_id(),
                                         (int)problem_size_1_k);
                mma_pv.set_prologue_done(kPreloadV);
                if (!kKeepOutputInRF) { accum_o.clear(); }
                mma_pv(gemm_k_iterations, accum_o, iterator_V, accum_o);
                __syncthreads();

                if (kPreloadV && !kSingleValueIteration && blockN + 1 < nBlockN) {
                    prologueV(blockN + 1);
                }

                if (!kKeepOutputInRF) {
                    MM1::Mma::drain_cp_asyncs();
                    DISPATCH_BOOL(
                        iter_key_start == 0, kIsFirst, ([&] {
                            DISPATCH_BOOL(
                                (iter_key_start + kKeysPerBlock) >= p.num_keys, kIsLast, ([&] {
                                    using DefaultEpilogue = typename MM1::DefaultEpilogue;
                                    using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
                                    using ElementCompute = typename DefaultOp::ElementCompute;
                                    using EpilogueOutputOp = typename cutlass::epilogue::thread::
                                        MemoryEfficientAttentionNormalize<
                                            typename cutlass::platform::
                                                conditional<kIsLast, output_t, output_accum_t>::
                                                    type,
                                            output_accum_t,
                                            DefaultOp::kCount,
                                            typename DefaultOp::ElementAccumulator,
                                            ElementCompute,
                                            kIsFirst,
                                            kIsLast,
                                            cutlass::Array<ElementCompute, kQueriesPerBlock>>;
                                    using Epilogue =
                                        typename cutlass::epilogue::threadblock::EpiloguePipelined<
                                            typename DefaultEpilogue::Shape,
                                            typename MM1::Mma::Operator,
                                            DefaultEpilogue::kPartitionsK,
                                            typename cutlass::platform::conditional<
                                                kIsLast,
                                                typename MM1::OutputTileIterator,
                                                typename MM1::OutputTileIteratorAccum>::type,
                                            typename DefaultEpilogue::AccumulatorFragmentIterator,
                                            typename DefaultEpilogue::WarpTileIterator,
                                            typename DefaultEpilogue::SharedLoadIterator,
                                            EpilogueOutputOp,
                                            typename DefaultEpilogue::Padding,
                                            DefaultEpilogue::kFragmentsPerIteration,
                                            true,  // IterationsUnroll
                                            typename MM1::OutputTileIteratorAccum  // Read
                                                                                   // iterator
                                            >;

                                    int col = blockN * MM1::Mma::Shape::kN;
                                    auto source_iter = createOutputAccumIter(col);
                                    auto dest_iter =
                                        call_conditional<kIsLast,
                                                         decltype(createOutputIter),
                                                         decltype(createOutputAccumIter)>::
                                            apply(createOutputIter, createOutputAccumIter, col);
                                    EpilogueOutputOp rescale(s_prime, m_prime);
                                    Epilogue epilogue(shared_storage.epilogue_shared_storage(),
                                                      thread_id(),
                                                      warp_id(),
                                                      lane_id());
                                    epilogue(rescale, dest_iter, accum_o, source_iter);
                                }));
                        }));
                    if (!kSingleValueIteration) { __syncthreads(); }
                }
            }
            __syncthreads();  // we modify `m_prime` after
        }

        if (kKeepOutputInRF) {
            constexpr bool kIsFirst = true;
            constexpr bool kIsLast = true;
            using DefaultEpilogue = typename MM1::DefaultEpilogue;
            using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp;
            using ElementCompute = typename DefaultOp::ElementCompute;
            using EpilogueOutputOp =
                typename cutlass::epilogue::thread::MemoryEfficientAttentionNormalize<
                    output_t,        // output
                    output_accum_t,  // source
                    DefaultOp::kCount,
                    typename DefaultOp::ElementAccumulator,  // accum
                    output_accum_t,                          // compute
                    kIsFirst,
                    kIsLast,
                    cutlass::Array<ElementCompute, kQueriesPerBlock>>;
            using Epilogue = typename cutlass::epilogue::threadblock::EpiloguePipelined<
                typename DefaultEpilogue::Shape,
                typename MM1::Mma::Operator,
                DefaultEpilogue::kPartitionsK,
                typename MM1::OutputTileIterator,  // destination
                typename DefaultEpilogue::AccumulatorFragmentIterator,
                typename DefaultEpilogue::WarpTileIterator,
                typename DefaultEpilogue::SharedLoadIterator,
                EpilogueOutputOp,
                typename DefaultEpilogue::Padding,
                DefaultEpilogue::kFragmentsPerIteration,
                true,  // IterationsUnroll
                typename MM1::OutputTileIteratorAccum  // source tile
                >;
            auto dest_iter = createOutputIter(0);
            EpilogueOutputOp rescale(s_prime, m_prime);
            Epilogue epilogue(
                shared_storage.epilogue_shared_storage(), thread_id(), warp_id(), lane_id());
            MM1::Mma::drain_cp_asyncs();
            epilogue(rescale, dest_iter, accum_o);
        }

        // 7. Calculate logsumexp
        // To make the backward easier, we pad logsumexp with `inf`
        // this avoids a few bound checks, and is not more expensive during fwd
        static_assert(kQueriesPerBlock < kNumWarpsPerBlock * kWarpSize, "");
        if (p.logsumexp_ptr && thread_id() < kQueriesPerBlock) {
            auto lse_dim = ceil_div((int32_t)p.num_queries, kAlignLSE) * kAlignLSE;
            if (thread_id() < p.num_queries) {
                p.logsumexp_ptr[thread_id()] =
                    accum_t(mi[thread_id()]) + cutlass::fast_log(accum_t(s_prime[thread_id()]));
            } else if (thread_id() < lse_dim) {
                p.logsumexp_ptr[thread_id()] =
                    cutlass::platform::numeric_limits<accum_t>::infinity();
            }
        }
    }

    template <typename WarpIteratorC,
              bool kFullColumns,
              bool kIsFirst>
    CUTLASS_DEVICE static void iterative_softmax(
        typename WarpIteratorC::Fragment& frag_o,  // output so far
        typename WarpIteratorC::Fragment& frag,
        cutlass::Array<accum_t, kQueriesPerBlock>& mi,
        cutlass::Array<accum_t, kQueriesPerBlock>& m_prime,
        cutlass::Array<accum_t, kQueriesPerBlock>& s_prime,
        int8_t lane_id,
        int8_t thread_id,
        int8_t warp_id,
        int16_t max_col,
        typename WarpIteratorC::TensorCoord const& tile_offset,
        float scaling)
    {
        /* Iterates on the accumulator and corresponding position on result matrix

        (1) Update `mi[r]` to the max value of the row `r`
        (2) In a second iteration do the following:
            (a) accum   <- exp(accum - mi)
            (b) m_prime <- exp(m_prime - mi)
            (c) s_prime <- s_prime * m_prime + sum(accum)

        All of this is done on registers, before we store all of this
        on shared memory for the next matmul with Value.
        */
        using Fragment = typename WarpIteratorC::Fragment;
        using LambdaIterator =
            typename DefaultMmaAccumLambdaIterator<WarpIteratorC, accum_t, kWarpSize>::Iterator;
        // Convert to `accum_t` (rather than double)
        constexpr float kLog2e = 1.4426950408889634074;  // log_2(e) = M_LOG2E
        if (!kIsFirst) {
            if (thread_id < kQueriesPerBlock) { m_prime[thread_id] = mi[thread_id]; }
            __syncthreads();
        }

        auto lane_offset = LambdaIterator::get_lane_offset(lane_id, warp_id, tile_offset);

        // First update `mi` to the max per-row
        {
            accum_t max;
            LambdaIterator::iterateRows(
                lane_offset,
                [&](int accum_m) { max = -cutlass::platform::numeric_limits<accum_t>::infinity(); },
                [&](int accum_m, int accum_n, int idx) {
                    if (kFullColumns || accum_n < max_col) {
                        max = cutlass::fast_max(max, frag[idx]);
                    }
                },
                [&](int accum_m) {
                    // Having 4x atomicMax seems faster than reduce within warp
                    // first...
                    atomicMaxFloat(&mi[accum_m], max * scaling);
                });
        }
        frag = cutlass::multiplies<Fragment>()(scaling * kLog2e, frag);

        // Make sure we all share the update values for `mi`
        __syncthreads();

        if (thread_id < kQueriesPerBlock) {
            auto m_prime_exp = exp2f(kLog2e * (m_prime[thread_id] - mi[thread_id]));
            m_prime[thread_id] = m_prime_exp;
            s_prime[thread_id] *= m_prime_exp;
        }
        __syncthreads();  // Update output fragments
        if (kKeepOutputInRF && !kIsFirst) {
            accum_t mp;
            LambdaIterator::iterateRows(
                lane_offset,
                [&](int accum_m) { mp = m_prime[accum_m]; },
                [&](int accum_m, int accum_n, int idx) { frag_o[idx] *= mp; },
                [&](int accum_m) {});
            __syncthreads();
        }
        // Update accum_m, accum_n, ...
        {
            accum_t mi_row, total_row;
            LambdaIterator::iterateRows(
                lane_offset,
                [&](int accum_m) { mi_row = kLog2e * mi[accum_m]; },
                [&](int accum_m, int accum_n, int idx) {
                    frag[idx] = (kFullColumns || accum_n < max_col) ? exp2f(frag[idx] - mi_row)
                                                                    : accum_t(0.0);
                },
                [&](int accum_m) {});
            LambdaIterator::iterateRows(
                lane_offset,
                [&](int accum_m) { total_row = 0.0; },
                [&](int accum_m, int accum_n, int idx) { total_row += frag[idx]; },
                [&](int accum_m) {
                    if (LambdaIterator::reduceSameRow(
                            lane_id, total_row, [](accum_t a, accum_t b) { return a + b; })) {
                        atomicAdd(&s_prime[accum_m], total_row);
                    }
                });
        }
    }

    static CUTLASS_DEVICE int8_t lane_id() { return threadIdx.x; }
    static CUTLASS_DEVICE int8_t warp_id() { return threadIdx.y; }
    static CUTLASS_DEVICE int16_t thread_id() { return threadIdx.x + threadIdx.y * blockDim.x; }
};

template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
    attention_kernel_batched_impl(typename AK::Params p)
{
    if (!p.advance_to_block()) { return; }
    AK::attention_kernel(p);
}

template <typename AK>
__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm)
    attention_kernel_batched(typename AK::Params params);

parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h
ADDED
@@ -0,0 +1,148 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once

// This does nothing.
template <typename ThreadMap, typename Shape, typename scalar_t>
struct BroadcastNoLoad {
    using Fragment =
        cutlass::Array<scalar_t, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
    static const bool kEnable = false;
    CUTLASS_DEVICE static void load(Fragment& frag,
                                    scalar_t* ptr,
                                    int thread_id,
                                    const cutlass::MatrixCoord& extent,
                                    int stride)
    {
    }
    CUTLASS_DEVICE static scalar_t*
    advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH)
    {
        return ptr;
    }
};

// This is to load the bias matrix from the global memory with on-the-fly
// broadcast. The shape in global memory is [B, N, 1, 1, L]. Each time we load
// the last dimension as a L row vector, and we further broadcast the L vector
// to a tile of size [L, L] by repeating the L vector L times
template <typename ThreadMap, typename Shape, typename scalar_t>
struct BroadcastA : public BroadcastNoLoad<ThreadMap, Shape, scalar_t> {
    using Base = BroadcastNoLoad<ThreadMap, Shape, scalar_t>;
    static const bool kEnable = true;
    using layout = cutlass::layout::AffineRank2RowMajor;

    using GmemTileIterator = cutlass::transform::threadblock::
        PredicatedTileIterator<Shape, scalar_t, layout, 0, ThreadMap>;
    using Fragment = typename GmemTileIterator::Fragment;

    CUTLASS_DEVICE static void load(Fragment& frag,
                                    scalar_t* ptr,
                                    int thread_id,
                                    const cutlass::MatrixCoord& extent,
                                    int stride)
    {
        GmemTileIterator iter({layout(0, 1)}, ptr, extent, thread_id);
        iter.load(frag);
    }

    CUTLASS_DEVICE static scalar_t*
    advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH)
    {
        return ptr + B_id * strideB + N_id * strideN;
    }
};

// This is to load the bias matrix from the global memory with on-the-fly
// broadcast. The shape in global memory is [B, 1, H, L, L]. Each time we load
// a [L, L] matrix. Different N use the same bias matrix when B and H are the
// same.
template <typename ThreadMap, typename Shape, typename scalar_t>
struct BroadcastB : public BroadcastNoLoad<ThreadMap, Shape, scalar_t> {
    using Base = BroadcastNoLoad<ThreadMap, Shape, scalar_t>;
    static const bool kEnable = true;
    using layout = cutlass::layout::RowMajor;

    using GmemTileIterator = cutlass::transform::threadblock::
        PredicatedTileIterator<Shape, scalar_t, layout, 0, ThreadMap>;
    using Fragment = typename GmemTileIterator::Fragment;

    CUTLASS_DEVICE static void load(Fragment& frag,
                                    scalar_t* ptr,
                                    int thread_id,
                                    const cutlass::MatrixCoord& extent,
                                    int stride)
    {
        GmemTileIterator iter({layout(stride)}, ptr, extent, thread_id);
        iter.load(frag);
    }

    CUTLASS_DEVICE static scalar_t*
    advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH)
    {
        return ptr + B_id * strideB + H_id * strideH;
    }
};

template <typename Shape,
          typename scalar_t,
          int kThreads,
          template <typename, typename, typename>
          class Broadcast1_,
          template <typename, typename, typename>
          class Broadcast2_>
struct AttentionBiasEpilogue {
    using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
        cutlass::layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
        kThreads,
        1>;

    using Broadcast1 = Broadcast1_<ThreadMap, Shape, scalar_t>;
    using Broadcast2 = Broadcast2_<ThreadMap, Shape, scalar_t>;

    Broadcast1 broadcast1;
    Broadcast2 broadcast2;

    using Ref = cutlass::TensorRef<float, cutlass::layout::RowMajor>;
    using SmemTileIterator = cutlass::transform::threadblock::
        RegularTileIterator<Shape, float, cutlass::layout::RowMajor, 0, ThreadMap>;

    CUTLASS_DEVICE void operator()(const Ref& ref,
                                   scalar_t* ptr1,
                                   scalar_t* ptr2,
                                   int thread_id,
                                   const cutlass::MatrixCoord& extent,
                                   int stride)
    {
        static_assert(Broadcast1::Fragment::kElements == Broadcast2::Fragment::kElements,
                      "The two broadcast fragments must have the same number of "
                      "elements");
        typename SmemTileIterator::Fragment frag;
        frag.clear();
        float* frag_ptr = reinterpret_cast<float*>(&frag);
        if (Broadcast1::kEnable) {
            typename Broadcast1::Fragment frag1;
            frag1.clear();
            broadcast1.load(frag1, ptr1, thread_id, extent, stride);
            scalar_t* frag1_ptr = reinterpret_cast<scalar_t*>(&frag1);
            for (int i = 0; i < Broadcast1::Fragment::kElements; ++i) {
                frag_ptr[i] += static_cast<float>(frag1_ptr[i]);
            }
        }
        if (Broadcast2::kEnable) {
            typename Broadcast2::Fragment frag2;
            frag2.clear();
            broadcast2.load(frag2, ptr2, thread_id, extent, stride);
            scalar_t* frag2_ptr = reinterpret_cast<scalar_t*>(&frag2);
            for (int i = 0; i < Broadcast2::Fragment::kElements; ++i) {
                frag_ptr[i] += static_cast<float>(frag2_ptr[i]);
            }
        }
        SmemTileIterator iter(ref, thread_id);
        iter.store(frag);
        __syncthreads();
    }
};

parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h
ADDED
@@ -0,0 +1,93 @@
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
 * reserved. SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once
#include <cutlass/cutlass.h>
#include "cutlass/aligned_buffer.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/numeric_types.h"
#include "cutlass/platform/platform.h"
#include "cutlass/transform/pitch_linear_thread_map.h"
#include "cutlass/transform/threadblock/predicated_tile_iterator.h"
#include "cutlass/transform/threadblock/regular_tile_iterator.h"

template <typename scalar_t,              // scalar type
          typename ThreadblockTileShape,  // size of tile to load
          int Threads,                    // number of participating threads
          int ElementsPerAccess>          // thread access width in elements
class TileSmemLoader {
public:
    using Shape = ThreadblockTileShape;
    using SmemTile = cutlass::AlignedBuffer<scalar_t, ThreadblockTileShape::kCount>;

    using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
        cutlass::layout::PitchLinearShape<ThreadblockTileShape::kColumn,  // contiguous
                                          ThreadblockTileShape::kRow>,    // strided
        Threads,                                                          // Threads
        ElementsPerAccess>;                                               // ElementsPerAccess

    using GmemTileIterator = cutlass::transform::threadblock::PredicatedTileIterator<
        ThreadblockTileShape,       // Shape
        scalar_t,                   // Element
        cutlass::layout::RowMajor,  // Layout
        0,                          // AdvanceRank
        ThreadMap>;                 // ThreadMap

    using SmemTileIterator =
        cutlass::transform::threadblock::RegularTileIterator<ThreadblockTileShape,  // Shape
                                                             scalar_t,              // Element
                                                             cutlass::layout::RowMajor,  // Layout
                                                             0,           // AdvanceRank
                                                             ThreadMap>;  // ThreadMap

    using Fragment = typename GmemTileIterator::Fragment;

    /// load a tile from global memory into shared memory
    CUTLASS_DEVICE
    static void load(GmemTileIterator tile_load_iter, SmemTileIterator tile_store_iter)
    {
        Fragment tb_frag;
        tb_frag.clear();
        tile_load_iter.load(tb_frag);
        tile_store_iter.store(tb_frag);

        __syncthreads();
    }
};

parrot/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/StopWatch.h
ADDED
@@ -0,0 +1,103 @@
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

#pragma once
#ifdef _WIN32
#include <windows.h>
#else
#include <time.h>
#endif

#ifdef _WIN32

class Stopwatch {
private:
    double m_total_time;
    LARGE_INTEGER m_start_time;

public:
    Stopwatch() { m_total_time = 0.0; }

    ~Stopwatch() {}

    void Reset() { m_total_time = 0.0; }

    void Start() { QueryPerformanceCounter(&m_start_time); }

    void Restart()
    {
        m_total_time = 0.0;
        QueryPerformanceCounter(&m_start_time);
    }

    void Stop()
    {
        LARGE_INTEGER frequency;
        LARGE_INTEGER stop_time;
        QueryPerformanceFrequency(&frequency);
        QueryPerformanceCounter(&stop_time);
        m_total_time +=
            ((double)(stop_time.QuadPart - m_start_time.QuadPart) / (double)frequency.QuadPart);
    }

    double GetTimeInSeconds() { return m_total_time; }
};

#else

class Stopwatch {
private:
    double m_total_time;
    struct timespec m_start_time;
    bool m_is_started;

public:
    Stopwatch()
    {
        m_total_time = 0.0;
        m_is_started = false;
    }

    ~Stopwatch() {}

    void Reset() { m_total_time = 0.0; }

    void Start()
    {
        clock_gettime(CLOCK_MONOTONIC, &m_start_time);
        m_is_started = true;
    }

    void Restart()
    {
        m_total_time = 0.0;
        clock_gettime(CLOCK_MONOTONIC, &m_start_time);
        m_is_started = true;
    }

    void Stop()
    {
        if (m_is_started) {
            m_is_started = false;

            struct timespec end_time;
            clock_gettime(CLOCK_MONOTONIC, &end_time);

            m_total_time += (double)(end_time.tv_sec - m_start_time.tv_sec) +
                            (double)(end_time.tv_nsec - m_start_time.tv_nsec) / 1e9;
        }
    }

    double GetTimeInSeconds()
    {
        if (m_is_started) {
            Stop();
            Start();
        }
        return m_total_time;
    }
};

#endif

ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation.
|
| 2 |
+
// SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
// DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
#ifndef __TIMER_H__
|
| 7 |
+
#define __TIMER_H__
|
| 8 |
+
|
| 9 |
+
#include <cuda_runtime.h>
|
| 10 |
+
#include <chrono>
|
| 11 |
+
#include "cuda.h"
|
| 12 |
+
|
| 13 |
+
class GPUTimer {
|
| 14 |
+
cudaEvent_t start, stop;
|
| 15 |
+
|
| 16 |
+
public:
|
| 17 |
+
GPUTimer()
|
| 18 |
+
{
|
| 19 |
+
cudaEventCreate(&start);
|
| 20 |
+
cudaEventCreate(&stop);
|
| 21 |
+
}
|
| 22 |
+
~GPUTimer()
|
| 23 |
+
{
|
| 24 |
+
cudaEventDestroy(start);
|
| 25 |
+
cudaEventDestroy(stop);
|
| 26 |
+
}
|
| 27 |
+
inline void Record() { cudaEventRecord(start); }
|
| 28 |
+
inline void Elapsed(float& time_elapsed)
|
| 29 |
+
{
|
| 30 |
+
cudaEventRecord(stop);
|
| 31 |
+
cudaEventSynchronize(stop);
|
| 32 |
+
cudaEventElapsedTime(&time_elapsed, start, stop);
|
| 33 |
+
}
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
class CPUTimer {
|
| 37 |
+
std::chrono::high_resolution_clock::time_point start;
|
| 38 |
+
|
| 39 |
+
public:
|
| 40 |
+
CPUTimer() : start(std::chrono::high_resolution_clock::now()) {}
|
| 41 |
+
inline void Reset() { start = std::chrono::high_resolution_clock::now(); }
|
| 42 |
+
inline float Elapsed()
|
| 43 |
+
{
|
| 44 |
+
auto temp = start;
|
| 45 |
+
start = std::chrono::high_resolution_clock::now();
|
| 46 |
+
return (float)(std::chrono::duration_cast<std::chrono::microseconds>(start - temp).count() /
|
| 47 |
+
1e3);
|
| 48 |
+
}
|
| 49 |
+
};
|
| 50 |
+
|
| 51 |
+
#endif
|