| repo (string, 1-152 chars, ⌀) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (2 classes) |
|---|---|---|---|---|---|---|
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/gavgpool-microkernel-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/AlignedAllocator.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
class GAvgPoolMicrokernelTester {
public:
inline GAvgPoolMicrokernelTester& m(size_t m) {
assert(m != 0);
this->m_ = m;
return *this;
}
inline size_t m() const {
return this->m_;
}
inline GAvgPoolMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline GAvgPoolMicrokernelTester& nr(size_t nr) {
assert(nr != 0);
this->nr_ = nr;
return *this;
}
inline size_t nr() const {
return this->nr_;
}
inline size_t packedN() const {
return n() % nr() == 0 ? n() : (n() / nr() + 1) * nr();
}
inline GAvgPoolMicrokernelTester& xStride(size_t xStride) {
assert(xStride != 0);
this->xStride_ = xStride;
return *this;
}
inline size_t xStride() const {
if (this->xStride_ == 0) {
return n();
} else {
assert(this->xStride_ >= n());
return this->xStride_;
}
}
inline GAvgPoolMicrokernelTester& xScale(float xScale) {
assert(xScale > 0.0f);
assert(std::isnormal(xScale));
this->xScale_ = xScale;
return *this;
}
inline float xScale() const {
return this->xScale_;
}
inline GAvgPoolMicrokernelTester& xZeroPoint(uint8_t xZeroPoint) {
this->xZeroPoint_ = xZeroPoint;
return *this;
}
inline uint8_t xZeroPoint() const {
return this->xZeroPoint_;
}
inline GAvgPoolMicrokernelTester& yScale(float yScale) {
assert(yScale > 0.0f);
assert(std::isnormal(yScale));
this->yScale_ = yScale;
return *this;
}
inline float yScale() const {
return this->yScale_;
}
inline GAvgPoolMicrokernelTester& yZeroPoint(uint8_t yZeroPoint) {
this->yZeroPoint_ = yZeroPoint;
return *this;
}
inline uint8_t yZeroPoint() const {
return this->yZeroPoint_;
}
inline GAvgPoolMicrokernelTester& yMin(uint8_t yMin) {
this->yMin_ = yMin;
return *this;
}
inline uint8_t yMin() const {
return this->yMin_;
}
inline GAvgPoolMicrokernelTester& yMax(uint8_t yMax) {
this->yMax_ = yMax;
return *this;
}
inline uint8_t yMax() const {
return this->yMax_;
}
inline GAvgPoolMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_q8gavgpool_up_ukernel_function q8gavgpool) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> x((m() - 1) * xStride() + n());
std::vector<uint8_t> zero(n());
std::vector<uint8_t> y(n());
std::vector<uint8_t> yRef(n());
std::vector<float> yFP(n());
std::vector<int32_t> yAcc(n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::fill(y.begin(), y.end(), 0xA5);
/* Prepare quantization parameters */
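/*
 * The accumulator bias is -x_zero_point * m, so summing the m input rows
 * yields sum(x[i] - x_zero_point); the requantization scale
 * x_scale / (y_scale * m) then maps that sum onto the averaged output's
 * quantized range.
 */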
const union pytorch_qnnp_avgpool_quantization_params quantizationParams =
pytorch_qnnp_compute_avgpool_quantization_params(
-int32_t(xZeroPoint()) * int32_t(m()),
xScale() / (yScale() * float(m())),
yZeroPoint(),
yMin(),
yMax());
const union pytorch_qnnp_avgpool_quantization_params
scalarQuantizationParams =
pytorch_qnnp_compute_scalar_avgpool_quantization_params(
-int32_t(xZeroPoint()) * int32_t(m()),
xScale() / (yScale() * float(m())),
yZeroPoint(),
yMin(),
yMax());
/* Compute reference results */
for (size_t j = 0; j < n(); j++) {
int32_t acc = scalarQuantizationParams.scalar.bias;
for (size_t i = 0; i < m(); i++) {
acc += x[i * xStride() + j];
}
yAcc[j] = acc;
yRef[j] = pytorch_qnnp_avgpool_quantize(acc, scalarQuantizationParams);
yFP[j] = float(acc) * (xScale() / (yScale() * float(m()))) +
float(yZeroPoint());
yFP[j] = std::min<float>(yFP[j], float(yMax()));
yFP[j] = std::max<float>(yFP[j], float(yMin()));
}
/* Call optimized micro-kernel */
q8gavgpool(
m(),
n(),
x.data(),
xStride() * sizeof(uint8_t),
zero.data(),
y.data(),
&quantizationParams);
/* Verify results */
for (size_t i = 0; i < n(); i++) {
ASSERT_LE(uint32_t(y[i]), uint32_t(yMax()))
<< "at position " << i << ", m = " << m() << ", n = " << n();
ASSERT_GE(uint32_t(y[i]), uint32_t(yMin()))
<< "at position " << i << ", m = " << m() << ", n = " << n();
ASSERT_NEAR(float(int32_t(y[i])), yFP[i], 0.5001f)
<< "at position " << i << ", m = " << m() << ", n = " << n()
<< ", acc = " << yAcc[i];
ASSERT_EQ(uint32_t(yRef[i]), uint32_t(y[i]))
<< "at position " << i << ", m = " << m() << ", n = " << n()
<< ", acc = " << yAcc[i];
}
}
}
void test(pytorch_q8gavgpool_mp_ukernel_function q8gavgpool) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> x((m() - 1) * xStride() + n());
std::vector<int32_t, AlignedAllocator<int32_t, 16>> mpAcc(packedN());
std::vector<uint8_t> zero(n());
std::vector<uint8_t> y(n());
std::vector<uint8_t> yRef(n());
std::vector<float> yFP(n());
std::vector<int32_t> yAcc(n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::fill(y.begin(), y.end(), 0xA5);
/* Prepare quantization parameters */
const union pytorch_qnnp_avgpool_quantization_params quantizationParams =
pytorch_qnnp_compute_avgpool_quantization_params(
-int32_t(xZeroPoint()) * int32_t(m()),
xScale() / (yScale() * float(m())),
yZeroPoint(),
yMin(),
yMax());
const union pytorch_qnnp_avgpool_quantization_params
scalarQuantizationParams =
pytorch_qnnp_compute_scalar_avgpool_quantization_params(
-int32_t(xZeroPoint()) * int32_t(m()),
xScale() / (yScale() * float(m())),
yZeroPoint(),
yMin(),
yMax());
/* Compute reference results */
for (size_t j = 0; j < n(); j++) {
int32_t acc = scalarQuantizationParams.scalar.bias;
for (size_t i = 0; i < m(); i++) {
acc += x[i * xStride() + j];
}
yAcc[j] = acc;
yRef[j] = pytorch_qnnp_avgpool_quantize(acc, scalarQuantizationParams);
yFP[j] = float(acc) * (xScale() / (yScale() * float(m()))) +
float(yZeroPoint());
yFP[j] = std::min<float>(yFP[j], float(yMax()));
yFP[j] = std::max<float>(yFP[j], float(yMin()));
}
/* Call optimized micro-kernel */
q8gavgpool(
m(),
n(),
x.data(),
xStride() * sizeof(uint8_t),
zero.data(),
mpAcc.data(),
y.data(),
&quantizationParams);
/* Verify results */
for (size_t i = 0; i < n(); i++) {
ASSERT_LE(uint32_t(y[i]), uint32_t(yMax()))
<< "at position " << i << ", m = " << m() << ", n = " << n();
ASSERT_GE(uint32_t(y[i]), uint32_t(yMin()))
<< "at position " << i << ", m = " << m() << ", n = " << n();
ASSERT_NEAR(float(int32_t(y[i])), yFP[i], 0.5001f)
<< "at position " << i << ", m = " << m() << ", n = " << n()
<< ", acc = " << yAcc[i];
ASSERT_EQ(uint32_t(yRef[i]), uint32_t(y[i]))
<< "at position " << i << ", m = " << m() << ", n = " << n()
<< ", acc = " << yAcc[i];
}
}
}
private:
size_t m_{1};
size_t n_{1};
size_t nr_{1};
size_t xStride_{0};
float xScale_{1.25f};
float yScale_{0.75f};
uint8_t xZeroPoint_{121};
uint8_t yZeroPoint_{133};
uint8_t yMin_{0};
uint8_t yMax_{255};
size_t iterations_{15};
};
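/*
 * Usage sketch (illustrative; the kernel symbol is an assumption, not taken
 * from this header): a test configures the tester through the builder methods
 * above and hands test() a micro-kernel to verify, e.g.
 *
 *   GAvgPoolMicrokernelTester().m(7).n(8).nr(8).test(
 *       pytorch_q8gavgpool_ukernel_up8xm__neon);
 */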
| 8,830 | 28.241722 | 79 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/gemm-block-sparse-microkernel-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <memory>
#include <random>
#include <vector>
#include <fp16.h>
#include <pack_block_sparse.h>
#include <qnnpack/AlignedAllocator.h>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
#define MAYBE_UNUSED __attribute__((unused))
namespace {
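// Overwrites randomly selected row_block_size x col_block_size blocks of b
// with the per-output-channel zero point, so that after zero-point
// subtraction those blocks are exactly zero and can be dropped by the
// block-CSR packing used below.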
void fillBlockSparseWeights(
uint8_t* b,
size_t N,
size_t K,
size_t row_block_size,
size_t col_block_size,
float sparsity,
const uint8_t* zero_points) {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
std::bernoulli_distribution dist{sparsity};
for (uint32_t n = 0; n < N ; n += row_block_size) {
for (uint32_t k = 0; k < K; k += col_block_size) {
if (dist(rng)) {
for (uint32_t nb = 0; (nb < row_block_size) && (n + nb < N); ++nb) {
for (uint32_t kb = 0; (kb < col_block_size) && (k + kb < K); ++kb) {
*(b + (n + nb) * K + k + kb) = zero_points[n + nb];
}
}
}
}
}
}
// Temporary debug utilities; to be removed later.
MAYBE_UNUSED void printMatrix(const char* name, const uint8_t* a, const size_t M, const size_t N) {
std::cout << "Matrix START:" << name << "...\n";
for (uint32_t m = 0; m < M ; ++m) {
for (uint32_t n = 0; n < N; n++) {
std::cout << (const uint32_t)(*(a + m * N + n)) << ", ";
}
std::cout << std::endl;
}
std::cout << "Matrix END...\n\n";
}
MAYBE_UNUSED void printMatrix(const char* name, const float* a, const size_t M, const size_t N) {
std::cout << "Matrix START:" << name << "...\n";
for (uint32_t m = 0; m < M ; ++m) {
for (uint32_t n = 0; n < N; n++) {
std::cout << (*(a + m * N + n)) << ", ";
}
std::cout << std::endl;
}
std::cout << "Matrix END...\n\n";
}
}
class GemmBlockSparseMicrokernelTester {
public:
inline GemmBlockSparseMicrokernelTester& mr(size_t mr) {
this->mr_ = mr;
return *this;
}
inline size_t mr() const {
return this->mr_;
}
inline GemmBlockSparseMicrokernelTester& nr(size_t nr) {
this->nr_ = nr;
return *this;
}
inline size_t nr() const {
return this->nr_;
}
inline GemmBlockSparseMicrokernelTester& m(size_t m) {
this->m_ = m;
return *this;
}
inline size_t m() const {
return this->m_;
}
inline GemmBlockSparseMicrokernelTester& n(size_t n) {
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline GemmBlockSparseMicrokernelTester& k(size_t k) {
this->k_ = k;
return *this;
}
inline size_t k() const {
return this->k_;
}
inline GemmBlockSparseMicrokernelTester& ks(size_t ks) {
this->ks_ = ks;
return *this;
}
inline GemmBlockSparseMicrokernelTester& rowBlockSize(size_t block_size) {
this->rowBlockSize_ = block_size;
return *this;
}
inline GemmBlockSparseMicrokernelTester& colBlockSize(size_t block_size) {
this->colBlockSize_ = block_size;
return *this;
}
inline GemmBlockSparseMicrokernelTester& sparsity(float s) {
this->sparsity_ = s;
return *this;
}
inline size_t ks() const {
return this->ks_;
}
inline size_t rowBlockSize() const {
return this->rowBlockSize_;
}
inline size_t colBlockSize() const {
return this->colBlockSize_;
}
inline float sparsity() const {
return this->sparsity_;
}
inline size_t biasN() const {
return n() % nr() == 0 ? n() : (n() / nr() + 1) * nr();
}
inline GemmBlockSparseMicrokernelTester& aStride(size_t aStride) {
this->aStride_ = aStride;
return *this;
}
inline size_t aStride() const {
return this->aStride_ == 0 ? k() : this->aStride_;
}
inline GemmBlockSparseMicrokernelTester& cStride(size_t cStride) {
this->cStride_ = cStride;
return *this;
}
inline size_t cStride() const {
return this->cStride_ == 0 ? n() : this->cStride_;
}
inline GemmBlockSparseMicrokernelTester& aZeroPoint(uint8_t aZeroPoint) {
this->aZeroPoint_ = aZeroPoint;
return *this;
}
inline uint8_t aZeroPoint() const {
return this->aZeroPoint_;
}
inline GemmBlockSparseMicrokernelTester& bZeroPoint(uint8_t bZeroPoint) {
this->bZeroPoint_ = bZeroPoint;
return *this;
}
inline uint8_t bZeroPoint() const {
return this->bZeroPoint_;
}
inline GemmBlockSparseMicrokernelTester& multiplier(const float multiplier) {
this->multiplier_ = multiplier;
return *this;
}
inline float multiplier() const {
return this->multiplier_;
}
inline GemmBlockSparseMicrokernelTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline GemmBlockSparseMicrokernelTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline GemmBlockSparseMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_q8gemm_dq_sparse_ukernel_function qgemm) const {
ASSERT_LE(m(), mr());
ASSERT_LE(n(), nr());
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> a((m() - 1) * aStride() + k() + 8);
std::vector<uint8_t> b(n() * k());
std::vector<float, AlignedAllocator<float, 32>> bias(std::max<size_t>(8, n()));
std::vector<float> c((m() - 1) * cStride() + n());
std::vector<float> acc(m() * n());
const uint8_t* aPtr = a.data();
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(a.begin(), a.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
std::fill(c.begin(), c.end(), 0.0f);
size_t num_zero_points_padded = n() + 8;
std::vector<uint8_t> kernel_zero_points
(num_zero_points_padded, bZeroPoint());
std::generate(kernel_zero_points.begin(), kernel_zero_points.end(), std::ref(u8rng));
// This loop ensures that the ASSERT_NE on the b matrix below does not fire.
uint8_t max_elem, min_elem;
do {
std::generate(b.begin(), b.end(), std::ref(u8rng));
fillBlockSparseWeights(
b.data(),
n(),
k(),
rowBlockSize(),
colBlockSize(),
sparsity(),
kernel_zero_points.data());
max_elem = *std::max_element(b.cbegin(), b.cend());
min_elem = *std::min_element(b.cbegin(), b.cend());
} while (max_elem == min_elem);
std::unique_ptr<qnnpack::BCSRMatrix> bcsr_matrix =
qnnpack::generateBlockCSRMatrix<uint32_t>(
b.data(),
n(),
k(),
rowBlockSize(),
colBlockSize(),
kernel_zero_points.data());
ASSERT_NE(
*std::max_element(a.cbegin(), a.cend()),
*std::min_element(a.cbegin(), a.cend()));
ASSERT_NE(
*std::max_element(b.cbegin(), b.cend()),
*std::min_element(b.cbegin(), b.cend()));
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
std::vector<float> dequantization_scales(num_zero_points_padded);
std::generate(
dequantization_scales.begin(),
dequantization_scales.end(),
std::ref(f32rng));
/* Compute 32-bit results and output quantization arguments */
std::fill(acc.begin(), acc.end(), 0);
for (size_t mIndex = 0; mIndex < m(); mIndex++) {
for (size_t nIndex = 0; nIndex < n(); nIndex++) {
for (size_t kIndex = 0; kIndex < k(); kIndex++) {
ASSERT_LT(mIndex * n() + nIndex, acc.size());
ASSERT_LT(mIndex * k() + kIndex, a.size());
acc[mIndex * n() + nIndex] +=
(int32_t(aPtr[mIndex * aStride() + kIndex]) -
int32_t(aZeroPoint())) *
(int32_t(b[nIndex * k() + kIndex]) - int32_t(kernel_zero_points[nIndex]));
}
acc[mIndex * n() + nIndex] =
acc[mIndex * n() + nIndex] *
dequantization_scales[nIndex] +
bias[nIndex];
}
}
const struct pytorch_qnnp_conv_dynamic_quantization_params quantizationParams{
aZeroPoint(),
kernel_zero_points.data(),
dequantization_scales.data(),
};
qgemm(
m(),
n(),
aPtr,
aStride() * sizeof(uint8_t),
bcsr_matrix->values.data(),
static_cast<const uint32_t*>(bcsr_matrix->row_values_data_ptr()),
static_cast<const uint32_t*>(bcsr_matrix->col_indices_data_ptr()),
bias.data(),
c.data(),
cStride(),
0,
&quantizationParams);
for (size_t mIndex = 0; mIndex < m(); mIndex++) {
for (size_t nIndex = 0; nIndex < n(); nIndex++) {
ASSERT_EQ(
c[mIndex * cStride() + nIndex],
acc[mIndex * n() + nIndex])
<< "at " << mIndex << ", " << nIndex
<< ": reference = " << acc[mIndex * n() + nIndex]
<< ", optimized = " << c[mIndex * cStride() + nIndex]
<< ", Mr x Nr = " << mr() << " x " << nr()
<< ", M x N x K = " << m() << " x " << n() << " x " << k();
}
}
}
}
template <typename SPARSE_INDICES_DTYPE, typename GEMM_UKERNEL_DTYPE>
void test_packed(
pytorch_q8gemm_sparse_packA_ukernel_function packa,
GEMM_UKERNEL_DTYPE qgemm) const {
ASSERT_LE(m(), mr());
ASSERT_LE(n(), nr());
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto s32rng =
std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> a((m() - 1) * aStride() + k() + 8);
std::vector<uint8_t> b(n() * k());
std::vector<float, AlignedAllocator<float, 32>> bias(std::max<size_t>(8, n()));
std::vector<float> c((m() - 1) * cStride() + n());
std::vector<float> acc(m() * n());
auto m_blocks = (m() + mr() - 1) / mr();
// Although colBlockSize() plays the role of kr, we reuse the 8x4/4x4 packing
// kernels, so a_packed has to be allocated accordingly.
const uint32_t kr_value = 4;
auto k_blocks = (k() + kr_value - 1) / kr_value;
std::vector<uint8_t> a_packed((m_blocks * k_blocks * mr() * kr_value) + 8, 0);
const uint8_t* aPtr = a.data();
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(a.begin(), a.end(), std::ref(u8rng));
std::generate(bias.begin(), bias.end(), std::ref(s32rng));
std::fill(c.begin(), c.end(), 0.0f);
size_t num_zero_points_padded = n() + 8;
std::vector<uint8_t> kernel_zero_points
(num_zero_points_padded, bZeroPoint());
uint8_t max_elem, min_elem;
// This loop ensures that the ASSERT_NE on the b matrix below does not fire.
do {
std::generate(b.begin(), b.end(), std::ref(u8rng));
fillBlockSparseWeights(
b.data(),
n(),
k(),
rowBlockSize(),
colBlockSize(),
sparsity(),
kernel_zero_points.data());
max_elem = *std::max_element(b.cbegin(), b.cend());
min_elem = *std::min_element(b.cbegin(), b.cend());
} while (max_elem == min_elem);
std::unique_ptr<qnnpack::BCSRMatrix> bcsr_matrix =
qnnpack::generateBlockCSRMatrix<SPARSE_INDICES_DTYPE>(
b.data(),
n(),
k(),
rowBlockSize(),
colBlockSize(),
kernel_zero_points.data());
ASSERT_NE(
*std::max_element(a.cbegin(), a.cend()),
*std::min_element(a.cbegin(), a.cend()));
ASSERT_NE(
*std::max_element(b.cbegin(), b.cend()),
*std::min_element(b.cbegin(), b.cend()));
auto f32rng =
std::bind(std::uniform_real_distribution<float>(1, 5), rng);
std::vector<float> dequantization_scales(num_zero_points_padded, 1.f);
std::generate(
dequantization_scales.begin(),
dequantization_scales.end(),
std::ref(f32rng));
/* Compute 32-bit results and output quantization arguments */
std::fill(acc.begin(), acc.end(), 0);
for (size_t mIndex = 0; mIndex < m(); mIndex++) {
for (size_t nIndex = 0; nIndex < n(); nIndex++) {
for (size_t kIndex = 0; kIndex < k(); kIndex++) {
ASSERT_LT(mIndex * n() + nIndex, acc.size());
ASSERT_LT(mIndex * k() + kIndex, a.size());
acc[mIndex * n() + nIndex] +=
(int32_t(aPtr[mIndex * aStride() + kIndex]) -
int32_t(aZeroPoint())) *
(int32_t(b[nIndex * k() + kIndex]) - int32_t(kernel_zero_points[nIndex]));
}
acc[mIndex * n() + nIndex] =
acc[mIndex * n() + nIndex] *
dequantization_scales[nIndex] +
bias[nIndex];
}
}
const struct pytorch_qnnp_conv_dynamic_quantization_params quantizationParams{
aZeroPoint(),
kernel_zero_points.data(),
dequantization_scales.data(),
};
packa(
m(),
k(),
aPtr,
aStride() * sizeof(uint8_t),
a_packed.data()
);
qgemm(
m(),
n(),
a_packed.data(),
bcsr_matrix->values.data(),
static_cast<const SPARSE_INDICES_DTYPE*>(
bcsr_matrix->row_values_data_ptr()),
static_cast<const SPARSE_INDICES_DTYPE*>(
bcsr_matrix->col_indices_data_ptr()),
bias.data(),
c.data(),
cStride(),
0,
&quantizationParams);
for (size_t mIndex = 0; mIndex < m(); mIndex++) {
for (size_t nIndex = 0; nIndex < n(); nIndex++) {
ASSERT_NEAR(
c[mIndex * cStride() + nIndex],
acc[mIndex * n() + nIndex],
std::abs(acc[mIndex * n() + nIndex]) * 1.0e-3f)
<< "at " << mIndex << ", " << nIndex
<< ": reference = " << acc[mIndex * n() + nIndex]
<< ", optimized = " << c[mIndex * cStride() + nIndex]
<< ", Mr x Nr = " << mr() << " x " << nr()
<< ", M x N x K = " << m() << " x " << n() << " x " << k();
}
}
}
}
private:
size_t mr_{1};
size_t nr_{1};
size_t m_{1};
size_t n_{1};
size_t k_{1};
size_t ks_{1};
size_t aStride_{0};
size_t cStride_{0};
size_t rowBlockSize_{1};
size_t colBlockSize_{4};
uint8_t aZeroPoint_{0};
uint8_t bZeroPoint_{0};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{10};
float multiplier_{2.0f};
float sparsity_{0.7f};
};
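/*
 * Usage sketch (illustrative; the packing/GEMM ukernel names are placeholders,
 * not taken from this header): block sizes are chosen to match the kernel and
 * the sparse index width is supplied as the template argument, e.g.
 *
 *   GemmBlockSparseMicrokernelTester()
 *       .mr(8).nr(4).m(8).n(4).k(16)
 *       .rowBlockSize(1).colBlockSize(4)
 *       .test_packed<uint32_t>(packa_ukernel, qgemm_dq_sparse_ukernel);
 */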
| 15,424 | 29.009728 | 101 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/global-average-pooling-operator-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class GlobalAveragePoolingOperatorTester {
public:
inline GlobalAveragePoolingOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline GlobalAveragePoolingOperatorTester& width(size_t width) {
assert(width != 0);
this->width_ = width;
return *this;
}
inline size_t width() const {
return this->width_;
}
inline GlobalAveragePoolingOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return channels();
} else {
assert(this->inputStride_ >= channels());
return this->inputStride_;
}
}
inline GlobalAveragePoolingOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return channels();
} else {
assert(this->outputStride_ >= channels());
return this->outputStride_;
}
}
inline GlobalAveragePoolingOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline GlobalAveragePoolingOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline GlobalAveragePoolingOperatorTester& inputZeroPoint(
uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline GlobalAveragePoolingOperatorTester& outputScale(float outputScale) {
assert(outputScale > 0.0f);
assert(std::isnormal(outputScale));
this->outputScale_ = outputScale;
return *this;
}
inline float outputScale() const {
return this->outputScale_;
}
inline GlobalAveragePoolingOperatorTester& outputZeroPoint(
uint8_t outputZeroPoint) {
this->outputZeroPoint_ = outputZeroPoint;
return *this;
}
inline uint8_t outputZeroPoint() const {
return this->outputZeroPoint_;
}
inline GlobalAveragePoolingOperatorTester& outputMin(uint8_t outputMin) {
this->outputMin_ = outputMin;
return *this;
}
inline uint8_t outputMin() const {
return this->outputMin_;
}
inline GlobalAveragePoolingOperatorTester& outputMax(uint8_t outputMax) {
this->outputMax_ = outputMax;
return *this;
}
inline uint8_t outputMax() const {
return this->outputMax_;
}
inline GlobalAveragePoolingOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input(
(batchSize() * width() - 1) * inputStride() + channels());
std::vector<uint8_t> output(batchSize() * outputStride());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
const double scale =
double(inputScale()) / (double(width()) * double(outputScale()));
for (size_t i = 0; i < batchSize(); i++) {
for (size_t j = 0; j < channels(); j++) {
double acc = 0.0f;
for (size_t k = 0; k < width(); k++) {
acc += double(
int32_t(input[(i * width() + k) * inputStride() + j]) -
int32_t(inputZeroPoint()));
}
outputRef[i * channels() + j] =
float(acc * scale + double(outputZeroPoint()));
outputRef[i * channels() + j] = std::min<float>(
outputRef[i * channels() + j], float(outputMax()));
outputRef[i * channels() + j] = std::max<float>(
outputRef[i * channels() + j], float(outputMin()));
}
}
/* Create, setup, run, and destroy Global Average Pooling operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t globalAveragePoolingOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_global_average_pooling_nwc_q8(
channels(),
inputZeroPoint(),
inputScale(),
outputZeroPoint(),
outputScale(),
outputMin(),
outputMax(),
0,
&globalAveragePoolingOp));
ASSERT_NE(nullptr, globalAveragePoolingOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_global_average_pooling_nwc_q8(
globalAveragePoolingOp,
batchSize(),
width(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(
globalAveragePoolingOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(globalAveragePoolingOp));
globalAveragePoolingOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_LE(
uint32_t(output[i * outputStride() + c]), uint32_t(outputMax()));
ASSERT_GE(
uint32_t(output[i * outputStride() + c]), uint32_t(outputMin()));
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.80f)
<< "in batch index " << i << ", channel " << c;
}
}
}
}
private:
size_t batchSize_{1};
size_t width_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float inputScale_{1.0f};
float outputScale_{1.0f};
uint8_t inputZeroPoint_{121};
uint8_t outputZeroPoint_{133};
uint8_t outputMin_{0};
uint8_t outputMax_{255};
size_t iterations_{1};
};
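/*
 * Usage sketch (illustrative): operator-level tests exercise the full
 * create/setup/run/delete sequence through testQ8(), e.g.
 *
 *   GlobalAveragePoolingOperatorTester()
 *       .batchSize(3).width(5).channels(17).testQ8();
 */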
| 7,107 | 26.984252 | 80 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/hardsigmoid-operator-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class HardsigmoidOperatorTester {
public:
inline HardsigmoidOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline HardsigmoidOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return this->channels_;
} else {
assert(this->inputStride_ >= this->channels_);
return this->inputStride_;
}
}
inline HardsigmoidOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return this->channels_;
} else {
assert(this->outputStride_ >= this->channels_);
return this->outputStride_;
}
}
inline HardsigmoidOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline HardsigmoidOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline HardsigmoidOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline float outputScale() const {
return this->outputScale_;
}
inline uint8_t outputZeroPoint() const {
return this->outputZeroPoint_;
}
inline HardsigmoidOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline HardsigmoidOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline HardsigmoidOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + channels());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
const float x = inputScale() *
(int32_t(input[i * inputStride() + c]) -
int32_t(inputZeroPoint()));
const float hardsigmoidX =
std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
const float scaledHardsigmoidX = hardsigmoidX / outputScale();
float y = scaledHardsigmoidX;
y = std::min<float>(y, int32_t(qmax()) - int32_t(outputZeroPoint()));
y = std::max<float>(y, int32_t(qmin()) - int32_t(outputZeroPoint()));
outputRef[i * channels() + c] = y + int32_t(outputZeroPoint());
}
}
/* Create, setup, run, and destroy Hardsigmoid operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t hardsigmoidOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_hardsigmoid_nc_q8(
channels(),
inputZeroPoint(),
inputScale(),
outputZeroPoint(),
outputScale(),
qmin(),
qmax(),
0,
&hardsigmoidOp));
ASSERT_NE(nullptr, hardsigmoidOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_hardsigmoid_nc_q8(
hardsigmoidOp,
batchSize(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(hardsigmoidOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(hardsigmoidOp));
hardsigmoidOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.6f);
}
}
}
}
private:
size_t batchSize_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float inputScale_{0.75f};
uint8_t inputZeroPoint_{121};
float outputScale_{1.0f / 256.0f};
uint8_t outputZeroPoint_{0};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
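/*
 * Usage sketch (illustrative):
 *
 *   HardsigmoidOperatorTester().channels(32).batchSize(2).testQ8();
 */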
| 5,893 | 26.036697 | 84 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/hardswish-operator-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class HardswishOperatorTester {
public:
inline HardswishOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline HardswishOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return this->channels_;
} else {
assert(this->inputStride_ >= this->channels_);
return this->inputStride_;
}
}
inline HardswishOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return this->channels_;
} else {
assert(this->outputStride_ >= this->channels_);
return this->outputStride_;
}
}
inline HardswishOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline HardswishOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline HardswishOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline HardswishOperatorTester& outputScale(float outputScale) {
assert(outputScale > 0.0f);
assert(std::isnormal(outputScale));
this->outputScale_ = outputScale;
return *this;
}
inline float outputScale() const {
return this->outputScale_;
}
inline HardswishOperatorTester& outputZeroPoint(uint8_t outputZeroPoint) {
this->outputZeroPoint_ = outputZeroPoint;
return *this;
}
inline uint8_t outputZeroPoint() const {
return this->outputZeroPoint_;
}
inline HardswishOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline HardswishOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline HardswishOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + channels());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
const float x = inputScale() *
(int32_t(input[i * inputStride() + c]) -
int32_t(inputZeroPoint()));
const float hardswishX =
x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
const float scaledHardswishX = hardswishX / outputScale();
float y = scaledHardswishX;
y = std::min<float>(y, int32_t(qmax()) - int32_t(outputZeroPoint()));
y = std::max<float>(y, int32_t(qmin()) - int32_t(outputZeroPoint()));
outputRef[i * channels() + c] = y + int32_t(outputZeroPoint());
}
}
/* Create, setup, run, and destroy Hardswish operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t hardswishOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_hardswish_nc_q8(
channels(),
inputZeroPoint(),
inputScale(),
outputZeroPoint(),
outputScale(),
qmin(),
qmax(),
0,
&hardswishOp));
ASSERT_NE(nullptr, hardswishOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_hardswish_nc_q8(
hardswishOp,
batchSize(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(hardswishOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(hardswishOp));
hardswishOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.6f);
}
}
}
}
private:
size_t batchSize_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float inputScale_{0.75f};
uint8_t inputZeroPoint_{121};
float outputScale_{0.75f};
uint8_t outputZeroPoint_{121};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
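/*
 * Usage sketch (illustrative):
 *
 *   HardswishOperatorTester().channels(32).inputScale(0.5f).testQ8();
 */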
| 6,189 | 25.913043 | 82 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/leaky-relu-operator-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class LeakyReLUOperatorTester {
public:
inline LeakyReLUOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline LeakyReLUOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return this->channels_;
} else {
assert(this->inputStride_ >= this->channels_);
return this->inputStride_;
}
}
inline LeakyReLUOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return this->channels_;
} else {
assert(this->outputStride_ >= this->channels_);
return this->outputStride_;
}
}
inline LeakyReLUOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline LeakyReLUOperatorTester& negativeSlope(float negativeSlope) {
assert(negativeSlope > 0.0f);
assert(negativeSlope < 1.0f);
this->negativeSlope_ = negativeSlope;
return *this;
}
inline float negativeSlope() const {
return this->negativeSlope_;
}
inline LeakyReLUOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline LeakyReLUOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline LeakyReLUOperatorTester& outputScale(float outputScale) {
assert(outputScale > 0.0f);
assert(std::isnormal(outputScale));
this->outputScale_ = outputScale;
return *this;
}
inline float outputScale() const {
return this->outputScale_;
}
inline LeakyReLUOperatorTester& outputZeroPoint(uint8_t outputZeroPoint) {
this->outputZeroPoint_ = outputZeroPoint;
return *this;
}
inline uint8_t outputZeroPoint() const {
return this->outputZeroPoint_;
}
inline LeakyReLUOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline LeakyReLUOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline LeakyReLUOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + channels());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
const float x = inputScale() *
(int32_t(input[i * inputStride() + c]) -
int32_t(inputZeroPoint()));
float y = (x < 0.0f ? x * negativeSlope() : x) / outputScale();
y = std::min<float>(y, int32_t(qmax()) - int32_t(outputZeroPoint()));
y = std::max<float>(y, int32_t(qmin()) - int32_t(outputZeroPoint()));
outputRef[i * channels() + c] = y + float(int32_t(outputZeroPoint()));
}
}
/* Create, setup, run, and destroy LeakyReLU operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t leakyReLUOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_leaky_relu_nc_q8(
channels(),
negativeSlope(),
inputZeroPoint(),
inputScale(),
outputZeroPoint(),
outputScale(),
qmin(),
qmax(),
0,
&leakyReLUOp));
ASSERT_NE(nullptr, leakyReLUOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_leaky_relu_nc_q8(
leakyReLUOp,
batchSize(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(leakyReLUOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(leakyReLUOp));
leakyReLUOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.6f);
}
}
}
}
private:
size_t batchSize_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float negativeSlope_{0.5f};
float outputScale_{0.75f};
uint8_t outputZeroPoint_{133};
float inputScale_{1.25f};
uint8_t inputZeroPoint_{121};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
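/*
 * Usage sketch (illustrative):
 *
 *   LeakyReLUOperatorTester().channels(32).negativeSlope(0.25f).testQ8();
 */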
| 6,417 | 25.630705 | 80 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/lut-microkernel-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
class LUTMicrokernelTester {
public:
inline LUTMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline LUTMicrokernelTester& inplace(bool inplace) {
this->inplace_ = inplace;
return *this;
}
inline bool inplace() const {
return this->inplace_;
}
inline LUTMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_x8lut_ukernel_function x8lut) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> x(n());
std::vector<uint8_t> t(256);
std::vector<uint8_t> y(n());
std::vector<uint8_t> yRef(n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::generate(t.begin(), t.end(), std::ref(u8rng));
if (inplace()) {
std::generate(y.begin(), y.end(), std::ref(u8rng));
} else {
std::fill(y.begin(), y.end(), 0xA5);
}
const uint8_t* xData = inplace() ? y.data() : x.data();
/* Compute reference results */
for (size_t i = 0; i < n(); i++) {
yRef[i] = t[xData[i]];
}
/* Call optimized micro-kernel */
x8lut(n(), xData, t.data(), y.data());
/* Verify results */
for (size_t i = 0; i < n(); i++) {
ASSERT_EQ(uint32_t(yRef[i]), uint32_t(y[i]))
<< "at position " << i << ", n = " << n();
}
}
}
private:
size_t n_{1};
bool inplace_{false};
size_t iterations_{15};
};
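/*
 * Usage sketch (illustrative; the kernel symbol is an assumption):
 *
 *   LUTMicrokernelTester().n(64).inplace(true).test(
 *       pytorch_x8lut_ukernel__scalar);
 */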
| 2,195 | 23.131868 | 74 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/lut-norm-microkernel-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <limits>
#include <random>
#include <vector>
#include <qnnpack/params.h>
class LUTNormMicrokernelTester {
public:
inline LUTNormMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline LUTNormMicrokernelTester& inplace(bool inplace) {
this->inplace_ = inplace;
return *this;
}
inline bool inplace() const {
return this->inplace_;
}
inline LUTNormMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_u8lut32norm_ukernel_function u8lut32norm) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
auto u32rng = std::bind(
std::uniform_int_distribution<uint32_t>(
1, std::numeric_limits<uint32_t>::max() / (257 * n())),
rng);
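// Bounding each table entry by UINT32_MAX / (257 * n()) keeps both the
// running sum over n() entries and the 256x numerator of the normalization
// within uint32_t range.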
std::vector<uint8_t> x(n());
std::vector<uint32_t> t(256);
std::vector<uint8_t> y(n());
std::vector<float> yRef(n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::generate(t.begin(), t.end(), std::ref(u32rng));
if (inplace()) {
std::generate(y.begin(), y.end(), std::ref(u8rng));
} else {
std::fill(y.begin(), y.end(), 0xA5);
}
const uint8_t* xData = inplace() ? y.data() : x.data();
/* Compute reference results */
uint32_t sum = 0;
for (size_t i = 0; i < n(); i++) {
sum += t[xData[i]];
}
for (size_t i = 0; i < n(); i++) {
yRef[i] = 256.0f * float(t[xData[i]]) / float(sum);
yRef[i] = std::min(yRef[i], 255.0f);
}
/* Call optimized micro-kernel */
u8lut32norm(n(), xData, t.data(), y.data());
/* Verify results */
for (size_t i = 0; i < n(); i++) {
ASSERT_NEAR(yRef[i], float(y[i]), 0.5f)
<< "at position " << i << ", n = " << n() << ", sum = " << sum;
}
}
}
private:
size_t n_{1};
bool inplace_{false};
size_t iterations_{15};
};
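/*
 * Usage sketch (illustrative; the kernel symbol is an assumption):
 *
 *   LUTNormMicrokernelTester().n(64).test(pytorch_u8lut32norm_ukernel__scalar);
 */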
| 2,580 | 24.81 | 75 | h |
null | pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/max-pooling-operator-tester.h |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class MaxPoolingOperatorTester {
public:
inline MaxPoolingOperatorTester& padding(uint32_t padding) {
this->paddingHeight_ = padding;
this->paddingWidth_ = padding;
return *this;
}
inline MaxPoolingOperatorTester& padding(
uint32_t paddingHeight,
uint32_t paddingWidth) {
this->paddingHeight_ = paddingHeight;
this->paddingWidth_ = paddingWidth;
return *this;
}
inline MaxPoolingOperatorTester& paddingHeight(uint32_t paddingHeight) {
this->paddingHeight_ = paddingHeight;
return *this;
}
inline MaxPoolingOperatorTester& paddingWidth(uint32_t paddingWidth) {
this->paddingWidth_ = paddingWidth;
return *this;
}
inline uint32_t paddingHeight() const {
return this->paddingHeight_;
}
inline uint32_t paddingWidth() const {
return this->paddingWidth_;
}
inline MaxPoolingOperatorTester& inputSize(
size_t inputHeight,
size_t inputWidth) {
assert(inputHeight >= 1);
assert(inputWidth >= 1);
this->inputHeight_ = inputHeight;
this->inputWidth_ = inputWidth;
return *this;
}
inline MaxPoolingOperatorTester& inputHeight(size_t inputHeight) {
assert(inputHeight >= 1);
this->inputHeight_ = inputHeight;
return *this;
}
inline size_t inputHeight() const {
return this->inputHeight_;
}
inline MaxPoolingOperatorTester& inputWidth(size_t inputWidth) {
assert(inputWidth >= 1);
this->inputWidth_ = inputWidth;
return *this;
}
inline size_t inputWidth() const {
return this->inputWidth_;
}
inline MaxPoolingOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline MaxPoolingOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline MaxPoolingOperatorTester& poolingSize(uint32_t poolingSize) {
assert(poolingSize >= 1);
this->poolingHeight_ = poolingSize;
this->poolingWidth_ = poolingSize;
return *this;
}
inline MaxPoolingOperatorTester& poolingSize(
uint32_t poolingHeight,
uint32_t poolingWidth) {
assert(poolingHeight >= 1);
assert(poolingWidth >= 1);
this->poolingHeight_ = poolingHeight;
this->poolingWidth_ = poolingWidth;
return *this;
}
inline MaxPoolingOperatorTester& poolingHeight(uint32_t poolingHeight) {
assert(poolingHeight >= 1);
this->poolingHeight_ = poolingHeight;
return *this;
}
inline uint32_t poolingHeight() const {
return this->poolingHeight_;
}
inline MaxPoolingOperatorTester& poolingWidth(uint32_t poolingWidth) {
assert(poolingWidth >= 1);
this->poolingWidth_ = poolingWidth;
return *this;
}
inline uint32_t poolingWidth() const {
return this->poolingWidth_;
}
inline MaxPoolingOperatorTester& stride(uint32_t stride) {
assert(stride >= 1);
this->strideHeight_ = stride;
this->strideWidth_ = stride;
return *this;
}
inline MaxPoolingOperatorTester& stride(
uint32_t strideHeight,
uint32_t strideWidth) {
assert(strideHeight >= 1);
assert(strideWidth >= 1);
this->strideHeight_ = strideHeight;
this->strideWidth_ = strideWidth;
return *this;
}
inline MaxPoolingOperatorTester& strideHeight(uint32_t strideHeight) {
assert(strideHeight >= 1);
this->strideHeight_ = strideHeight;
return *this;
}
inline uint32_t strideHeight() const {
return this->strideHeight_;
}
inline MaxPoolingOperatorTester& strideWidth(uint32_t strideWidth) {
assert(strideWidth >= 1);
this->strideWidth_ = strideWidth;
return *this;
}
inline uint32_t strideWidth() const {
return this->strideWidth_;
}
inline MaxPoolingOperatorTester& dilation(uint32_t dilation) {
assert(dilation >= 1);
this->dilationHeight_ = dilation;
this->dilationWidth_ = dilation;
return *this;
}
inline MaxPoolingOperatorTester& dilation(
uint32_t dilationHeight,
uint32_t dilationWidth) {
assert(dilationHeight >= 1);
assert(dilationWidth >= 1);
this->dilationHeight_ = dilationHeight;
this->dilationWidth_ = dilationWidth;
return *this;
}
inline MaxPoolingOperatorTester& dilationHeight(uint32_t dilationHeight) {
assert(dilationHeight >= 1);
this->dilationHeight_ = dilationHeight;
return *this;
}
inline uint32_t dilationHeight() const {
return this->dilationHeight_;
}
inline MaxPoolingOperatorTester& dilationWidth(uint32_t dilationWidth) {
assert(dilationWidth >= 1);
this->dilationWidth_ = dilationWidth;
return *this;
}
inline uint32_t dilationWidth() const {
return this->dilationWidth_;
}
inline uint32_t dilatedPoolingHeight() const {
return (poolingHeight() - 1) * dilationHeight() + 1;
}
inline uint32_t dilatedPoolingWidth() const {
return (poolingWidth() - 1) * dilationWidth() + 1;
}
inline size_t outputHeight() const {
const size_t paddedInputHeight = inputHeight() + paddingHeight() * 2;
if (paddedInputHeight <= dilatedPoolingHeight()) {
return 1;
} else {
return (paddedInputHeight - dilatedPoolingHeight()) / strideHeight() + 1;
}
}
inline size_t outputWidth() const {
const size_t paddedInputWidth = inputWidth() + paddingWidth() * 2;
if (paddedInputWidth <= dilatedPoolingWidth()) {
return 1;
} else {
return (paddedInputWidth - dilatedPoolingWidth()) / strideWidth() + 1;
}
}
inline MaxPoolingOperatorTester& inputPixelStride(size_t inputPixelStride) {
assert(inputPixelStride != 0);
this->inputPixelStride_ = inputPixelStride;
return *this;
}
inline size_t inputPixelStride() const {
if (this->inputPixelStride_ == 0) {
return channels();
} else {
assert(this->inputPixelStride_ >= channels());
return this->inputPixelStride_;
}
}
inline MaxPoolingOperatorTester& outputPixelStride(size_t outputPixelStride) {
assert(outputPixelStride != 0);
this->outputPixelStride_ = outputPixelStride;
return *this;
}
inline size_t outputPixelStride() const {
if (this->outputPixelStride_ == 0) {
return channels();
} else {
assert(this->outputPixelStride_ >= channels());
return this->outputPixelStride_;
}
}
inline MaxPoolingOperatorTester& nextInputSize(
uint32_t nextInputHeight,
uint32_t nextInputWidth) {
assert(nextInputHeight >= 1);
assert(nextInputWidth >= 1);
this->nextInputHeight_ = nextInputHeight;
this->nextInputWidth_ = nextInputWidth;
return *this;
}
inline MaxPoolingOperatorTester& nextInputHeight(uint32_t nextInputHeight) {
assert(nextInputHeight >= 1);
this->nextInputHeight_ = nextInputHeight;
return *this;
}
inline uint32_t nextInputHeight() const {
if (this->nextInputHeight_ == 0) {
return inputHeight();
} else {
return this->nextInputHeight_;
}
}
inline MaxPoolingOperatorTester& nextInputWidth(uint32_t nextInputWidth) {
assert(nextInputWidth >= 1);
this->nextInputWidth_ = nextInputWidth;
return *this;
}
inline uint32_t nextInputWidth() const {
if (this->nextInputWidth_ == 0) {
return inputWidth();
} else {
return this->nextInputWidth_;
}
}
inline size_t nextOutputHeight() const {
const size_t paddedNextInputHeight =
nextInputHeight() + paddingHeight() * 2;
if (paddedNextInputHeight <= dilatedPoolingHeight()) {
return 1;
} else {
return (paddedNextInputHeight - dilatedPoolingHeight()) / strideHeight() +
1;
}
}
inline size_t nextOutputWidth() const {
const size_t paddedNextInputWidth = nextInputWidth() + paddingWidth() * 2;
if (paddedNextInputWidth <= dilatedPoolingWidth()) {
return 1;
} else {
return (paddedNextInputWidth - dilatedPoolingWidth()) / strideWidth() + 1;
}
}
inline MaxPoolingOperatorTester& nextBatchSize(size_t nextBatchSize) {
assert(nextBatchSize >= 1);
this->nextBatchSize_ = nextBatchSize;
return *this;
}
inline size_t nextBatchSize() const {
if (this->nextBatchSize_ == 0) {
return batchSize();
} else {
return this->nextBatchSize_;
}
}
inline MaxPoolingOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline MaxPoolingOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline MaxPoolingOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testU8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input(
(batchSize() * inputHeight() * inputWidth() - 1) * inputPixelStride() +
channels());
std::vector<uint8_t> output(
(batchSize() * outputHeight() * outputWidth() - 1) *
outputPixelStride() +
channels());
std::vector<uint8_t> outputRef(
batchSize() * outputHeight() * outputWidth() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oy = 0; oy < outputHeight(); oy++) {
for (size_t ox = 0; ox < outputWidth(); ox++) {
for (size_t c = 0; c < channels(); c++) {
uint8_t maxValue = 0;
for (size_t py = 0; py < poolingHeight(); py++) {
const size_t iy = oy * strideHeight() + py * dilationHeight() -
paddingHeight();
for (size_t px = 0; px < poolingWidth(); px++) {
const size_t ix = ox * strideWidth() + px * dilationWidth() -
paddingWidth();
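// ix and iy are size_t, so positions that fall in the padding region wrap
// around to huge values and fail the bounds checks below; padded elements
// therefore never contribute to maxValue.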
if (ix < inputWidth() && iy < inputHeight()) {
maxValue = std::max(
maxValue,
input
[((i * inputHeight() + iy) * inputWidth() + ix) *
inputPixelStride() +
c]);
}
}
}
maxValue = std::min(maxValue, qmax());
maxValue = std::max(maxValue, qmin());
outputRef
[((i * outputHeight() + oy) * outputWidth() + ox) *
channels() +
c] = maxValue;
}
}
}
}
/* Create, setup, run, and destroy Max Pooling operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t maxPoolingOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_max_pooling2d_nhwc_u8(
paddingHeight(),
paddingWidth(),
poolingHeight(),
poolingWidth(),
strideHeight(),
strideWidth(),
dilationHeight(),
dilationWidth(),
channels(),
qmin(),
qmax(),
0,
&maxPoolingOp));
ASSERT_NE(nullptr, maxPoolingOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_max_pooling2d_nhwc_u8(
maxPoolingOp,
batchSize(),
inputHeight(),
inputWidth(),
input.data(),
inputPixelStride(),
output.data(),
outputPixelStride(),
nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(maxPoolingOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(maxPoolingOp));
maxPoolingOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t y = 0; y < outputHeight(); y++) {
for (size_t x = 0; x < outputWidth(); x++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_LE(
uint32_t(output
[((i * outputHeight() + y) * outputWidth() + x) *
outputPixelStride() +
c]),
uint32_t(qmax()));
ASSERT_GE(
uint32_t(output
[((i * outputHeight() + y) * outputWidth() + x) *
outputPixelStride() +
c]),
uint32_t(qmin()));
ASSERT_EQ(
uint32_t(outputRef
[((i * outputHeight() + y) * outputWidth() + x) *
channels() +
c]),
uint32_t(output
[((i * outputHeight() + y) * outputWidth() + x) *
outputPixelStride() +
c]))
<< "in batch index " << i << ", pixel (" << y << ", " << x
<< "), channel " << c;
}
}
}
}
}
}
void testSetupU8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input(std::max(
(batchSize() * inputHeight() * inputWidth() - 1) * inputPixelStride() +
channels(),
(nextBatchSize() * nextInputHeight() * nextInputWidth() - 1) *
inputPixelStride() +
channels()));
std::vector<uint8_t> output(std::max(
(batchSize() * outputHeight() * outputWidth() - 1) *
outputPixelStride() +
channels(),
(nextBatchSize() * nextOutputHeight() * nextOutputWidth() - 1) *
outputPixelStride() +
channels()));
std::vector<float> outputRef(
batchSize() * outputHeight() * outputWidth() * channels());
std::vector<float> nextOutputRef(
nextBatchSize() * nextOutputHeight() * nextOutputWidth() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t oy = 0; oy < outputHeight(); oy++) {
for (size_t ox = 0; ox < outputWidth(); ox++) {
for (size_t c = 0; c < channels(); c++) {
uint8_t maxValue = 0;
for (size_t py = 0; py < poolingHeight(); py++) {
const size_t iy = oy * strideHeight() + py * dilationHeight() -
paddingHeight();
for (size_t px = 0; px < poolingWidth(); px++) {
const size_t ix = ox * strideWidth() + px * dilationWidth() -
paddingWidth();
if (ix < inputWidth() && iy < inputHeight()) {
maxValue = std::max(
maxValue,
input
[((i * inputHeight() + iy) * inputWidth() + ix) *
inputPixelStride() +
c]);
}
}
}
maxValue = std::min(maxValue, qmax());
maxValue = std::max(maxValue, qmin());
outputRef
[((i * outputHeight() + oy) * outputWidth() + ox) *
channels() +
c] = maxValue;
}
}
}
}
/* Create, setup, and run Max Pooling operator once */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t maxPoolingOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_max_pooling2d_nhwc_u8(
paddingHeight(),
paddingWidth(),
poolingHeight(),
poolingWidth(),
strideHeight(),
strideWidth(),
dilationHeight(),
dilationWidth(),
channels(),
qmin(),
qmax(),
0,
&maxPoolingOp));
ASSERT_NE(nullptr, maxPoolingOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_max_pooling2d_nhwc_u8(
maxPoolingOp,
batchSize(),
inputHeight(),
inputWidth(),
input.data(),
inputPixelStride(),
output.data(),
outputPixelStride(),
nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(maxPoolingOp, nullptr /* thread pool */));
/* Verify results of the first run */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t y = 0; y < outputHeight(); y++) {
for (size_t x = 0; x < outputWidth(); x++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_LE(
uint32_t(output
[((i * outputHeight() + y) * outputWidth() + x) *
outputPixelStride() +
c]),
uint32_t(qmax()));
ASSERT_GE(
uint32_t(output
[((i * outputHeight() + y) * outputWidth() + x) *
outputPixelStride() +
c]),
uint32_t(qmin()));
ASSERT_EQ(
uint32_t(outputRef
[((i * outputHeight() + y) * outputWidth() + x) *
channels() +
c]),
uint32_t(output
[((i * outputHeight() + y) * outputWidth() + x) *
outputPixelStride() +
c]))
<< "in batch index " << i << ", pixel (" << y << ", " << x
<< "), channel " << c;
}
}
}
}
/* Re-generate data for the second run */
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results for the second run */
for (size_t i = 0; i < nextBatchSize(); i++) {
for (size_t oy = 0; oy < nextOutputHeight(); oy++) {
for (size_t ox = 0; ox < nextOutputWidth(); ox++) {
for (size_t c = 0; c < channels(); c++) {
uint8_t maxValue = 0;
for (size_t py = 0; py < poolingHeight(); py++) {
const size_t iy = oy * strideHeight() + py * dilationHeight() -
paddingHeight();
for (size_t px = 0; px < poolingWidth(); px++) {
const size_t ix = ox * strideWidth() + px * dilationWidth() -
paddingWidth();
if (ix < nextInputWidth() && iy < nextInputHeight()) {
maxValue = std::max(
maxValue,
input
[((i * nextInputHeight() + iy) * nextInputWidth() +
ix) *
inputPixelStride() +
c]);
}
}
}
maxValue = std::min(maxValue, qmax());
maxValue = std::max(maxValue, qmin());
nextOutputRef
[((i * nextOutputHeight() + oy) * nextOutputWidth() + ox) *
channels() +
c] = maxValue;
}
}
}
}
/* Setup and run Max Pooling operator the second time, and destroy the
* operator */
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_max_pooling2d_nhwc_u8(
maxPoolingOp,
nextBatchSize(),
nextInputHeight(),
nextInputWidth(),
input.data(),
inputPixelStride(),
output.data(),
outputPixelStride(),
nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(maxPoolingOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(maxPoolingOp));
maxPoolingOp = nullptr;
/* Verify results of the second run */
for (size_t i = 0; i < nextBatchSize(); i++) {
for (size_t y = 0; y < nextOutputHeight(); y++) {
for (size_t x = 0; x < nextOutputWidth(); x++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_LE(
uint32_t(
output
[((i * nextOutputHeight() + y) * nextOutputWidth() +
x) *
outputPixelStride() +
c]),
uint32_t(qmax()));
ASSERT_GE(
uint32_t(
output
[((i * nextOutputHeight() + y) * nextOutputWidth() +
x) *
outputPixelStride() +
c]),
uint32_t(qmin()));
ASSERT_EQ(
uint32_t(
nextOutputRef
[((i * nextOutputHeight() + y) * nextOutputWidth() +
x) *
channels() +
c]),
uint32_t(
output
[((i * nextOutputHeight() + y) * nextOutputWidth() +
x) *
outputPixelStride() +
c]))
<< "in batch index " << i << ", pixel (" << y << ", " << x
<< "), channel " << c;
}
}
}
}
}
}
private:
uint32_t paddingHeight_{0};
uint32_t paddingWidth_{0};
size_t inputHeight_{1};
size_t inputWidth_{1};
size_t channels_{1};
size_t batchSize_{1};
size_t inputPixelStride_{0};
size_t outputPixelStride_{0};
uint32_t poolingHeight_{1};
uint32_t poolingWidth_{1};
uint32_t strideHeight_{1};
uint32_t strideWidth_{1};
uint32_t dilationHeight_{1};
uint32_t dilationWidth_{1};
size_t nextInputHeight_{0};
size_t nextInputWidth_{0};
size_t nextBatchSize_{0};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{1};
};
| 23,959
| 30.609499
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/maxpool-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
class MaxPoolMicrokernelTester {
public:
inline MaxPoolMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline MaxPoolMicrokernelTester& s(size_t s) {
assert(s != 0);
this->s_ = s;
return *this;
}
inline size_t s() const {
return this->s_;
}
inline MaxPoolMicrokernelTester& kh(size_t kh) {
assert(kh != 0);
this->kh_ = kh;
return *this;
}
inline size_t kh() const {
return this->kh_;
}
inline MaxPoolMicrokernelTester& kw(size_t kw) {
assert(kw != 0);
this->kw_ = kw;
return *this;
}
inline size_t kw() const {
return this->kw_;
}
inline size_t ks() const {
return kh() * kw();
}
inline size_t packedKs() const {
if (kc() < kr()) {
return ks();
} else if (ks() <= mr()) {
return mr();
} else {
return (ks() - mr()) % qr() == 0
? ks()
: ((ks() - mr()) / qr() + 1) * qr() + mr();
}
}
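  // Illustrative note (not part of the original source): with kc() >= kr(),
  // mr = 9, qr = 8, and ks = 26, the remainder (26 - 9) is not divisible by 8,
  // so packedKs() = ((26 - 9) / 8 + 1) * 8 + 9 = 33.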
inline MaxPoolMicrokernelTester& mr(size_t mr) {
assert(mr != 0);
this->mr_ = mr;
return *this;
}
inline size_t mr() const {
return this->mr_;
}
inline MaxPoolMicrokernelTester& qr(size_t qr) {
assert(qr != 0);
this->qr_ = qr;
return *this;
}
inline size_t qr() const {
return this->qr_;
}
inline MaxPoolMicrokernelTester& kc(size_t kc) {
assert(kc != 0);
this->kc_ = kc;
return *this;
}
inline size_t kc() const {
return this->kc_;
}
inline MaxPoolMicrokernelTester& kr(size_t kr) {
assert(kr != 0);
this->kr_ = kr;
return *this;
}
inline size_t kr() const {
return this->kr_;
}
inline size_t packedN() const {
return kc() % kr() == 0 ? kc() : (kc() / kr() + 1) * kr();
}
inline MaxPoolMicrokernelTester& xStride(size_t xStride) {
assert(xStride != 0);
this->xStride_ = xStride;
return *this;
}
inline size_t xStride() const {
if (this->xStride_ == 0) {
return kc();
} else {
assert(this->xStride_ >= kc());
return this->xStride_;
}
}
inline MaxPoolMicrokernelTester& yStride(size_t yStride) {
assert(yStride != 0);
this->yStride_ = yStride;
return *this;
}
inline size_t yStride() const {
if (this->yStride_ == 0) {
return kc();
} else {
assert(this->yStride_ >= kc());
return this->yStride_;
}
}
inline MaxPoolMicrokernelTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline MaxPoolMicrokernelTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline MaxPoolMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_u8maxpool_ukernel_function u8maxpool) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<const uint8_t*> indirectX(packedKs() + (n() * s() - 1) * kh());
std::vector<uint8_t> x((indirectX.size() - 1) * xStride() + kc());
std::vector<uint8_t> zero(kc());
std::vector<uint8_t> y((n() - 1) * yStride() + kc());
std::vector<uint8_t> yRef(n() * kc());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::fill(y.begin(), y.end(), 0xA5);
for (size_t i = 0; i < indirectX.size(); i++) {
indirectX[i] = x.data() + i * xStride();
}
std::shuffle(indirectX.begin(), indirectX.end(), rng);
/* Prepare quantization parameters */
const union pytorch_qnnp_u8_clamping_params clampingParams =
pytorch_qnnp_compute_u8_clamping_params(qmin(), qmax());
/* Compute reference results */
for (size_t i = 0; i < n(); i++) {
for (size_t k = 0; k < kc(); k++) {
uint8_t maxValue = 0;
for (size_t j = 0; j < ks(); j++) {
maxValue = std::max(maxValue, indirectX[i * s() * kh() + j][k]);
}
maxValue = std::min(maxValue, qmax());
maxValue = std::max(maxValue, qmin());
yRef[i * kc() + k] = maxValue;
}
}
/* Call optimized micro-kernel */
u8maxpool(
n(),
ks(),
kc(),
indirectX.data(),
y.data(),
(kh() * s() - packedKs()) * sizeof(void*),
(yStride() - kc()) * sizeof(uint8_t),
&clampingParams);
/* Verify results */
for (size_t i = 0; i < n(); i++) {
for (size_t k = 0; k < kc(); k++) {
ASSERT_EQ(
uint32_t(yRef[i * kc() + k]), uint32_t(y[i * yStride() + k]))
<< "at pixel " << i << ", channel " << k << ", n = " << n()
<< ", ks = " << kh() << "x" << kw() << " (" << ks()
<< "), kc = " << kc();
}
}
}
}
private:
size_t n_{1};
size_t s_{1};
size_t kh_{1};
size_t kw_{1};
size_t mr_{1};
size_t qr_{1};
size_t kc_{1};
size_t kr_{1};
size_t xStride_{0};
size_t yStride_{0};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
| 5,852
| 21.774319
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/requantization-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cstddef>
#include <cstdlib>
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <cmath>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
#include <qnnpack/scalar-utils.h>
class RequantizationTester {
public:
inline RequantizationTester& s(uint32_t s) {
this->s_ = s;
return *this;
}
inline uint32_t s() const {
return this->s_;
}
inline float scale() const {
return ldexpf(1.0f, -s());
}
inline RequantizationTester& zeroPoint(int32_t zeroPoint) {
this->zeroPoint_ = zeroPoint;
return *this;
}
inline int32_t zeroPoint() const {
return this->zeroPoint_;
}
inline RequantizationTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline RequantizationTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline RequantizationTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
/*
* Test that requantization of numbers ((i - zero point) * 2**s) with
* - scale = exp2(-s)
* - zero point in [0, 255]
* - no output clamping
* produces exactly i, provided that ((i - zero point) * 2**s) does not
* overflow.
*/
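  /*
   * Illustrative worked example (not part of the original source): with s = 3
   * (scale = 0.125) and zero point = 100, i = 105 maps to the input
   * (105 - 100) << 3 = 40, and requantization recovers 40 * 0.125 + 100 = 105.
   */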
void testExactDivideByPO2(pytorch_requantization_function requantize) const {
ASSERT_GE(zeroPoint(), 0);
ASSERT_LE(zeroPoint(), 255);
/* Note: need s >= 1 to ensure scale = exp2(-s) < 1.0 */
ASSERT_GE(s(), 1);
ASSERT_LT(s(), 32);
std::vector<int32_t> inputs(256);
std::vector<uint8_t> outputs(inputs.size());
const int32_t maxI =
(uint32_t(std::numeric_limits<int32_t>::max()) >> s()) + zeroPoint();
const int32_t minI =
-(-uint32_t(std::numeric_limits<int32_t>::min()) >> s()) + zeroPoint();
for (int32_t i = 0; i < 256; i++) {
const int32_t clampedI = std::max(minI, std::min(maxI, i));
inputs[i] = int32_t(uint32_t(clampedI - zeroPoint()) << s());
}
requantize(
inputs.size(),
inputs.data(),
scale(),
zeroPoint(),
qmin(),
qmax(),
outputs.data());
for (int32_t i = 0; i < 256; i++) {
const int32_t clampedI = std::max(minI, std::min(maxI, i));
ASSERT_EQ(clampedI, outputs[i])
<< "i = " << i << ", clamped i = " << clampedI << ", min i = " << minI
<< ", max i = " << maxI << ", s = " << s()
<< ", zero point = " << zeroPoint();
}
}
/*
* Test that requantization of numbers (i * 2**s + sign(i - zero point) *
* 2**(s-1)) with
* - scale = exp2(-s)
* - zero point in [1, 255]
* - no output clamping
* produces exactly i, provided that ((i - zero point) * 2**s) does not
* overflow.
*/
void testDivideByPO2WithRoundingUp(pytorch_requantization_function requantize) {
ASSERT_GE(zeroPoint(), 0);
ASSERT_LE(zeroPoint(), 255);
/* Note: need s >= 1 to ensure scale = exp2(-s) < 1.0 */
ASSERT_GE(s(), 1);
ASSERT_LT(s(), 32);
std::vector<int32_t> inputs(256);
std::vector<uint8_t> outputs(inputs.size());
for (int32_t i = 0; i < 256; i++) {
const int64_t input =
RequantizationTester::shiftLeft(i - zeroPoint(), s()) -
(INT64_C(1) << (s() - 1)) + (int64_t)(i <= zeroPoint());
inputs[i] = int32_t(input);
}
requantize(
inputs.size(),
inputs.data(),
scale(),
zeroPoint(),
qmin(),
qmax(),
outputs.data());
for (int32_t i = 0; i < 256; i++) {
const int64_t input =
RequantizationTester::shiftLeft(i - zeroPoint(), s()) -
(INT64_C(1) << (s() - 1)) + (int64_t)(i <= zeroPoint());
if (int32_t(input) == input) {
ASSERT_EQ(i, uint32_t(outputs[i]))
<< "i = " << i << ", input = " << input << ", s = " << s()
<< ", zero point = " << zeroPoint();
}
}
}
/*
* Test that requantization of numbers (i * 2**s + sign(i - zero point) *
* 2**(s-1)) with
* - scale = exp2(-s)
* - zero point in [1, 255]
* - no output clamping
* produces exactly i, provided that ((i - zero point) * 2**s) does not
* overflow.
*/
void testDivideByPO2WithRoundingDown(pytorch_requantization_function requantize) {
ASSERT_GE(zeroPoint(), 0);
ASSERT_LE(zeroPoint(), 255);
/* Note: need s >= 1 to ensure scale = exp2(-s) < 1.0 */
ASSERT_GE(s(), 1);
ASSERT_LT(s(), 32);
std::vector<int32_t> inputs(256);
std::vector<uint8_t> outputs(inputs.size());
for (int32_t i = 0; i < 256; i++) {
const int64_t input =
RequantizationTester::shiftLeft(i - zeroPoint(), s()) +
(INT64_C(1) << (s() - 1)) - (int64_t)(i >= zeroPoint());
inputs[i] = int32_t(input);
}
requantize(
inputs.size(),
inputs.data(),
scale(),
zeroPoint(),
qmin(),
qmax(),
outputs.data());
for (int32_t i = 0; i < 256; i++) {
const int64_t input =
RequantizationTester::shiftLeft(i - zeroPoint(), s()) +
(INT64_C(1) << (s() - 1)) - (int64_t)(i >= zeroPoint());
if (int32_t(input) == input) {
ASSERT_EQ(i, uint32_t(outputs[i]))
<< "i = " << i << ", input = " << input << ", s = " << s()
<< ", zero point = " << zeroPoint();
}
}
}
void testDivideByPO2WithRoundingAway(pytorch_requantization_function requantize) {
ASSERT_GE(zeroPoint(), 0);
ASSERT_LE(zeroPoint(), 255);
/* Note: need s >= 1 to ensure scale = exp2(-s) < 1.0 */
ASSERT_GE(s(), 1);
ASSERT_LT(s(), 32);
std::vector<int32_t> inputs(256);
std::vector<uint8_t> outputs(inputs.size());
for (int32_t i = 0; i < 256; i++) {
int64_t input = RequantizationTester::shiftLeft(i - zeroPoint(), s());
if (input > 0) {
input -= INT64_C(1) << (s() - 1);
} else if (input < 0) {
input += INT64_C(1) << (s() - 1);
}
inputs[i] = int32_t(input);
}
requantize(
inputs.size(),
inputs.data(),
scale(),
zeroPoint(),
qmin(),
qmax(),
outputs.data());
for (uint32_t i = 0; i < 256; i++) {
int64_t input = RequantizationTester::shiftLeft(i - zeroPoint(), s());
if (input > 0) {
input -= INT64_C(1) << (s() - 1);
} else if (input < 0) {
input += INT64_C(1) << (s() - 1);
}
if (int32_t(input) == input) {
ASSERT_EQ(i, uint32_t(outputs[i]))
<< "i = " << i << ", input = " << input << ", s = " << s()
<< ", zero point = " << zeroPoint();
}
}
}
void testSpecialCases(pytorch_requantization_function requantize) {
std::vector<int32_t> inputs(256);
std::vector<uint8_t> outputs(inputs.size());
std::fill(
inputs.begin(), inputs.end(), std::numeric_limits<int32_t>::min());
for (int32_t zeroPoint = 0; zeroPoint < 256; zeroPoint++) {
requantize(
inputs.size(),
inputs.data(),
ldexpf(1.0f, -32) /* scale */,
zeroPoint /* zero point */,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(),
outputs.data());
ASSERT_EQ(
std::max(int32_t(0), zeroPoint - 1),
*std::min_element(outputs.cbegin(), outputs.cend()));
}
std::fill(
inputs.begin(), inputs.end(), std::numeric_limits<int32_t>::max());
requantize(
inputs.size(),
inputs.data(),
0x1.FFFFFEp-1f /* scale */,
std::numeric_limits<uint8_t>::max() /* zero point */,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(),
outputs.data());
for (size_t i = 0; i < inputs.size(); i++) {
ASSERT_EQ(std::numeric_limits<uint8_t>::max(), outputs[i]);
}
}
void testRandomCasesPrecise(pytorch_requantization_function requantize) {
std::random_device randomDevice;
std::mt19937 mtRng(randomDevice());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
auto rng = std::bind(std::uniform_int_distribution<uint8_t>(), mtRng);
std::vector<int32_t> inputs(4096);
std::vector<uint8_t> outputs(inputs.size());
const uint8_t zeroPoint = UINT8_C(128);
std::uniform_real_distribution<float> scaleDistribution(
0x1.000000p-23f, 0x1.FFFFFEp-1f);
const float scale = scaleDistribution(mtRng);
for (size_t i = 0; i < inputs.size(); i++) {
const uint8_t approximateOutput = rng();
const int32_t input =
int32_t(double(approximateOutput) / double(scale));
inputs[i] = input;
}
requantize(
inputs.size(),
inputs.data(),
scale,
zeroPoint,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(),
outputs.data());
      /* Ensure that the outputs are not all identical, as in that case the test
       * doesn't validate much */
ASSERT_NE(
*std::max_element(outputs.cbegin(), outputs.cend()),
*std::min_element(outputs.cbegin(), outputs.cend()));
for (size_t i = 0; i < inputs.size(); i++) {
const uint8_t referenceOutput = pytorch_scalar_requantize_precise(
inputs[i],
scale,
zeroPoint,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
ASSERT_EQ(uint32_t(referenceOutput), uint32_t(outputs[i]));
}
}
}
void testRandomCasesApproximate(pytorch_requantization_function requantize) {
std::random_device randomDevice;
std::mt19937 mtRng(randomDevice());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
auto rng = std::bind(std::uniform_int_distribution<uint8_t>(), mtRng);
std::vector<int32_t> inputs(4096);
std::vector<uint8_t> outputs(inputs.size());
const uint8_t zeroPoint = UINT8_C(128);
std::uniform_real_distribution<float> scaleDistribution(
0x1.000000p-23f, 0x1.FFFFFEp-1f);
const float scale = scaleDistribution(mtRng);
for (size_t i = 0; i < inputs.size(); i++) {
const uint8_t approximateOutput = rng();
const int32_t input =
int32_t(double(approximateOutput) / double(scale));
inputs[i] = input;
}
requantize(
inputs.size(),
inputs.data(),
scale,
zeroPoint,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(),
outputs.data());
      /* Ensure that the outputs are not all identical, as in that case the test
       * doesn't validate much */
ASSERT_NE(
*std::max_element(outputs.cbegin(), outputs.cend()),
*std::min_element(outputs.cbegin(), outputs.cend()));
for (size_t i = 0; i < inputs.size(); i++) {
const double referenceOutput =
RequantizationTester::requantizeApproximate(
inputs[i],
scale,
zeroPoint,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
ASSERT_LE(fabs(referenceOutput - double(outputs[i])), 0.55)
<< "input = " << inputs[i] << ", output = " << uint32_t(outputs[i])
<< ", reference output = " << referenceOutput;
}
}
}
void testRandomCasesAgainstReference(
pytorch_requantization_function requantize,
pytorch_requantization_function requantizeReference) {
std::random_device randomDevice;
std::mt19937 mtRng(randomDevice());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
auto rng = std::bind(std::uniform_int_distribution<uint8_t>(), mtRng);
std::vector<int32_t> inputs(4096);
std::vector<uint8_t> outputs(inputs.size());
std::vector<uint8_t> referenceOutputs(inputs.size());
const uint8_t zeroPoint = UINT8_C(128);
std::uniform_real_distribution<float> scaleDistribution(
0x1.000000p-23f, 0x1.FFFFFEp-1f);
const float scale = scaleDistribution(mtRng);
for (size_t i = 0; i < inputs.size(); i++) {
const uint8_t approximateOutput = rng();
const int32_t input =
int32_t(double(approximateOutput) / double(scale));
inputs[i] = input;
}
requantize(
inputs.size(),
inputs.data(),
scale,
zeroPoint,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(),
outputs.data());
requantizeReference(
inputs.size(),
inputs.data(),
scale,
zeroPoint,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(),
referenceOutputs.data());
      /* Ensure that the outputs are not all identical, as in that case the test
       * doesn't validate much */
ASSERT_NE(
*std::max_element(outputs.cbegin(), outputs.cend()),
*std::min_element(outputs.cbegin(), outputs.cend()));
for (size_t i = 0; i < inputs.size(); i++) {
ASSERT_EQ(uint32_t(referenceOutputs[i]), uint32_t(outputs[i]));
}
}
}
static inline int64_t shiftLeft(int64_t w, uint32_t n) {
return (int64_t)((uint64_t)w << n);
}
static inline double requantizeApproximate(
int32_t value,
float scale,
uint8_t zeroPoint,
uint8_t qmin,
uint8_t qmax) {
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
double clampedValue = double(value) * double(scale) + double(zeroPoint);
const double fmin = double(qmin);
if (clampedValue < fmin) {
clampedValue = fmin;
}
const double fmax = double(qmax);
if (clampedValue > fmax) {
clampedValue = fmax;
}
return clampedValue;
}
private:
size_t zeroPoint_{0};
size_t s_{1};
uint8_t qmin_{std::numeric_limits<uint8_t>::min()};
uint8_t qmax_{std::numeric_limits<uint8_t>::max()};
size_t iterations_{1};
};
| 14,479
| 29.7431
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/rmax-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
class RMaxMicrokernelTester {
public:
inline RMaxMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline RMaxMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_u8rmax_ukernel_function u8rmax) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> x(n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
/* Compute reference results */
uint8_t yRef = 0;
for (size_t i = 0; i < n(); i++) {
yRef = std::max(yRef, x[i]);
}
/* Call optimized micro-kernel */
const uint8_t y = u8rmax(n(), x.data());
/* Verify results */
ASSERT_EQ(yRef, y) << "n = " << n();
}
}
private:
size_t n_{1};
size_t iterations_{15};
};
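/*
 * Example usage (illustrative only; the kernel symbol below is an assumption
 * and may not match the names defined elsewhere in QNNPACK):
 *
 *   RMaxMicrokernelTester().n(128).iterations(3).test(pytorch_u8rmax_ukernel__neon);
 */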
| 1,549
| 21.463768
| 74
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/sigmoid-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class SigmoidOperatorTester {
public:
inline SigmoidOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline SigmoidOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return this->channels_;
} else {
assert(this->inputStride_ >= this->channels_);
return this->inputStride_;
}
}
inline SigmoidOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return this->channels_;
} else {
assert(this->outputStride_ >= this->channels_);
return this->outputStride_;
}
}
inline SigmoidOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline SigmoidOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline SigmoidOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline float outputScale() const {
return 1.0f / 256.0f;
}
inline uint8_t outputZeroPoint() const {
return 0;
}
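  // Illustrative note (not part of the original source): with the fixed output
  // quantization above (scale = 1/256, zero point = 0), a sigmoid value of 0.5
  // is represented as 0.5 / (1/256) + 0 = 128.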
inline SigmoidOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline SigmoidOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline SigmoidOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + channels());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
const float x = inputScale() *
(int32_t(input[i * inputStride() + c]) -
int32_t(inputZeroPoint()));
const float sigmoidX = 1.0f / (1.0f + exp(-x));
const float scaledSigmoidX = sigmoidX / outputScale();
float y = scaledSigmoidX;
y = std::min<float>(y, int32_t(qmax()) - int32_t(outputZeroPoint()));
y = std::max<float>(y, int32_t(qmin()) - int32_t(outputZeroPoint()));
outputRef[i * channels() + c] = y + int32_t(outputZeroPoint());
}
}
/* Create, setup, run, and destroy Sigmoid operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t sigmoidOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_sigmoid_nc_q8(
channels(),
inputZeroPoint(),
inputScale(),
outputZeroPoint(),
outputScale(),
qmin(),
qmax(),
0,
&sigmoidOp));
ASSERT_NE(nullptr, sigmoidOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_sigmoid_nc_q8(
sigmoidOp,
batchSize(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(sigmoidOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(sigmoidOp));
sigmoidOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.6f);
}
}
}
}
private:
size_t batchSize_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float inputScale_{0.75f};
uint8_t inputZeroPoint_{121};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
| 5,667
| 25.362791
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/softargmax-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class SoftArgMaxOperatorTester {
public:
inline SoftArgMaxOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline SoftArgMaxOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return this->channels_;
} else {
assert(this->inputStride_ >= this->channels_);
return this->inputStride_;
}
}
inline SoftArgMaxOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return this->channels_;
} else {
assert(this->outputStride_ >= this->channels_);
return this->outputStride_;
}
}
inline SoftArgMaxOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline SoftArgMaxOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline SoftArgMaxOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline float outputScale() const {
return 1.0f / 256.0f;
}
inline uint8_t outputZeroPoint() const {
return 0;
}
inline SoftArgMaxOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + channels());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
const int32_t maxInput = *std::max_element(
input.data() + i * inputStride(),
input.data() + i * inputStride() + channels());
float sumExp = 0.0f;
for (size_t c = 0; c < channels(); c++) {
sumExp +=
exp((int32_t(input[i * inputStride() + c]) - maxInput) *
inputScale());
}
for (size_t c = 0; c < channels(); c++) {
outputRef[i * channels() + c] =
exp((int32_t(input[i * inputStride() + c]) - maxInput) *
inputScale()) /
(sumExp * outputScale());
outputRef[i * channels() + c] =
std::min(outputRef[i * channels() + c], 255.0f);
}
}
/* Create, setup, run, and destroy SoftArgMax operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t softArgMaxOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_softargmax_nc_q8(
channels(),
inputScale(),
outputZeroPoint(),
outputScale(),
0,
&softArgMaxOp));
ASSERT_NE(nullptr, softArgMaxOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_softargmax_nc_q8(
softArgMaxOp,
batchSize(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(softArgMaxOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_delete_operator(softArgMaxOp));
softArgMaxOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.6f);
}
}
}
}
private:
size_t batchSize_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float inputScale_{0.176080093};
uint8_t inputZeroPoint_{121};
size_t iterations_{15};
};
| 5,428
| 26.281407
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/tanh-operator-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
class TanHOperatorTester {
public:
inline TanHOperatorTester& channels(size_t channels) {
assert(channels != 0);
this->channels_ = channels;
return *this;
}
inline size_t channels() const {
return this->channels_;
}
inline TanHOperatorTester& inputStride(size_t inputStride) {
assert(inputStride != 0);
this->inputStride_ = inputStride;
return *this;
}
inline size_t inputStride() const {
if (this->inputStride_ == 0) {
return this->channels_;
} else {
assert(this->inputStride_ >= this->channels_);
return this->inputStride_;
}
}
inline TanHOperatorTester& outputStride(size_t outputStride) {
assert(outputStride != 0);
this->outputStride_ = outputStride;
return *this;
}
inline size_t outputStride() const {
if (this->outputStride_ == 0) {
return this->channels_;
} else {
assert(this->outputStride_ >= this->channels_);
return this->outputStride_;
}
}
inline TanHOperatorTester& batchSize(size_t batchSize) {
this->batchSize_ = batchSize;
return *this;
}
inline size_t batchSize() const {
return this->batchSize_;
}
inline TanHOperatorTester& inputScale(float inputScale) {
assert(inputScale > 0.0f);
assert(std::isnormal(inputScale));
this->inputScale_ = inputScale;
return *this;
}
inline float inputScale() const {
return this->inputScale_;
}
inline TanHOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
this->inputZeroPoint_ = inputZeroPoint;
return *this;
}
inline uint8_t inputZeroPoint() const {
return this->inputZeroPoint_;
}
inline float outputScale() const {
return 1.0f / 128.0f;
}
inline uint8_t outputZeroPoint() const {
return 128;
}
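  // Illustrative note (not part of the original source): with the fixed output
  // quantization above (scale = 1/128, zero point = 128), tanh(x) = 0 maps to
  // 0 / (1/128) + 128 = 128, and tanh(x) = -1 maps to -128 + 128 = 0.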
inline TanHOperatorTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline TanHOperatorTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline TanHOperatorTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void testQ8() const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
std::vector<uint8_t> output(
(batchSize() - 1) * outputStride() + channels());
std::vector<float> outputRef(batchSize() * channels());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(input.begin(), input.end(), std::ref(u8rng));
std::fill(output.begin(), output.end(), 0xA5);
/* Compute reference results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
const float x = inputScale() *
(int32_t(input[i * inputStride() + c]) -
int32_t(inputZeroPoint()));
const float tanhX = tanh(x);
const float scaledTanHX = tanhX / outputScale();
float y = scaledTanHX;
y = std::min<float>(y, int32_t(qmax()) - int32_t(outputZeroPoint()));
y = std::max<float>(y, int32_t(qmin()) - int32_t(outputZeroPoint()));
outputRef[i * channels() + c] = y + int32_t(outputZeroPoint());
}
}
/* Create, setup, run, and destroy TanH operator */
ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
pytorch_qnnp_operator_t tanhOp = nullptr;
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_create_tanh_nc_q8(
channels(),
inputZeroPoint(),
inputScale(),
outputZeroPoint(),
outputScale(),
qmin(),
qmax(),
0,
&tanhOp));
ASSERT_NE(nullptr, tanhOp);
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_setup_tanh_nc_q8(
tanhOp,
batchSize(),
input.data(),
inputStride(),
output.data(),
outputStride()));
ASSERT_EQ(
pytorch_qnnp_status_success,
pytorch_qnnp_run_operator(tanhOp, nullptr /* thread pool */));
ASSERT_EQ(
pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(tanhOp));
tanhOp = nullptr;
/* Verify results */
for (size_t i = 0; i < batchSize(); i++) {
for (size_t c = 0; c < channels(); c++) {
ASSERT_NEAR(
float(int32_t(output[i * outputStride() + c])),
outputRef[i * channels() + c],
0.6f);
}
}
}
}
private:
size_t batchSize_{1};
size_t channels_{1};
size_t inputStride_{0};
size_t outputStride_{0};
float inputScale_{0.75f};
uint8_t inputZeroPoint_{121};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
| 5,581
| 24.962791
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/test_utils.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <gtest/gtest.h>
namespace qnnpack {
namespace testing {
enum class Mode {
Static,
Runtime,
};
#define _MAKE_TEST(TestClass, test_name, test_body, ...) \
TEST(TestClass, test_name) { \
test_body.testQ8(__VA_ARGS__); \
}
#define _STATIC_TEST(TestClass, test_name, test_body) \
_MAKE_TEST(TestClass, test_name##_static, test_body, Mode::Static)
#define _RUNTIME_TEST(TestClass, test_name, test_body) \
_MAKE_TEST(TestClass, test_name##_runtime, test_body, Mode::Runtime)
#define _STATIC_AND_RUNTIME_TEST(TestClass, test_name, test_body) \
_STATIC_TEST(TestClass, test_name, test_body) \
_RUNTIME_TEST(TestClass, test_name, test_body)
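// Illustrative expansion (not part of the original source; the tester class
// named below is hypothetical, assuming it has a testQ8(Mode) method):
//   _STATIC_AND_RUNTIME_TEST(FULLY_CONNECTED_OP, unit_batch,
//                            FullyConnectedOperatorTester().batchSize(1))
// defines TEST(FULLY_CONNECTED_OP, unit_batch_static) and
// TEST(FULLY_CONNECTED_OP, unit_batch_runtime), which call
// testQ8(Mode::Static) and testQ8(Mode::Runtime) respectively.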
}} // namespace qnnpack::testing
| 1,037
| 27.833333
| 73
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/vadd-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
#include <qnnpack/requantization.h>
class VAddMicrokernelTester {
public:
inline VAddMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline VAddMicrokernelTester& inplaceA(bool inplaceA) {
this->inplaceA_ = inplaceA;
return *this;
}
inline bool inplaceA() const {
return this->inplaceA_;
}
inline VAddMicrokernelTester& inplaceB(bool inplaceB) {
this->inplaceB_ = inplaceB;
return *this;
}
inline bool inplaceB() const {
return this->inplaceB_;
}
inline VAddMicrokernelTester& aScale(float aScale) {
assert(aScale > 0.0f);
assert(std::isnormal(aScale));
this->aScale_ = aScale;
return *this;
}
inline float aScale() const {
return this->aScale_;
}
inline VAddMicrokernelTester& aZeroPoint(uint8_t aZeroPoint) {
this->aZeroPoint_ = aZeroPoint;
return *this;
}
inline uint8_t aZeroPoint() const {
return this->aZeroPoint_;
}
inline VAddMicrokernelTester& bScale(float bScale) {
assert(bScale > 0.0f);
assert(std::isnormal(bScale));
this->bScale_ = bScale;
return *this;
}
inline float bScale() const {
return this->bScale_;
}
inline VAddMicrokernelTester& bZeroPoint(uint8_t bZeroPoint) {
this->bZeroPoint_ = bZeroPoint;
return *this;
}
inline uint8_t bZeroPoint() const {
return this->bZeroPoint_;
}
inline VAddMicrokernelTester& yScale(float yScale) {
assert(yScale > 0.0f);
assert(std::isnormal(yScale));
this->yScale_ = yScale;
return *this;
}
inline float yScale() const {
return this->yScale_;
}
inline VAddMicrokernelTester& yZeroPoint(uint8_t yZeroPoint) {
this->yZeroPoint_ = yZeroPoint;
return *this;
}
inline uint8_t yZeroPoint() const {
return this->yZeroPoint_;
}
inline VAddMicrokernelTester& qmin(uint8_t qmin) {
this->qmin_ = qmin;
return *this;
}
inline uint8_t qmin() const {
return this->qmin_;
}
inline VAddMicrokernelTester& qmax(uint8_t qmax) {
this->qmax_ = qmax;
return *this;
}
inline uint8_t qmax() const {
return this->qmax_;
}
inline VAddMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_q8vadd_ukernel_function q8vadd) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> a(n());
std::vector<uint8_t> b(n());
std::vector<uint8_t> y(n());
std::vector<float> yFP(n());
std::vector<uint8_t> yRef(n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(a.begin(), a.end(), std::ref(u8rng));
std::generate(b.begin(), b.end(), std::ref(u8rng));
if (inplaceA() || inplaceB()) {
std::generate(y.begin(), y.end(), std::ref(u8rng));
} else {
std::fill(y.begin(), y.end(), 0xA5);
}
const uint8_t* aData = inplaceA() ? y.data() : a.data();
const uint8_t* bData = inplaceB() ? y.data() : b.data();
/* Prepare quantization parameters */
const union pytorch_qnnp_add_quantization_params quantizationParams =
pytorch_qnnp_compute_add_quantization_params(
aZeroPoint(),
bZeroPoint(),
yZeroPoint(),
aScale() / yScale(),
bScale() / yScale(),
qmin(),
qmax());
const union pytorch_qnnp_add_quantization_params
scalarQuantizationParams =
pytorch_qnnp_compute_scalar_add_quantization_params(
aZeroPoint(),
bZeroPoint(),
yZeroPoint(),
aScale() / yScale(),
bScale() / yScale(),
qmin(),
qmax());
/* Compute reference results */
for (size_t i = 0; i < n(); i++) {
yFP[i] = float(yZeroPoint()) +
float(int32_t(aData[i]) - int32_t(aZeroPoint())) *
(aScale() / yScale()) +
float(int32_t(bData[i]) - int32_t(bZeroPoint())) *
(bScale() / yScale());
yFP[i] = std::min<float>(yFP[i], float(qmax()));
yFP[i] = std::max<float>(yFP[i], float(qmin()));
yRef[i] = pytorch_qnnp_add_quantize(
aData[i], bData[i], scalarQuantizationParams);
}
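      // Illustrative check of the formula above (hypothetical values, not this
      // tester's defaults): with all zero points 0, aScale = bScale = 0.5, and
      // yScale = 1.0, inputs a = 10 and b = 20 give yFP = 10 * 0.5 + 20 * 0.5 = 15.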
/* Call optimized micro-kernel */
q8vadd(n(), aData, bData, y.data(), &quantizationParams);
/* Verify results */
for (size_t i = 0; i < n(); i++) {
ASSERT_LE(uint32_t(y[i]), uint32_t(qmax()))
<< "at " << i << ", n = " << n();
ASSERT_GE(uint32_t(y[i]), uint32_t(qmin()))
<< "at " << i << ", n = " << n();
ASSERT_NEAR(float(int32_t(y[i])), yFP[i], 0.6f)
<< "at " << i << ", n = " << n();
ASSERT_EQ(uint32_t(yRef[i]), uint32_t(y[i]))
<< "at " << i << ", n = " << n();
}
}
}
private:
size_t n_{1};
bool inplaceA_{false};
bool inplaceB_{false};
float aScale_{0.75f};
float bScale_{1.25f};
float yScale_{0.96875f};
uint8_t aZeroPoint_{121};
uint8_t bZeroPoint_{127};
uint8_t yZeroPoint_{133};
uint8_t qmin_{0};
uint8_t qmax_{255};
size_t iterations_{15};
};
| 5,868
| 25.084444
| 75
|
h
|
null |
pytorch-main/aten/src/ATen/native/quantized/cpu/qnnpack/test/zip-microkernel-tester.h
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cstddef>
#include <cstdlib>
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <functional>
#include <random>
#include <vector>
#include <qnnpack/params.h>
class ZipMicrokernelTester {
public:
inline ZipMicrokernelTester& n(size_t n) {
assert(n != 0);
this->n_ = n;
return *this;
}
inline size_t n() const {
return this->n_;
}
inline ZipMicrokernelTester& g(size_t g) {
assert(g != 0);
this->g_ = g;
return *this;
}
inline size_t g() const {
return this->g_;
}
inline ZipMicrokernelTester& iterations(size_t iterations) {
this->iterations_ = iterations;
return *this;
}
inline size_t iterations() const {
return this->iterations_;
}
void test(pytorch_xzipc_ukernel_function xzip) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> x(n() * g());
std::vector<uint8_t> y(g() * n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::fill(y.begin(), y.end(), 0xA5);
/* Call optimized micro-kernel */
xzip(n(), x.data(), y.data());
/* Verify results */
for (size_t i = 0; i < n(); i++) {
for (size_t j = 0; j < g(); j++) {
ASSERT_EQ(uint32_t(y[i * g() + j]), uint32_t(x[j * n() + i]))
<< "at element " << i << ", group " << j;
}
}
}
}
void test(pytorch_xzipv_ukernel_function xzip) const {
std::random_device randomDevice;
auto rng = std::mt19937(randomDevice());
auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);
std::vector<uint8_t> x(n() * g());
std::vector<uint8_t> y(g() * n());
for (size_t iteration = 0; iteration < iterations(); iteration++) {
std::generate(x.begin(), x.end(), std::ref(u8rng));
std::fill(y.begin(), y.end(), 0xA5);
/* Call optimized micro-kernel */
xzip(n(), g(), x.data(), y.data());
/* Verify results */
for (size_t i = 0; i < n(); i++) {
for (size_t j = 0; j < g(); j++) {
ASSERT_EQ(uint32_t(y[i * g() + j]), uint32_t(x[j * n() + i]))
<< "at element " << i << ", group " << j;
}
}
}
}
private:
size_t n_{1};
size_t g_{1};
size_t iterations_{3};
};
| 2,671
| 23.513761
| 74
|
h
|
null |
pytorch-main/aten/src/ATen/native/sparse/FlattenIndicesCommon.h
|
#pragma once
#include <ATen/Tensor.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/Dispatch.h>
#include <ATen/native/sparse/Macros.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/SparseTensorUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/tensor.h>
#endif
#ifdef GPUCC
#define NAME "flatten_indices_cuda"
#else
#define NAME "flatten_indices_cpu"
#endif
namespace at::native {
namespace {
template <template <typename func_t> class kernel_t>
struct KernelLauncher {
template <typename func_t>
static void launch(TensorIteratorBase& iter, const func_t& f) {
kernel_t<func_t>::launch(iter, f);
}
};
template <
template <typename func_t> class kernel_t,
typename index_t,
int64_t max_static_len = 0>
Tensor _flatten_indices_impl(const Tensor& indices, IntArrayRef size) {
TORCH_INTERNAL_ASSERT(indices.dim() > 1 && static_cast<size_t>(indices.size(0)) == size.size());
// Need owning storage in case of the Tensor class.
const auto hash_coeffs_storage = [&]() -> auto {
auto strides = c10::contiguous_strides(size);
return at::sparse::TensorGeometryHolder<max_static_len>(strides, strides, indices.options());
}();
const auto hash_coeffs = std::get<0>(*hash_coeffs_storage);
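  // Illustrative note (not part of the original source): hash_coeffs holds the
  // contiguous (row-major) strides of `size`, so for size = {3, 4, 5} it is
  // {20, 5, 1} and an index (i, j, k) hashes to i * 20 + j * 5 + k * 1,
  // i.e. its flattened offset.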
const auto hash_indices = [&]() -> Tensor {
// non-const because of gcc-5/clang-5 issues
auto sparse_dim = indices.size(0);
auto indices_dim_stride = indices.stride(0);
auto indices_nnz_stride = indices.stride(1);
auto hash = at::arange(indices.size(1), indices.options().dtype(kLong));
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.add_output(hash)
.add_input(hash)
.build();
{
const auto* RESTRICT ptr_indices = indices.data_ptr<index_t>();
KernelLauncher<kernel_t>::launch(iter,
// NOTE: capture by value required by CUDA
[=] FUNCAPI (int64_t nnz_idx) -> int64_t {
const auto* RESTRICT ptr_indices_dim = ptr_indices + nnz_idx * indices_nnz_stride;
auto hash = static_cast<int64_t>(0);
for (int64_t dim = 0; dim < sparse_dim; ++dim) {
const auto dim_hash_coeff = hash_coeffs[dim];
const auto dim_index = ptr_indices_dim[dim * indices_dim_stride];
hash += dim_index * dim_hash_coeff;
}
return hash;
});
}
return hash;
}();
return hash_indices;
}
template <template <typename func_t> class kernel_t>
Tensor _flatten_indices(const Tensor& indices, IntArrayRef size) {
TORCH_CHECK(indices.dim() > 1 && static_cast<size_t>(indices.size(0)) == size.size(),
NAME, "(): the dimensionality of sparse `indices` and the lenght of `size` must match. ",
"Got `indices.size(0) == ", indices.size(0), "` != `size.size() == ", size.size(), "`.");
Tensor flattened_indices;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), NAME, [&] () {
constexpr int64_t max_sparse_dims = 8;
if (indices.size(0) <= max_sparse_dims) {
flattened_indices = _flatten_indices_impl<kernel_t, index_t, max_sparse_dims>(indices, size);
} else {
flattened_indices = _flatten_indices_impl<kernel_t, index_t>(indices, size);
}
});
return flattened_indices;
}
}
} // at::native
| 3,349
| 30.308411
| 101
|
h
|
null |
pytorch-main/aten/src/ATen/native/sparse/ParamUtils.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>
#include <tuple>
namespace at {
namespace native {
TORCH_API std::tuple<Tensor, Tensor, int64_t> softmax_sparse_input_preprocessing(
const Tensor& input_,
const int64_t dim_,
const bool half_to_float,
CheckedFrom function_name);
TORCH_API std::tuple<Tensor, Tensor, Tensor, int64_t> softmax_backward_sparse_input_preprocessing(
const Tensor& grad_,
const Tensor& output_,
int64_t dim_,
const Tensor& input_,
CheckedFrom function_name);
} // namespace native
} // namespace at
| 590
| 22.64
| 98
|
h
|
null |
pytorch-main/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h
|
#pragma once
#include <ATen/Tensor.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/Dispatch.h>
#include <ATen/native/sparse/Macros.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/SparseTensorUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/result_type.h>
#endif
#ifdef GPUCC
#define NAME "sparse_binary_op_intersection_cuda"
#else
#define NAME "sparse_binary_op_intersection_cpu"
#endif
namespace at {
namespace native {
namespace {
using at::sparse::get_sparse_impl;
// ForwardIt: only legacy random access iterator is supported.
template<class ForwardIt, class T, bool is_lower = true>
static FUNCAPI INLINE
ForwardIt find_bound(ForwardIt first, ForwardIt last, const T& value) {
ForwardIt RESTRICT it;
typename std::iterator_traits<ForwardIt>::difference_type count, step;
// NOTE: std::distance(first, last) compiles but produces wrong results on CUDA,
// so only legacy random access iterators are safe in this code.
count = last - first;
while (count > 0) {
it = first;
step = count / 2;
    // Avoiding std::advance(it, step),
    // although, unlike std::distance, it does work on CUDA.
it += step;
// The decision which separates finding a lower bound vs an upper bound.
// Note that a lower bound is a value at *it with the smallest index
// such that *it >= value if such value exists, or last if does not.
// Similarly, an upper bound is a value at *it with the smallest index
// such that *it > value if such value exists, or last if does not.
    // Let is_lower = true and *it < value; then we know that *it and the values
    // preceding *it cannot contain a lower bound, so we adjust the initial iterator range
    // from [first, first + count] to [first + step + 1, first + count - (step + 1)],
    // where +1 skips the element at which we have just evaluated *it < value.
    // Similar logic holds when is_lower = false.
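    // Illustrative example (not part of the original source): in the sorted
    // range {1, 3, 3, 5} with value = 3, the lower bound is the first 3
    // (offset 1) and the upper bound is 5 (offset 3).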
if (is_lower ? *it < value : value >= *it) {
first = ++it;
count -= step + 1;
}
else {
count = step;
}
}
return first;
}
template <template <typename func_t> class kernel_t>
struct KernelLauncher {
template <typename func_t>
static void launch(TensorIteratorBase& iter, const func_t& f) {
kernel_t<func_t>::launch(iter, f);
}
};
TensorIterator make_value_selection_intersection_iter(
const Tensor& lhs_values,
const Tensor& lhs_select_idx,
const Tensor& rhs_values,
const Tensor& rhs_select_idx,
const Tensor& intersection_counts) {
const auto res_values_sizes = [&]() -> std::vector<int64_t> {
auto sizes = infer_size(
// keep nnz dim
lhs_values.sizes(),
// remove nnz dim for smooth broadcasting
rhs_values.sizes().slice(1));
    // update nnz dim to be the length of an index
sizes[0] = lhs_select_idx.numel();
return sizes;
}();
auto res_values = at::empty(res_values_sizes, lhs_values.options());
const auto restride_idx = [&res_values](const Tensor& idx) -> Tensor {
auto idx_sizes = std::vector<int64_t>(res_values.dim(), 1);
auto idx_strides = std::vector<int64_t>(res_values.dim(), 0);
idx_sizes[0] = idx.numel();
idx_strides[0] = 1;
return idx.as_strided(idx_sizes, idx_strides);
};
const auto restride_values = [&lhs_select_idx](const Tensor& values) -> Tensor {
auto values_sizes = at::DimVector(values.sizes());
auto values_strides = at::DimVector(values.strides());
values_sizes[0] = lhs_select_idx.numel();
values_strides[0] = 0;
return values.as_strided(values_sizes, values_strides);
};
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_owned_output(res_values)
.add_owned_input(restride_values(lhs_values))
.add_owned_input(restride_idx(lhs_select_idx))
.add_owned_input(restride_values(rhs_values))
.add_owned_input(restride_idx(rhs_select_idx))
.add_owned_input(restride_idx(intersection_counts))
.build();
return iter;
}
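// For intuition (an illustrative shape example, not part of the interface):
// if lhs_values has shape (lhs_nnz, 2, 3), rhs_values has shape (rhs_nnz, 2, 3),
// and lhs_select_idx holds k indices, then res_values is allocated with shape
// (k, 2, 3); both value tensors are restrided to (k, 2, 3) with stride 0 along
// the nnz dimension, and the index tensors to (k, 1, 1) with strides (1, 0, 0),
// so the value-selection kernel sees one selection index, one rhs index, and one
// intersection count per output row and gathers the actual value rows itself.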
template <
template <typename func_t> class kernel_t,
typename value_selection_intersection_kernel_t,
typename index_t = int64_t,
int64_t max_static_len = 0>
void _sparse_binary_op_intersection_kernel_impl(
Tensor& res,
const Tensor& x_,
const Tensor& y_,
const std::vector<int64_t> broadcasted_shape,
const c10::optional<Tensor>& x_hash_opt_ = c10::nullopt,
const c10::optional<Tensor>& y_hash_opt_ = c10::nullopt,
const bool accumulate_matches = true,
const bool distributive_with_sum = true
) {
// The common dtype check is relevant when op is done in-place.
  // This is because the binary op produces new values and it could be that
// new_values.dtype != res.dtype. In such a case we should error out
// as soon as possible to avoid redundant kernel runs.
const auto common_dtype = at::result_type(x_, y_);
TORCH_CHECK(canCast(common_dtype, res.scalar_type()),
"Can't convert result type ", common_dtype,
" to output ", res.scalar_type());
using KernelLauncher = KernelLauncher<kernel_t>;
using OptTensor = c10::optional<Tensor>;
// If the op and sum are not distributive, coalesce is required.
const auto coalesce_if_not_distributive = [distributive_with_sum](const Tensor& t, const OptTensor& t_hash_opt) -> auto {
// No need to coalesce in such a case.
if (distributive_with_sum) {
return std::make_tuple(t, t_hash_opt);
} else {
// Otherwise coalesce and force hash recompute.
return std::make_tuple(t.coalesce(), static_cast<OptTensor>(c10::nullopt));
}
};
Tensor x, y;
OptTensor x_hash_opt, y_hash_opt;
std::tie(x, x_hash_opt) = coalesce_if_not_distributive(x_, x_hash_opt_);
std::tie(y, y_hash_opt) = coalesce_if_not_distributive(y_, y_hash_opt_);
// Given sparse tensors x and y we decide which one is source, and which one
// is probably_coalesced. The indices of both source and probably_coalesced are
// hashed and then the hash values of the source's indices are binary-searched
// into the hash values of the probably_coalesced's indices.
  // If probably_coalesced is coalesced, by the property of the hashing method
// (see below), the hash values are already sorted and we can avoid any
// explicit sorting routines.
Tensor probably_coalesced, source;
OptTensor probably_coalesced_indices_hash_opt, source_indices_hash_opt;
std::tie(probably_coalesced, probably_coalesced_indices_hash_opt, source, source_indices_hash_opt) = [&]() -> auto {
// Case 1: either x or y is coalesced.
if ((x.is_coalesced() ^ y.is_coalesced())) {
return x.is_coalesced()
? std::make_tuple(x, x_hash_opt, y, y_hash_opt)
: std::make_tuple(y, y_hash_opt, x, x_hash_opt);
}
// Case 2: Both x and y are either coalesced or non-coalesced.
// If both are coalesced, search into the larger tensor is faster.
// Same holds when both are non-coalesced.
else {
Tensor larger, smaller;
OptTensor larger_hash_opt, smaller_hash_opt;
std::tie(larger, larger_hash_opt, smaller, smaller_hash_opt) = [&]() -> auto {
return x._nnz() >= y._nnz()
? std::make_tuple(x, x_hash_opt, y, y_hash_opt)
: std::make_tuple(y, y_hash_opt, x, x_hash_opt);
}();
      // If, under a uniform distribution, a search is likely to hit many elements in larger,
      // it is best to coalesce larger for better performance.
const auto larger_sizes = larger.sizes();
const auto sparse_dim_numel = std::accumulate(
larger_sizes.begin(),
larger_sizes.begin() + larger.sparse_dim(),
1,
std::multiplies<int64_t>());
      // If nnz > prod(larger.shape[:sparse_dim]), by the pigeonhole principle,
// there is at least one bucket with nnz / prod(larger.shape[:sparse_dim]) elements.
// It provides a lower bound for the max count in the intersection.
// This condition is very conservative as we do not check whether such an event
// actually occurred, although it is very likely under a uniform distribution,
// the distribution with the highest uncertainty (maximizes entropy).
const auto max_count_lower_bound = larger._nnz() / sparse_dim_numel;
constexpr int64_t MAX_COPIES_PER_THREAD = 50;
return max_count_lower_bound > MAX_COPIES_PER_THREAD
// coalesce invalidates hash values, so force-recompute
? std::make_tuple(larger.coalesce(), static_cast<OptTensor>(c10::nullopt), smaller, smaller_hash_opt)
: std::make_tuple(larger, larger_hash_opt, smaller, smaller_hash_opt);
}
}();
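  // A numeric illustration of the heuristic above (assumed values, for exposition
  // only): if larger has sparse shape (10, 10), then sparse_dim_numel = 100; with
  // nnz = 10000 the lower bound is 100 > MAX_COPIES_PER_THREAD = 50, so larger is
  // coalesced first, whereas with nnz = 1000 the bound is 10 and larger is used as is.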
// The employed hash function maps a d-dim index to a linear offset
// into a contiguous memory that is sufficient to fit a dense tensor
// of shape broadcasted_shape(x.shape, y.shape), i.e.
// idx -> \sum_{i = 0}^d idx[i] * hash_coeffs[i], where
// hash_coeffs are the strides of a contiguous tensor of shape
// broadcasted_shape(x.shape, y.shape).
// Assuming the following order on the dimensions, i.e. the right-most dim is the
// fastest-changing dim, and the left-most is the slowest-changing dim,
// which is implicit in the definition of hash_coeffs,
// it could be shown that the hash function is actually bijective and, hence,
// is a perfect hash function (no collisions ever).
// Need owning storage in case of the Tensor class.
const auto hash_coeffs_storage = [&]() -> auto {
const auto broadcasted_sparse_dim_shape = std::vector<int64_t>(
broadcasted_shape.begin(),
broadcasted_shape.begin() + probably_coalesced.sparse_dim()
);
auto strides = c10::contiguous_strides(broadcasted_sparse_dim_shape);
return at::sparse::TensorGeometryHolder<max_static_len>(strides, strides, probably_coalesced.options());
}();
const auto hash_coeffs = std::get<0>(*hash_coeffs_storage);
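  // For example (illustration only), if the broadcasted sparse shape is (4, 5),
  // then hash_coeffs = (5, 1) and an index (i, j) hashes to 5 * i + j, which is a
  // bijection onto [0, 20). Hence two indices collide iff they are equal, and for
  // a coalesced tensor (indices in lexicographic order) the hashes come out sorted.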
const auto nnz_arange = at::arange(
std::max(probably_coalesced._nnz(), source._nnz()),
source._indices().options());
const auto probably_coalesced_nnz_arange = nnz_arange.narrow(-1, 0, probably_coalesced._nnz());
// non-const because of gcc-5/clang-5 issues
auto sparse_dim = probably_coalesced.sparse_dim();
// Apply the hash function to probably_coalesced.indices
const auto probably_coalesced_indices_hash = [&]() -> Tensor {
// probably_coalesced is coalesced and hash provided? Reuse it!
if (probably_coalesced_indices_hash_opt.has_value()) {
return (*probably_coalesced_indices_hash_opt).contiguous();
}
const auto indices = probably_coalesced._indices();
// non-const because of gcc-5/clang-5 issues
auto indices_dim_stride = indices.stride(0);
auto indices_nnz_stride = indices.stride(1);
auto hash = at::empty({probably_coalesced._nnz()}, indices.options().dtype(kLong));
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(hash)
.add_input(probably_coalesced_nnz_arange)
.build();
{
const auto* RESTRICT ptr_indices = indices.data_ptr<index_t>();
KernelLauncher::launch(iter,
// NOTE: capture by value required by CUDA
[=] FUNCAPI (index_t nnz_idx) -> int64_t {
const auto* RESTRICT ptr_indices_dim = ptr_indices + nnz_idx * indices_nnz_stride;
int64_t hash = 0;
for (int64_t dim = 0; dim < sparse_dim; ++dim) {
const auto dim_hash_coeff = hash_coeffs[dim];
const auto dim_index = ptr_indices_dim[dim * indices_dim_stride];
hash += dim_index * dim_hash_coeff;
}
return hash;
});
}
return hash;
}();
// Now that we have hash values of probably_coalesced.indices,
// we need to decide whether they need to get sorted.
  // The sort is not required if probably_coalesced is coalesced.
Tensor sorted_hash, argsort_hash;
std::tie(sorted_hash, argsort_hash) = [&]() -> std::tuple<Tensor, Tensor> {
if (probably_coalesced.is_coalesced()) {
// NOTE: argsort.dtype == nnz_arange.dtype
const auto argsort = nnz_arange.narrow(-1, 0, probably_coalesced._nnz());
return std::make_tuple(probably_coalesced_indices_hash, argsort);
}
else {
// NOTE: we want argsort.dtype == nnz_arange.dtype,
// but sort() produces indices of type int64_t,
// so we convert to nnz_arange.dtype to avoid issues
// with pointer types in the kernels below.
Tensor sorted, argsort;
std::tie(sorted, argsort) = probably_coalesced_indices_hash.sort();
return std::make_tuple(sorted, argsort.to(nnz_arange.scalar_type()));
}
}();
// Perform hash intersection.
// Let s_hash = hash(source.indices),
// pc_hash = hash(probably_coalesced.indices), then
// for i = 0, ..., len(s_hash) - 1:
// lb = <index of a value in pc_hash[argsort_hash] which is a lower bound for s_hash[i]>,
// up = <index of a value in pc_hash[argsort_hash] which is an upper bound for s_hash[i]>,
// intersection_count[i] = up - lb
// intersection_first_idx[i] = lb.
//
// intersection_count and intersection_first_idx are used to form indices at which
// intersection values are selected.
Tensor intersection_count, intersection_first_idx;
std::tie(intersection_count, intersection_first_idx) = [&]() -> std::tuple<Tensor, Tensor> {
const auto source_nnz = source._nnz();
auto intersection_buffer = at::empty({2, source_nnz}, sorted_hash.options());
auto intersection_count = intersection_buffer.select(0, 0);
auto intersection_first_idx = intersection_buffer.select(0, 1);
const auto source_indices = source._indices();
const auto source_arange = nnz_arange.narrow(-1, 0, source_nnz);
// non-const because of gcc-5/clang-5 issues
auto indices_dim_stride = source_indices.stride(0);
auto indices_nnz_stride = source_indices.stride(1);
auto dummy = at::empty({1}, source_arange.options());
auto hash = source_indices_hash_opt.has_value()
? (*source_indices_hash_opt).contiguous()
: at::empty({0}, probably_coalesced._indices().options().dtype(kLong));
const auto* RESTRICT hash_ptr = source_indices_hash_opt.has_value()
? hash.data_ptr<int64_t>()
: nullptr;
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.add_owned_output(dummy.expand_as(source_arange))
.add_input(source_arange)
.build();
{
const auto* RESTRICT ptr_indices = source_indices.data_ptr<index_t>();
const auto* RESTRICT ptr_sorted_hash = sorted_hash.data_ptr<int64_t>();
const auto sorted_hash_len = sorted_hash.numel();
auto* RESTRICT ptr_intersection_count = intersection_count.data_ptr<int64_t>();
auto* RESTRICT ptr_intersection_first_idx = intersection_first_idx.data_ptr<int64_t>();
// Fusing hash computation with hash intersection.
KernelLauncher::launch(iter,
// NOTE: capture by value required by CUDA
[=] FUNCAPI (index_t nnz_idx) -> index_t {
// Compute hash value
const auto* RESTRICT ptr_indices_dim = ptr_indices + nnz_idx * indices_nnz_stride;
int64_t hash = 0;
if (hash_ptr) {
hash = hash_ptr[nnz_idx];
} else {
for (int64_t dim = 0; dim < sparse_dim; ++dim) {
const auto dim_hash_coeff = hash_coeffs[dim];
const auto dim_index = ptr_indices_dim[dim * indices_dim_stride];
hash += dim_index * dim_hash_coeff;
}
}
// Perform hash values intersection
const auto* RESTRICT lb = find_bound<const int64_t*, int64_t, /*is_lower=*/true>(
ptr_sorted_hash,
ptr_sorted_hash + sorted_hash_len,
hash
);
const auto* RESTRICT ub = find_bound<const int64_t*, int64_t, /*is_lower=*/false>(
ptr_sorted_hash,
ptr_sorted_hash + sorted_hash_len,
hash
);
ptr_intersection_count[nnz_idx] = ub - lb;
ptr_intersection_first_idx[nnz_idx] = lb - ptr_sorted_hash;
return 0;
});
}
return std::make_tuple(intersection_count, intersection_first_idx);
}();
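  // A small example of the intersection bookkeeping (hash values are made up for
  // illustration): if sorted_hash = [2, 2, 7] and the source hashes are [2, 5],
  // then for source entry 0 the lower/upper bounds land at positions 0 and 2, so
  // intersection_count[0] = 2 and intersection_first_idx[0] = 0; for source entry 1
  // both bounds land at position 2, so intersection_count[1] = 0 (no match).
  // argsort_hash then maps these positions back to nnz positions of probably_coalesced.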
const auto res_indices = source._indices().clone();
const auto binary_op_res_dtype = at::result_type(source._values(), probably_coalesced._values());
const auto res_values = value_selection_intersection_kernel_t::apply(
source._values().to(binary_op_res_dtype),
nnz_arange.narrow(-1, 0, source._nnz()),
probably_coalesced._values().to(binary_op_res_dtype),
intersection_first_idx.to(nnz_arange.scalar_type()),
intersection_count,
argsort_hash,
accumulate_matches).to(res.scalar_type());
const auto res_sparse_dim = source.sparse_dim();
const auto res_dense_dim = source.dense_dim();
const auto& res_shape = broadcasted_shape;
const auto res_nnz = source._nnz();
auto* res_sparse_impl = get_sparse_impl(res);
res_sparse_impl->raw_resize_(res_sparse_dim, res_dense_dim, res_shape);
res_sparse_impl->set_indices_and_values_unsafe(res_indices, res_values);
res_sparse_impl->set_nnz_and_narrow(res_nnz);
res._coalesced_(source.is_coalesced());
}
template <
template <typename func_t> class kernel_t,
typename value_selection_intersection_kernel_t>
void _sparse_binary_op_intersection_kernel_out(
Tensor& res,
const Tensor& x,
const Tensor& y,
const c10::optional<Tensor>& x_hash_opt = c10::nullopt,
const c10::optional<Tensor>& y_hash_opt = c10::nullopt,
// If op distributes with the sum, the arguments are processed as is,
// without the calls to coalesce().
const bool distributive_with_sum = true
) {
TORCH_CHECK(
(x.is_sparse() && y.is_sparse())
&& (x.dim() == y.dim()) && (x.sparse_dim() == y.sparse_dim())
&& (x.sizes().slice(0, x.sparse_dim()) == y.sizes().slice(0, y.sparse_dim())),
NAME, "(): expects sparse inputs with equal dimensionality, ",
"number of sparse dimensions, and shape of sparse dimensions");
TORCH_CHECK(
x._indices().scalar_type() == y._indices().scalar_type(),
NAME, "(): expects inputs' indices to be of the same dtype (i.e. long or int)");
const auto check_hash_validity = [](const Tensor& t, const c10::optional<Tensor>& t_hash_opt) {
if (!t_hash_opt.has_value()) {
return;
}
const auto t_hash = *t_hash_opt;
TORCH_INTERNAL_ASSERT(
t_hash.dim() == 1 && t_hash.scalar_type() == kLong && t_hash.size(-1) == t._indices().size(-1),
NAME, "(): explicit hash values need to be a 1-dim Long tensor with the ",
"NSE matching that of the corresponding sparse tensor.");
};
check_hash_validity(x, x_hash_opt);
check_hash_validity(y, y_hash_opt);
const auto broadcasted_shape = infer_size(x.sizes(), y.sizes());
// 8 sparse dims should be more than enough?
constexpr int64_t max_sparse_dims = 8;
// COO indices are only 64-bit integers for now.
using index_t = int64_t;
if (max_sparse_dims > x.sparse_dim()) {
_sparse_binary_op_intersection_kernel_impl<
        // For some reason MSVC complains about passing constexpr max_sparse_dims
        // as a template parameter, claiming it is not known at compile time.
kernel_t, value_selection_intersection_kernel_t, index_t, 8>(
res, x, y, broadcasted_shape, x_hash_opt, y_hash_opt, distributive_with_sum);
} else {
_sparse_binary_op_intersection_kernel_impl<
kernel_t, value_selection_intersection_kernel_t, index_t>(
res, x, y, broadcasted_shape, x_hash_opt, y_hash_opt, distributive_with_sum);
}
}
} // anonymous namespace
}} // at::native
| 19,963 | 40.333333 | 123 | h | null | pytorch-main/aten/src/ATen/native/sparse/SparseCsrTensorMath.h |
#pragma once
#include <ATen/Tensor.h>
#include <ATen/core/Scalar.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/ReductionType.h>
#include <ATen/native/cpu/SpmmReduceKernel.h>
namespace at {
namespace native {
namespace sparse {
namespace impl {
// Returns true if all entries of self are zero
// TODO: This has potential to be a generic helper
inline bool _is_sparse_and_zero(const Tensor& self) {
if (self.layout() == kSparse || self.layout() == kSparseCsr ||
self.layout() == kSparseCsc || self.layout() == kSparseBsr ||
self.layout() == kSparseBsc) {
if (self._nnz() == 0) {
return true;
}
}
return false;
}
inline void _check_is_cpu(const Tensor& self, c10::string_view name) {
TORCH_CHECK(
self.is_cpu(),
"Expected all tensors to be on the same device. addmm expected '",
name,
"' to be CPU tensor, but got ",
self.device(),
" tensor");
}
inline void _check_is_cuda(const Tensor& self, c10::string_view name) {
TORCH_CHECK(
self.is_cuda(),
"Expected all tensors to be on the same device. addmm expected '",
name,
"' to be CUDA tensor, but got ",
self.device(),
" tensor");
}
inline void _check_dim(const Tensor& self, int64_t target_dim, c10::string_view name) {
if (target_dim == 2) {
TORCH_CHECK(
self.dim() == target_dim,
name, " must be a matrix, ",
"got ", self.dim(), "-D tensor");
}
TORCH_CHECK(
self.dim() == target_dim,
"Expected ",
name,
" to be of dimension ",
target_dim,
" but got ",
self.dim(),
" instead.");
}
template <bool train>
inline void check_sparse_mm_reduce_impl_inputs(
const Tensor& self,
const Tensor& grad_out,
const Tensor& other) {
TORCH_INTERNAL_ASSERT(self.is_sparse_csr());
const auto input_scalar_type = self.values().scalar_type();
CheckedFrom c = train ? "sparse_mm_reduce_backward" : "sparse_mm_reduce";
if (train) {
checkLayout(c, grad_out, kStrided);
checkScalarType(c, {grad_out, "grad_out", 1}, input_scalar_type);
check_dim_size(grad_out, 2, 0, self.size(0));
check_dim_size(grad_out, 2, 1, other.size(1));
}
int pos = train ? 2 : 1;
checkLayout(c, other, kStrided);
checkScalarType(c, {other, "other", pos}, input_scalar_type);
check_dim_size(other, 2, 0, self.size(1));
}
}
}
}
}
| 2,383 | 25.197802 | 87 | h | null | pytorch-main/aten/src/ATen/native/sparse/SparseTensorMath.h |
#pragma once
#include <ATen/native/SparseTensorUtils.h>
namespace at { namespace native {
TORCH_API sparse::SparseTensor& mul_out_sparse_scalar(sparse::SparseTensor& r, const sparse::SparseTensor& t, const Scalar& value);
TORCH_API sparse::SparseTensor& mul_out_sparse_zerodim(sparse::SparseTensor& r, const sparse::SparseTensor& t, const Tensor& value);
TORCH_API sparse::SparseTensor& _mul_dense_sparse_out(const Tensor& d, const Tensor& s, Tensor& res);
TORCH_API sparse::SparseTensor& _mul_sparse_sparse_zero_dim_out(const Tensor& zero_dim, const Tensor& other, Tensor& res);
TORCH_API sparse::SparseTensor& _mul_sparse_sparse_out(const Tensor& x, const Tensor& y, Tensor& res);
}}
| 690 | 48.357143 | 132 | h | null | pytorch-main/aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h |
#pragma once
#include <ATen/Dispatch.h>
#include <ATen/Tensor.h>
#include <ATen/Utils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/sparse/Macros.h>
#include <ATen/native/SparseTensorUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/tensor.h>
#endif
#ifdef GPUCC
#define NAME "compressed_index_invariance_checks_cuda"
#else
#define NAME "compressed_index_invariance_checks_cpu"
#endif
#define INVARIANT_CHECK_FUNC_API static INLINE FUNCAPI void
namespace at {
namespace native {
namespace {
// NOTE: all the checks but the very last one are designed
// to work with vectors.
// To enable vectorization one would need to write a conversion
// Vec -> bool and make kernel launchers call into vectorized
// execution paths.
// All the invariants are described in
// https://pearu.github.io/bsr_tensor_invariants.html
// NOTE: in the code we also use `cidx/idx` to refer to
// `compressed_indices/plain_indices` respectively.
INVARIANT_CHECK_FUNC_API
_assert(const bool cond, const char* const message) {
#ifdef GPUCC
CUDA_KERNEL_ASSERT(cond && message);
#else
TORCH_CHECK(cond, message);
#endif
}
enum class CDimName : bool { CRow, CCol };
// Invariant 5.1
// compressed_index[..., 0] == 0.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
const index_t& cidx,
const index_t& zero) {
const bool invariant = cidx == zero;
if (cdim_name == CDimName::CRow) {
_assert(invariant, "`crow_indices[..., 0] == 0` is not satisfied.");
} else {
_assert(invariant, "`ccol_indices[..., 0] == 0` is not satisfied.");
}
}
// Invariant 5.2
// compressed_index[..., -1] == nnz.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
const index_t& cidx,
const index_t& nnz) {
const bool invariant = cidx == nnz;
if (cdim_name == CDimName::CRow) {
_assert(invariant, "`crow_indices[..., -1] == nnz` is not satisfied.");
} else {
_assert(invariant, "`ccol_indices[..., -1] == nnz` is not satisfied.");
}
}
// Invariant 5.3
// 0 <= compressed_indices[..., 1:] - compressed_indices[..., :-1] <= plain_dim.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_locally_bounded_sequence(
const index_t& cidx,
const index_t& cidx_next,
const index_t& zero,
const index_t& dim) {
const auto s_cidx = cidx_next - cidx;
const bool invariant = zero <= s_cidx && s_cidx <= dim;
if (cdim_name == CDimName::CRow) {
_assert(
invariant,
"`0 <= crow_indices[..., 1:] - crow_indices[..., :-1] <= ncols` is not satisfied.");
} else {
_assert(
invariant,
"`0 <= ccol_indices[..., 1:] - ccol_indices[..., :-1] <= nrows` is not satisfied.");
}
}
// Invariants 5.4 and 5.5
// 0 <= plain_index < plain_dim.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_idx_bounds(
const index_t& idx,
const index_t& zero,
const index_t& dim) {
const bool invariant = zero <= idx && idx < dim;
if (cdim_name == CDimName::CRow) {
_assert(invariant, "`0 <= col_indices < ncols` is not satisfied.");
} else {
_assert(invariant, "`0 <= row_indices < nrows` is not satisfied.");
}
}
// Invariant 5.6
// plain_indices[..., compressed_indices[..., i - 1]:compressed_indices[..., i]]
// for all i = 1, ..., compressed_dim
// are sorted and distinct along the last dimension values.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_vals_slices_with_cidx(
const index_t* RESTRICT ptr_idx_batch,
const index_t cidx,
const index_t cidx_next) {
// Note that ptr_idx_batch = &idx[batch_idx] and is contiguous.
const auto* RESTRICT slice_begin = ptr_idx_batch + cidx;
const auto* RESTRICT slice_end = ptr_idx_batch + cidx_next;
for (auto* RESTRICT curr = slice_begin + 1; curr < slice_end; ++curr) {
const auto invariant = *(curr - 1) < *curr;
if (cdim_name == CDimName::CRow) {
_assert(
invariant,
"`col_indices[..., crow_indices[..., i - 1]:crow_indices[..., i]] "
"for all i = 1, ..., nrows "
"are sorted and distinct along the last dimension values` "
"is not satisfied.");
} else {
_assert(
invariant,
"`row_indices[..., ccol_indices[..., i - 1]:ccol_indices[..., i]] "
"for all i = 1, ..., ncols "
"are sorted and distinct along the last dimension values` "
"is not satisfied.");
}
}
}
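// A concrete CSR example that satisfies all the invariants above (illustrative):
// for a 2 x 3 matrix with nnz = 3, crow_indices = [0, 2, 3] and col_indices = [0, 2, 1]:
// 5.1: crow_indices[0] == 0; 5.2: crow_indices[-1] == 3 == nnz;
// 5.3: the consecutive differences [2, 1] lie in [0, ncols] = [0, 3];
// 5.4/5.5: every col index lies in [0, 3); 5.6: the per-row slices [0, 2] and [1]
// are sorted and contain no duplicates.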
static inline int64_t indexCount(IntArrayRef sizes) {
int64_t res = 1;
for (const auto& s : sizes) {
res *= s;
}
return res;
}
template <typename func_t, typename vec_func_t>
struct EmptyVecKernel {
static void launch(
TensorIteratorBase& iter,
const func_t& f,
const vec_func_t& vec_f) {}
};
template <typename scalar_t>
using DummyVec = scalar_t;
template <
template <typename func_t>
class kernel_t,
template <typename func_t, typename vec_func_t>
class vec_kernel_t>
struct KernelLauncher {
template <typename func_t, typename vec_func_t>
static void launch(
TensorIteratorBase& iter,
const func_t& f,
const vec_func_t& vec_f) {
vec_kernel_t<func_t, vec_func_t>::launch(iter, f, vec_f);
}
template <typename func_t>
static void launch(TensorIteratorBase& iter, const func_t& f) {
kernel_t<func_t>::launch(iter, f);
}
};
template <
CDimName cdim_name,
template <typename func_t>
class kernel_t,
template <typename func_t, typename vec_func_t>
class vec_kernel_t = EmptyVecKernel,
template <typename scalar_t> class Vec = DummyVec,
size_t static_shape_max_len = 0>
void _validate_compressed_sparse_indices_kernel(
const Tensor& cidx,
const Tensor& idx,
const int64_t cdim,
const int64_t dim,
const int64_t nnz) {
if (cdim_name == CDimName::CRow) {
TORCH_CHECK(
cidx.size(-1) == cdim + 1,
"crow_indices have wrong shape: ",
"crow_indices.shape[-1] = ",
cidx.size(-1),
" is not equal to ",
"nrows + 1 = ",
cdim + 1);
TORCH_CHECK(
idx.size(-1) == nnz,
"col_indices have wrong shape: ",
"col_indices.shape[-1] = ",
idx.size(-1),
" is not equal to ",
"nnz = ",
nnz);
} else {
TORCH_CHECK(
cidx.size(-1) == cdim + 1,
"ccol_indices have wrong shape: ",
"ccol_indices.shape[-1] = ",
cidx.size(-1),
" is not equal to ",
"ncols + 1 = ",
cdim + 1);
TORCH_CHECK(
idx.size(-1) == nnz,
"row_indices have wrong shape: ",
"row_indices.shape[-1] = ",
idx.size(-1),
" is not equal to ",
"nnz = ",
nnz);
}
using KernelLauncher = KernelLauncher<kernel_t, vec_kernel_t>;
// For TensorIterator's output: no void lambdas.
const auto dummy = at::empty({1}, cidx.options());
// Catch integer overflow from large dimensions. Otherwise, the
// invariant checks may fail with bogus exceptions or succeed with
// false-positive results when int64_t typed dimensions are cast to
  // index dtype that corresponds to a smaller integer type such as
// int32_t.
{
AT_DISPATCH_INDEX_TYPES(idx.scalar_type(), NAME, [cdim, dim, nnz]() {
if (cdim_name == CDimName::CRow) {
TORCH_CHECK(static_cast<int64_t>(static_cast<index_t>(dim)) == dim,
sizeof(index_t) * 8, "-bit integer overflow in column dimension = ", dim);
TORCH_CHECK(static_cast<int64_t>(static_cast<index_t>(cdim)) == cdim,
sizeof(index_t) * 8, "-bit integer overflow in row dimension = ", cdim);
} else {
TORCH_CHECK(static_cast<int64_t>(static_cast<index_t>(dim)) == dim,
sizeof(index_t) * 8, "-bit integer overflow in row dimension = ", dim);
TORCH_CHECK(static_cast<int64_t>(static_cast<index_t>(cdim)) == cdim,
sizeof(index_t) * 8, "-bit integer overflow in column dimension = ", cdim);
}
TORCH_CHECK(static_cast<int64_t>(static_cast<index_t>(nnz)) == nnz,
sizeof(index_t) * 8, "-bit integer overflow in nnz = ", nnz);
});
}
// Invariants 5.4 and 5.5
{
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.add_owned_output(dummy.expand_as(idx))
.add_input(idx)
.build();
AT_DISPATCH_INDEX_TYPES(idx.scalar_type(), NAME, [&iter, dim]() {
const auto zero = index_t{0};
KernelLauncher::launch(iter, [zero, dim] FUNCAPI(index_t idx) -> index_t {
_check_idx_bounds<cdim_name, index_t>(idx, zero, dim);
return 0;
});
});
}
// Invariants 5.1, 5.2, 5.3, 5.6
{
const auto cidx_first = cidx.slice(-1, 0, 1);
const auto cidx_last = cidx.slice(-1, cdim, cdim + 1);
const auto cidx_curr = cidx.slice(-1, 0, cdim);
const auto cidx_next = cidx.slice(-1, 1, cdim + 1);
const auto batch_dims = cidx.sizes().slice(0, cidx.dim() - 1);
const auto batch_count = indexCount(batch_dims);
const auto batch_idx =
at::arange(batch_count, cidx.options()).view(batch_dims).unsqueeze_(-1);
const auto idx_ndims = idx.dim();
const auto idx_geometry_holder = at::sparse::TensorGeometryHolder<static_shape_max_len>(idx);
const auto idx_sizes = std::get<0>(*idx_geometry_holder);
const auto idx_strides = std::get<1>(*idx_geometry_holder);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.add_owned_output(dummy.expand_as(cidx_curr))
.add_input(cidx_first)
.add_input(cidx_last)
.add_input(cidx_curr)
.add_input(cidx_next)
.add_input(batch_idx)
.build();
AT_DISPATCH_INDEX_TYPES(
idx.scalar_type(),
NAME,
[&iter, &idx, dim, nnz, idx_ndims, &idx_sizes, &idx_strides]() {
const auto* RESTRICT ptr_idx = idx.data_ptr<index_t>();
const auto zero = index_t{0};
KernelLauncher::launch(
iter,
[zero, dim, nnz, idx_ndims, idx_sizes, idx_strides, ptr_idx] FUNCAPI(
index_t cidx_first,
index_t cidx_last,
index_t cidx_curr,
index_t cidx_next,
index_t batch_idx) -> index_t {
// Invariant 5.1
_check_first_cidx_is_zero<cdim_name, index_t>(cidx_first, zero);
// Invariant 5.2
_check_last_cidx_is_nnz<cdim_name, index_t>(cidx_last, nnz);
// Invariant 5.3
_check_cidx_nondecreasing_locally_bounded_sequence<
cdim_name,
index_t>(cidx_curr, cidx_next, zero, dim);
// Invariant 5.6
// NOTE: the implementation below is sync-less, but,
// unfortunately, work is not guaranteed to be well-balanced
// between different threads.
int64_t idx_offset = 0;
// assuming idx contiguity per batch:
int64_t tmp = batch_idx * idx_sizes[idx_ndims - 1];
for (int i = idx_ndims - 1; i >= 0; i--) {
int64_t div = tmp / idx_sizes[i];
idx_offset += (tmp - div * idx_sizes[i]) * idx_strides[i];
tmp = div;
}
const auto* RESTRICT ptr_idx_batch = ptr_idx + idx_offset;
_check_idx_sorted_distinct_vals_slices_with_cidx<
cdim_name,
index_t>(ptr_idx_batch, cidx_curr, cidx_next);
return 0;
});
});
}
}
template <
template <typename func_t>
class kernel_t,
template <typename func_t, typename vec_func_t>
class vec_kernel_t = EmptyVecKernel,
template <typename scalar_t> class Vec = DummyVec>
void validate_compressed_sparse_indices_kernel(
const bool is_crow,
const Tensor& cidx,
const Tensor& idx,
const int64_t cdim,
const int64_t dim,
const int64_t nnz) {
constexpr size_t idx_max_ndims = 8; // up to 7-dim batch.
const size_t idx_ndims = static_cast<size_t>(idx.dim());
if (is_crow) {
if (idx_ndims <= idx_max_ndims) {
_validate_compressed_sparse_indices_kernel<
CDimName::CRow,
kernel_t,
vec_kernel_t,
Vec,
idx_max_ndims>(cidx, idx, cdim, dim, nnz);
}
else {
_validate_compressed_sparse_indices_kernel<
CDimName::CRow,
kernel_t,
vec_kernel_t,
Vec>(cidx, idx, cdim, dim, nnz);
}
} else {
if (idx_ndims <= idx_max_ndims) {
_validate_compressed_sparse_indices_kernel<
CDimName::CCol,
kernel_t,
vec_kernel_t,
Vec,
idx_max_ndims>(cidx, idx, cdim, dim, nnz);
}
else {
_validate_compressed_sparse_indices_kernel<
CDimName::CCol,
kernel_t,
vec_kernel_t,
Vec>(cidx, idx, cdim, dim, nnz);
}
}
}
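// A minimal usage sketch (illustrative; `CPUKernel` is a hypothetical launcher
// written for this example, not a type defined in this header):
//
//   template <typename func_t>
//   struct CPUKernel {
//     static void launch(TensorIteratorBase& iter, const func_t& f) {
//       cpu_kernel(iter, f); // e.g. the CPU loop from ATen/native/cpu/Loops.h
//     }
//   };
//
//   // For a CSR tensor with `nrows` rows, `ncols` columns and `nnz` entries:
//   validate_compressed_sparse_indices_kernel<CPUKernel>(
//       /*is_crow=*/true, crow_indices, col_indices, nrows, ncols, nnz);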
} // namespace
} // namespace native
} // namespace at
| 13,542 | 31.951338 | 97 | h | null | pytorch-main/aten/src/ATen/native/sparse/cuda/SparseBlasImpl.h |
#pragma once
#include <ATen/SparseCsrTensorUtils.h>
#include <ATen/Tensor.h>
#include <ATen/core/Scalar.h>
namespace at {
namespace native {
namespace sparse {
namespace impl {
namespace cuda {
void addmm_out_sparse_csr(
const at::sparse_csr::SparseCsrTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
const Tensor& result);
void addmv_out_sparse_csr(
const at::sparse_csr::SparseCsrTensor& mat,
const Tensor& vec,
const Scalar& beta,
const Scalar& alpha,
const Tensor& result);
void add_out_sparse_csr(
const at::sparse_csr::SparseCsrTensor& mat1,
const at::sparse_csr::SparseCsrTensor& mat2,
const Scalar& alpha,
const Scalar& beta,
const at::sparse_csr::SparseCsrTensor& result);
void triangular_solve_out_sparse_csr(
const at::sparse_csr::SparseCsrTensor& A,
const Tensor& B,
const Tensor& X,
bool upper,
bool transpose,
bool unitriangular);
void sampled_addmm_out_sparse_csr(
const Tensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
const at::sparse_csr::SparseCsrTensor& result);
} // namespace cuda
} // namespace impl
} // namespace sparse
} // namespace native
} // namespace at
| 1,249 | 22.148148 | 51 | h | null | pytorch-main/aten/src/ATen/native/sparse/cuda/cutlass/default_epilogue_tensor_op_row_broadcast.h |
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/array.h>
#include <cutlass/platform/platform.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/epilogue/thread/linear_combination.h>
#include <cutlass/epilogue/thread/linear_combination_clamp.h>
#include <cutlass/epilogue/thread/linear_combination_relu.h>
#include <cutlass/epilogue/thread/linear_combination_relu0.h>
#include <cutlass/epilogue/thread/linear_combination_gelu.h>
#include <cutlass/epilogue/thread/linear_combination_sigmoid.h>
#include <cutlass/epilogue/thread/linear_combination_hardswish.h>
#include <cutlass/epilogue/thread/linear_combination_planar_complex.h>
#include <cutlass/epilogue/thread/conversion_op.h>
#include <cutlass/epilogue/thread/reduction_op.h>
#include <cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h>
#include <cutlass/epilogue/warp/fragment_iterator_tensor_op.h>
#include <cutlass/epilogue/warp/fragment_iterator_complex_tensor_op.h>
#include <cutlass/epilogue/warp/tile_iterator_tensor_op.h>
#include <cutlass/epilogue/warp/tile_iterator_tensor_op_mixed.h>
#include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
#include <cutlass/epilogue/threadblock/default_thread_map_tensor_op.h>
#include <ATen/native/sparse/cuda/cutlass/predicated_tile_iterator_row_broadcast.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_strided_dgrad.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_affine.h>
#include <cutlass/epilogue/threadblock/shared_load_iterator.h>
#include <cutlass/epilogue/threadblock/shared_load_iterator_mixed.h>
#include <cutlass/epilogue/threadblock/epilogue.h>
#include <cutlass/epilogue/threadblock/interleaved_epilogue.h>
#include <cutlass/layout/permute.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <
typename Shape_,
typename WarpMmaTensorOp_,
int PartitionsK,
typename OutputOp_,
int ElementsPerAccess,
bool ScatterD = false,
typename PermuteDLayout = layout::NoPermute
>
struct DefaultEpilogueTensorOpRowBroadcast {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
static int const kPartitionsK = PartitionsK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutC = typename WarpMmaTensorOp::LayoutC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape,
typename WarpMmaTensorOp::Shape,
kPartitionsK,
ElementOutput,
kElementsPerAccess
>::Type;
static bool const UseCUDAStore = platform::is_same<ElementOutput, double>::value;
using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorRowBroadcast<
OutputTileThreadMap,
ElementOutput,
ScatterD,
PermuteDLayout,
UseCUDAStore
>;
using AccumulatorFragmentIterator = typename platform::conditional<is_complex<ElementOutput>::value,
cutlass::epilogue::warp::FragmentIteratorComplexTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC>,
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
LayoutC> >::type;
/// Support several implementations depending on structure of epilogue
using DefaultIterators = detail::DefaultIteratorsTensorOp<
ElementOutput,
ElementAccumulator,
kElementsPerAccess,
Shape,
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename OutputTileThreadMap::CompactedThreadMap
>;
using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
/// Hard-coded padding elements added
using Padding = cutlass::MatrixShape<0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
static int const kFragmentsPerIteration = (kPartitionsK == 1 ? DefaultIterators::kFragmentsPerIteration : 1);
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::Epilogue<
Shape,
WarpMmaTensorOp,
kPartitionsK,
OutputTileIterator,
AccumulatorFragmentIterator,
WarpTileIterator,
SharedLoadIterator,
OutputOp,
Padding,
kFragmentsPerIteration
>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 7,731 | 39.910053 | 111 | h | null | pytorch-main/aten/src/ATen/native/sparse/cuda/cutlass/default_gemm_sparse_row_broadcast.h |
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief
Default kernel-level GEMM definitions combine threadblock-scoped matrix multiply-add with
the appropriate threadblock-scoped epilogue.
Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are
accommodated by exchanging A and B operands and assuming transposed layouts. Partial
specializations here choose 'device::GemmTransposed' to implement this functionality.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/numeric_types.h>
#include <cutlass/arch/wmma.h>
#include <cutlass/epilogue/threadblock/epilogue.h>
#include <cutlass/epilogue/thread/linear_combination.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/gemm/kernel/gemm.h>
#include <ATen/native/sparse/cuda/cutlass/sparse_gemm_row_broadcast.h>
#include <cutlass/gemm/kernel/gemm_pipelined.h>
#include <cutlass/gemm/threadblock/default_mma_core_sm75.h>
#include <cutlass/gemm/threadblock/default_mma_core_sm70.h>
#include <cutlass/gemm/threadblock/default_mma_core_sm80.h>
#include <cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h>
#include <cutlass/gemm/threadblock/default_sparse_mma.h>
#include <cutlass/gemm/threadblock/default_mma_core_simt.h>
#include <cutlass/gemm/threadblock/threadblock_swizzle.h>
#include <ATen/native/sparse/cuda/cutlass/default_epilogue_tensor_op_row_broadcast.h>
#include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
#include <cutlass/epilogue/threadblock/default_epilogue_simt.h>
#include <cutlass/transform/threadblock/predicated_tile_iterator.h>
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include <cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h>
#endif //CUTLASS_ARCH_WMMA_ENABLED
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultSparseGemmRowBroadcast;
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Ampere Architecture
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of A matrix in units of elements
int kAlignmentB,
/// Element type for C and D matrix operands
typename ElementC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages,
/// If true, kernel is configured to support serial reduction in the
/// epilogue
bool SplitKSerial,
/// Operation performed by GEMM
typename Operator>
struct DefaultSparseGemmRowBroadcast<ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC,
layout::RowMajor, ElementAccumulator, arch::OpClassTensorOp,
arch::Sm80, ThreadblockShape, WarpShape, InstructionShape,
EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial,
Operator> {
/// Define the threadblock-scoped matrix multiply-accumulate
using Mma = typename cutlass::gemm::threadblock::DefaultSparseMma<
ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB,
ElementAccumulator, layout::RowMajor, arch::OpClassTensorOp, arch::Sm80,
ThreadblockShape, WarpShape, InstructionShape, Stages,
Operator>::ThreadblockMma;
static const int kPartitionsK = ThreadblockShape::kK / WarpShape::kK;
/// Define the epilogue
using Epilogue =
typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOpRowBroadcast<
ThreadblockShape, typename Mma::Operator, kPartitionsK, EpilogueOutputOp,
EpilogueOutputOp::kCount>::Epilogue;
/// Define the kernel-level GEMM operator.
using GemmKernel = kernel::SparseGemmRowBroadcast<Mma, Epilogue, ThreadblockSwizzle, SplitKSerial>;
};
////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 8,070 | 41.256545 | 110 | h | null | pytorch-main/aten/src/ATen/native/sparse/cuda/cutlass/gemm_sparse_row_broadcast.h |
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/arch/arch.h>
#include <cutlass/device_kernel.h>
#include <cutlass/gemm/threadblock/threadblock_swizzle.h>
#include <cutlass/gemm/kernel/sparse_gemm.h>
#include <ATen/native/sparse/cuda/cutlass/default_gemm_sparse_row_broadcast.h>
#include <cutlass/gemm/device/default_gemm_configuration.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*! Gemm device-level operator. This is an interface to efficient CUTLASS GEMM kernels that may
be invoked from host code.
The contributions of this class are:
1. At compile time, it maps data types and high-level structural parameters onto
specific CUTLASS components.
2. At runtime, it maps logical arguments to GEMM problems to kernel parameters.
3. At runtime, it launches kernels on the device.
The intent is to provide a convenient mechanism for interacting with most plausible GEMM
configurations for each supported architecture. Consequently, not all parameters are exposed
to the top-level interface. Rather, sensible defaults at each level of the CUTLASS hierarchy
    are selected to trade off simplicity of the interface with flexibility. We expect
most configurations to be specified at this level. Applications with more exotic requirements
may construct their kernels of interest using CUTLASS components at the threadblock, warp,
and thread levels of abstraction.
CUTLASS exposes computations using the functor design pattern in which objects compose some
internal state with an overloaded function call operator. This enables decoupling of
initialization from execution, possibly reducing overhead during steady state phases of
application execution.
CUTLASS device-level operators expose an Arguments structure encompassing each logical
input to the computation. This is distinct from the kernel-level Params structure pattern
which contains application-specific precomputed state needed by the device code.
Example of a CUTLASS GEMM operator implementing the functionality of cuBLAS's SGEMM NN
is as follows:
//
// Instantiate the CUTLASS GEMM operator.
//
cutlass::gemm::device::Gemm<
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor,
float,
cutlass::layout::ColumnMajor
> gemm_op;
//
// Launch the GEMM operation on the device
//
cutlass::Status status = gemm_op({
{m, n, k}, // GemmCoord problem_size,
{A, lda}, // TensorRef<float, layout::ColumnMajor> ref_A,
{B, ldb}, // TensorRef<float, layout::ColumnMajor> ref_B,
{C, ldc}, // TensorRef<float, layout::ColumnMajor> ref_C,
{D, ldd}, // TensorRef<float, layout::ColumnMajor> ref_D,
{alpha, beta} // EpilogueOutputOp::Params epilogue_op_params
});
A simplified view of the template is listed below.
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Element type for C and D matrix operands
typename ElementC,
/// Layout type for C and D matrix operands
typename LayoutC,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for. This is the minimum SM that
/// supports the intended feature. The device kernel can be built
/// targeting any SM larger than this number.
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Warp-level tile size (concept: GemmShape)
typename InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle,
/// Number of stages used in the pipelined mainloop
int Stages
>
class Gemm;
*/
template <
/// Element type for A matrix operand
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Element type for B matrix operand
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Element type for C and D matrix operands
typename ElementC_,
/// Layout type for C and D matrix operands
typename LayoutC_,
/// Element type for internal accumulation
typename ElementAccumulator_ = ElementC_,
/// Operator class tag
typename OperatorClass_ = arch::OpClassSimt,
/// Tag indicating architecture to tune for
typename ArchTag_ = arch::Sm70,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::InstructionShape,
/// Epilogue output operator
typename EpilogueOutputOp_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::EpilogueOutputOp,
/// Threadblock-level swizzling operator
typename ThreadblockSwizzle_ =
typename threadblock::GemmIdentityThreadblockSwizzle<>,
/// Number of stages used in the pipelined mainloop
int Stages =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kStages,
/// Access granularity of A matrix in units of elements
int AlignmentA =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentA,
/// Access granularity of B matrix in units of elements
int AlignmentB =
DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB_,
ElementC_, ElementAccumulator_>::kAlignmentB,
/// If true, kernel supports split-K with serial reduction
bool SplitKSerial = false,
/// Operation performed by GEMM
typename Operator_ = typename DefaultGemmConfiguration<
OperatorClass_, ArchTag_, ElementA_, ElementB_, ElementC_,
ElementAccumulator_>::Operator>
class SparseGemmRowBroadcast {
public:
using ElementA = ElementA_;
using LayoutA = LayoutA_;
using TensorRefA = TensorRef<ElementA const, LayoutA>;
using ElementB = ElementB_;
using LayoutB = LayoutB_;
using TensorRefB = TensorRef<ElementB const, LayoutB>;
using ElementC = ElementC_;
using LayoutC = LayoutC_;
using TensorRefC = TensorRef<ElementC const, LayoutC>;
using TensorRefD = TensorRef<ElementC, LayoutC>;
using ElementAccumulator = ElementAccumulator_;
using OperatorClass = OperatorClass_;
using ArchTag = ArchTag_;
using ThreadblockShape = ThreadblockShape_;
using WarpShape = WarpShape_;
using InstructionShape = InstructionShape_;
using EpilogueOutputOp = EpilogueOutputOp_;
using ThreadblockSwizzle = ThreadblockSwizzle_;
using Operator = Operator_;
using MathOperator = Operator;
static int const kStages = Stages;
static int const kAlignmentA = AlignmentA;
static int const kAlignmentB = AlignmentB;
static int const kAlignmentC = EpilogueOutputOp::kCount;
static bool const kSplitKSerial = SplitKSerial;
static ComplexTransform const kTransformA = ComplexTransform::kNone;
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Define the kernel
using GemmKernel = typename kernel::DefaultSparseGemmRowBroadcast<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementC,
LayoutC,
ElementAccumulator,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOutputOp,
ThreadblockSwizzle,
kStages,
kSplitKSerial,
Operator
>::GemmKernel;
using ElementE = typename GemmKernel::ElementE;
using LayoutE = typename GemmKernel::LayoutE;
static int const kAlignmentE = 128 / sizeof_bits<ElementE>::value;
static int const kSparse = GemmKernel::kSparse;
static int const kMetaSizeInBits = GemmKernel::kMetaSizeInBits;
static int const kElementsPerElementE = GemmKernel::kElementsPerElementE;
/// Argument structure
struct Arguments {
//
// Data members
//
GemmCoord problem_size;
TensorRef<ElementA const, LayoutA> ref_A;
TensorRef<ElementB const, LayoutB> ref_B;
TensorRef<ElementC const, LayoutC> ref_C;
TensorRef<ElementC, LayoutC> ref_D;
TensorRef<ElementE const, LayoutE> ref_E;
typename EpilogueOutputOp::Params epilogue;
int split_k_slices;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Arguments(): problem_size(0, 0, 0), split_k_slices(1) {
}
/// Constructs an Arguments structure
CUTLASS_HOST_DEVICE
Arguments(
GemmCoord problem_size_,
TensorRef<ElementA const, LayoutA> ref_A_,
TensorRef<ElementB const, LayoutB> ref_B_,
TensorRef<ElementC const, LayoutC> ref_C_,
TensorRef<ElementC, LayoutC> ref_D_,
TensorRef<ElementE, LayoutE> ref_E_,
typename EpilogueOutputOp::Params epilogue_ =
typename EpilogueOutputOp::Params(),
int split_k_slices = 1
):
problem_size(problem_size_),
ref_A(ref_A_),
ref_B(ref_B_),
ref_C(ref_C_),
ref_D(ref_D_),
ref_E(ref_E_),
epilogue(epilogue_),
split_k_slices(split_k_slices) {
}
};
private:
/// Kernel parameters object
typename GemmKernel::Params params_;
public:
/// Constructs the GEMM.
SparseGemmRowBroadcast() { }
/// Determines whether the GEMM can execute the given problem.
static Status can_implement(Arguments const &args) {
if (!kSplitKSerial && args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
Status status = GemmKernel::can_implement(
args.problem_size,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref()
);
if (status != Status::kSuccess) {
return status;
}
return Status::kSuccess;
}
/// Gets the workspace size
static size_t get_workspace_size(Arguments const &args) {
size_t bytes = 0;
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord tiled_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial && args.split_k_slices > 1) {
bytes += sizeof(int) * size_t(tiled_shape.m()) * size_t(tiled_shape.n());
}
return bytes;
}
/// Initializes GEMM state from arguments.
Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {
// Determine grid shape
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(
args.problem_size,
{ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK},
args.split_k_slices);
if (kSplitKSerial) {
if (args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
size_t bytes = get_workspace_size(args);
cudaError_t result = cudaMemsetAsync(workspace, 0, bytes, stream);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
}
else {
if (args.split_k_slices > 1) {
return Status::kErrorInvalidProblem;
}
}
// Initialize the Params structure
params_ = typename GemmKernel::Params{
args.problem_size,
grid_shape,
args.ref_A.non_const_ref(),
args.ref_B.non_const_ref(),
args.ref_C.non_const_ref(),
args.ref_D,
args.ref_E.non_const_ref(),
args.epilogue,
static_cast<int *>(workspace)
};
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
if (smem_size >= (48 << 10)) {
cudaError_t result = cudaFuncSetAttribute(Kernel<GemmKernel>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
}
return Status::kSuccess;
}
/// Lightweight update given a subset of arguments
Status update(Arguments const &args, void *workspace = nullptr) {
if (kSplitKSerial && args.split_k_slices > 1) {
if (!workspace) {
return Status::kErrorWorkspaceNull;
}
}
params_.ref_A.reset(args.ref_A.non_const_ref().data());
params_.ref_B.reset(args.ref_B.non_const_ref().data());
params_.ref_C.reset(args.ref_C.non_const_ref().data());
params_.ref_D.reset(args.ref_D.data());
params_.ref_E.reset(args.ref_E.non_const_ref().data());
params_.output_op = args.epilogue;
params_.semaphore = static_cast<int *>(workspace);
return Status::kSuccess;
}
/// Runs the kernel using initialized state.
Status run(cudaStream_t stream = nullptr) {
ThreadblockSwizzle threadblock_swizzle;
dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);
dim3 block(GemmKernel::kThreadCount, 1, 1);
int smem_size = int(sizeof(typename GemmKernel::SharedStorage));
cutlass::Kernel<GemmKernel><<<grid, block, smem_size, stream>>>(params_);
cudaError_t result = cudaGetLastError();
return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;
}
/// Runs the kernel using initialized state.
Status operator()(cudaStream_t stream = nullptr) {
return run(stream);
}
/// Runs the kernel using initialized state.
Status operator()(
Arguments const &args,
void *workspace = nullptr,
cudaStream_t stream = nullptr) {
Status status = initialize(args, workspace, stream);
if (status == Status::kSuccess) {
status = run(stream);
}
return status;
}
};
} // namespace device
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
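// Illustrative sketch (editorial, not part of the header above): a standalone
// host-side helper that mirrors the workspace sizing rule in
// get_workspace_size() -- one int-sized semaphore per (M, N) threadblock tile,
// allocated only when serial split-K is actually used. The tile extents and
// problem sizes in the example are hypothetical.
#include <cstddef>
inline std::size_t split_k_workspace_bytes(int m, int n, int tile_m, int tile_n,
                                           int split_k_slices) {
  if (split_k_slices <= 1) {
    return 0;  // No inter-CTA reduction, so no semaphores are needed.
  }
  std::size_t tiles_m = (m + tile_m - 1) / tile_m;  // ceil(m / tile_m)
  std::size_t tiles_n = (n + tile_n - 1) / tile_n;  // ceil(n / tile_n)
  return sizeof(int) * tiles_m * tiles_n;
}
// Example: a 1024x1024 output with 128x128 tiles and 4 slices needs
// 8 * 8 * sizeof(int) = 256 bytes of semaphore workspace.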
| 17,267
| 32.530097
| 102
|
h
|
null |
pytorch-main/aten/src/ATen/native/sparse/cuda/cutlass/sparse_gemm_row_broadcast.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Template for a pipelined GEMM kernel. Does not compute batching; split-K is
         supported only via serial reduction (see the SplitKSerial template parameter).
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/semaphore.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
bool SplitKSerial ///! If true, code supporting split-K via serial reduction is enabled.
>
struct SparseGemmRowBroadcast {
using Mma = Mma_;
using Epilogue = Epilogue_;
using OutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static bool const kSplitKSerial = SplitKSerial;
static int const kSparse = Mma::kSparse;
static int const kMetaSizeInBits = Mma::kMetaSizeInBits;
static int const kMaxID2 = Mma::kMaxID2;
static int const kElementsPerElementE = Mma::kElementsPerElementE;
using ElementE = typename Mma::ElementE;
using LayoutE = typename Mma::LayoutE;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
/// Parameters structure
struct Params {
cutlass::gemm::GemmCoord problem_size;
cutlass::gemm::GemmCoord grid_tiled_shape;
int swizzle_log_tile;
typename Mma::IteratorA::Params params_A;
typename Mma::IteratorA::TensorRef ref_A;
typename Mma::IteratorB::Params params_B;
typename Mma::IteratorB::TensorRef ref_B;
typename Epilogue::OutputTileIterator::Params params_C;
typename Epilogue::OutputTileIterator::TensorRef ref_C;
typename Epilogue::OutputTileIterator::Params params_D;
typename Epilogue::OutputTileIterator::TensorRef ref_D;
typename Mma::IteratorE::Params params_E;
typename Mma::IteratorE::TensorRef ref_E;
typename OutputOp::Params output_op;
int *semaphore;
int gemm_k_iterations;
int gemm_k_size;
//
// Methods
//
CUTLASS_HOST_DEVICE
Params(): swizzle_log_tile(0), semaphore(0), gemm_k_iterations(0), gemm_k_size(0) { }
CUTLASS_HOST_DEVICE
Params(
cutlass::gemm::GemmCoord const & problem_size,
cutlass::gemm::GemmCoord const & grid_tiled_shape,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E,
typename OutputOp::Params output_op = typename OutputOp::Params(),
int *workspace = nullptr
):
problem_size(problem_size),
grid_tiled_shape(grid_tiled_shape),
swizzle_log_tile(ThreadblockSwizzle().get_log_tile(grid_tiled_shape)),
params_A(ref_A.layout()),
ref_A(ref_A),
params_B(ref_B.layout()),
ref_B(ref_B),
params_C(ref_C.layout()),
ref_C(ref_C),
params_D(ref_D.layout()),
ref_D(ref_D),
params_E(ref_E.layout()),
ref_E(ref_E),
output_op(output_op) {
int total_gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
int gemm_k_iterations = (total_gemm_k_iterations + grid_tiled_shape.k() - 1) / grid_tiled_shape.k();
gemm_k_size = gemm_k_iterations * Mma::Shape::kK;
semaphore = workspace;
}
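    // Editorial note on the computation above: the local gemm_k_iterations
    // shadows the member of the same name, which this constructor does not set;
    // the kernel derives its own iteration count from gemm_k_size. For example,
    // with Mma::Shape::kK == 64, problem_size.k() == 640 and
    // grid_tiled_shape.k() == 4: total_gemm_k_iterations == 10,
    // gemm_k_iterations == 3 and gemm_k_size == 192.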
};
/// Shared memory storage structure
union SharedStorage {
typename Mma::SharedStorage main_loop;
typename Epilogue::SharedStorage epilogue;
};
//
// Methods
//
CUTLASS_HOST_DEVICE
SparseGemmRowBroadcast() { }
/// Determines whether kernel satisfies alignment
static Status can_implement(
cutlass::gemm::GemmCoord const & problem_size,
typename Mma::IteratorA::TensorRef ref_A,
typename Mma::IteratorB::TensorRef ref_B,
typename Epilogue::OutputTileIterator::TensorRef ref_C,
typename Epilogue::OutputTileIterator::TensorRef ref_D,
typename Mma::IteratorE::TensorRef ref_E) {
static int const kAlignmentA = Mma::IteratorA::AccessType::kElements;
static int const kAlignmentB = Mma::IteratorB::AccessType::kElements;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
static int const kAlignmentE = Mma::IteratorE::AccessType::kElements;
if (!TensorRef_aligned(ref_A, kAlignmentA)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_B, kAlignmentB)) {
return Status::kErrorMisalignedOperand;
}
    // NOTE: Changed from the reference CUTLASS sparse GEMM kernel: the
    // alignment check on ref_C below is intentionally disabled for this
    // row-broadcast variant.
/*
if (!TensorRef_aligned(ref_C, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
*/
if (!TensorRef_aligned(ref_D, kAlignmentC)) {
return Status::kErrorMisalignedOperand;
}
if (!TensorRef_aligned(ref_E, kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
if ((problem_size.m() % kAlignmentA) || ((problem_size.k() / kSparse) % kAlignmentA) ||
(problem_size.n() % kAlignmentB) || (problem_size.k() % kAlignmentB) ||
(problem_size.m() % kAlignmentC) || (problem_size.n() % kAlignmentC) ||
(problem_size.m() % kAlignmentE) || ((problem_size.k() / kSparse) % kAlignmentE)) {
return Status::kErrorMisalignedOperand;
}
    // The K dimension has to be a multiple of the threadblock K because
    // out-of-bound metadata would be initialized to 0 by the async-copy
    // zero-fill, but 0 is not valid metadata.
if (problem_size.k() % Mma::Shape::kK) {
return Status::kErrorMisalignedOperand;
}
    // The M dimension has to be a multiple of 32 (sparse float) or 16 (sparse int)
    // because of the row reordering of operand E.
static int const kAlignmentM = (sizeof(ElementE) == 2) ? 32 : 16;
if (problem_size.m() % kAlignmentM) {
return Status::kErrorMisalignedOperand;
}
return Status::kSuccess;
}
/// Executes one GEMM
CUTLASS_DEVICE
void operator()(Params const ¶ms, SharedStorage &shared_storage) {
// Compute threadblock location
ThreadblockSwizzle threadblock_swizzle;
cutlass::gemm::GemmCoord threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
// Early exit if CTA is out of range
if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||
params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {
return;
}
// Compute initial location in logical coordinates
cutlass::MatrixCoord tb_offset_A{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
cutlass::MatrixCoord tb_offset_B{
threadblock_tile_offset.k() * params.gemm_k_size,
threadblock_tile_offset.n() * Mma::Shape::kN
};
cutlass::MatrixCoord tb_offset_E{
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.k() * params.gemm_k_size / kSparse,
};
// Problem size is a function of threadblock index in the K dimension
int problem_size_k = min(
params.problem_size.k(),
(threadblock_tile_offset.k() + 1) * params.gemm_k_size);
// Compute threadblock-scoped matrix multiply-add
int gemm_k_iterations = (problem_size_k - tb_offset_B.row() + Mma::Shape::kK - 1) / Mma::Shape::kK;
// Compute position within threadblock
int thread_idx = threadIdx.x;
// Construct iterators to A, B, and E operands
typename Mma::IteratorA iterator_A(
params.params_A,
params.ref_A.data(),
{params.problem_size.m(), problem_size_k / kSparse},
thread_idx,
tb_offset_A);
typename Mma::IteratorB iterator_B(
params.params_B,
params.ref_B.data(),
{problem_size_k, params.problem_size.n()},
thread_idx,
tb_offset_B);
typename Mma::IteratorE iterator_E(
params.params_E, params.ref_E.data(),
{params.problem_size.m(),
problem_size_k / kSparse / kElementsPerElementE},
thread_idx, tb_offset_E);
// Broadcast the warp_id computed by lane 0 to ensure dependent code
// is compiled as warp-uniform.
int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
int lane_idx = threadIdx.x % 32;
//
// Main loop
//
// Construct thread-scoped matrix multiply
Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);
typename Mma::FragmentC accumulators;
accumulators.clear();
if (!kSplitKSerial || gemm_k_iterations > 0) {
// Compute threadblock-scoped matrix multiply-add
mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, iterator_E, accumulators);
}
//
// Epilogue
//
OutputOp output_op(params.output_op);
//
// Masked tile iterators constructed from members
//
threadblock_tile_offset =
threadblock_swizzle.get_tile_offset(params.swizzle_log_tile);
    // Assume identity swizzle.
MatrixCoord threadblock_offset(
threadblock_tile_offset.m() * Mma::Shape::kM,
threadblock_tile_offset.n() * Mma::Shape::kN
);
int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m();
// Construct the semaphore.
Semaphore semaphore(params.semaphore + block_idx, thread_idx);
// If performing a reduction via split-K, fetch the initial synchronization
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// Fetch the synchronization lock initially but do not block.
semaphore.fetch();
// Indicate which position in a serial reduction the output operator is currently updating
output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k());
}
// Tile iterator loading from source tensor.
typename Epilogue::OutputTileIterator iterator_C(
params.params_C,
params.ref_C.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
// Tile iterator writing to destination tensor.
typename Epilogue::OutputTileIterator iterator_D(
params.params_D,
params.ref_D.data(),
params.problem_size.mn(),
thread_idx,
threadblock_offset
);
Epilogue epilogue(
shared_storage.epilogue,
thread_idx,
warp_idx,
lane_idx);
// Wait on the semaphore - this latency may have been covered by iterator construction
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
// For subsequent threadblocks, the source matrix is held in the 'D' tensor.
if (threadblock_tile_offset.k()) {
iterator_C = iterator_D;
}
semaphore.wait(threadblock_tile_offset.k());
__threadfence();
}
// Execute the epilogue operator to update the destination tensor.
epilogue(output_op, iterator_D, accumulators, iterator_C);
//
// Release the semaphore
//
if (kSplitKSerial && params.grid_tiled_shape.k() > 1) {
int lock = 0;
if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) {
// The final threadblock resets the semaphore for subsequent grids.
lock = 0;
}
else {
// Otherwise, the semaphore is incremented
lock = threadblock_tile_offset.k() + 1;
}
__threadfence();
semaphore.release(lock);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
| 13,632
| 32.74505
| 108
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/attention.h
|
#pragma once
#include <ATen/ATen.h>
#include <c10/macros/Export.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/transformers/attention.h>
#include <c10/util/Optional.h>
namespace at {
namespace native {
using fused_sdp_choice_fn = int64_t (*)(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal, c10::optional<double> scale);
DECLARE_DISPATCH(fused_sdp_choice_fn, _fused_sdp_choice_stub);
TORCH_API Tensor bmm_nt(const Tensor& a, const Tensor& b);
TORCH_API Tensor masked_softmax(
Tensor& attn_scores,
c10::optional<Tensor> attn_mask,
const Tensor& query,
c10::optional<int64_t> mask_type = {});
TORCH_API Tensor transform0213_gemm_nt_bias(
const Tensor& a,
const Tensor& b,
const Tensor& c,
const Tensor& query);
TORCH_API Tensor bmm_nn(Tensor& out, const Tensor& a, const Tensor& b);
TORCH_API void debug_assert_shape(int line, const Tensor& t, c10::IntArrayRef shape);
TORCH_API Tensor qkv_projection(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const Tensor& qkv_weight);
} // namespace native
} // namespace at
| 1,226
| 28.214286
| 112
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/sdp_utils_cpp.h
|
#pragma once
#include <ATen/Context.h>
#include <ATen/core/Tensor.h>
#include <c10/core/SymFloat.h>
#include <cmath>
#include <cstdint>
namespace sdp {
constexpr int32_t num_backends = 3;
enum class SDPBackend {
error = -1,
math = 0,
flash_attention = 1,
efficient_attention = 2
};
// Note that if this changed make sure to update
// the templated enum in mem_eff/kernel_forward.h and mem_eff/kernel_backward.h
enum class CustomMaskType {
NoCustomMask = 0,
CausalFromTopLeft = 1,
CausalFromBottomRight = 2,
NumCustomMaskTypes,
};
inline c10::SymFloat calculate_scale(
const at::Tensor& query,
c10::optional<double> scale) {
const auto softmax_scale = scale.has_value()
? scale.value()
: (c10::SymFloat(1.0) / (c10::SymFloat(query.sym_size(-1)).sqrt()));
return c10::SymFloat(softmax_scale);
}
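// Editorial example: with no explicit scale and a query whose last dimension
// (head_dim) is 64, the returned softmax scale is 1 / sqrt(64) = 0.125.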
} // namespace sdp
| 856
| 22.805556
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/fmha.h
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <cuda.h>
#include <vector>
#ifdef OLD_GENERATOR_PATH
#include <ATen/CUDAGeneratorImpl.h>
#else
#include <ATen/cuda/CUDAGeneratorImpl.h>
#endif
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <ATen/native/transformers/cuda/flash_attn/fmha_utils.h>
namespace pytorch_fmha {
constexpr int TOTAL_DIM = 0;
constexpr int H_DIM = 1;
constexpr int D_DIM = 2;
////////////////////////////////////////////////////////////////////////////////////////////////////
struct Qkv_params {
// The QKV matrices.
void *__restrict__ q_ptr;
void *__restrict__ k_ptr;
void *__restrict__ v_ptr;
// The stride between rows of the Q, K and V matrices.
// size_t qkv_stride_in_elts;
// size_t qkv_stride_in_bytes;
// TD [2022-04-16]: We're using 32-bit indexing to save registers.
// The code probably won't work for arrays larger than 2GB.
uint32_t q_row_stride_in_elts;
uint32_t k_row_stride_in_elts;
uint32_t v_row_stride_in_elts;
uint32_t q_head_stride_in_elts;
uint32_t k_head_stride_in_elts;
uint32_t v_head_stride_in_elts;
// The number of heads.
int h;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
struct FMHA_fprop_params : public Qkv_params {
// The O matrix (output).
void * __restrict__ o_ptr;
// The stride between rows of O.
// size_t o_stride_in_elts;
// size_t o_stride_in_bytes;
uint32_t o_row_stride_in_elts;
uint32_t o_head_stride_in_elts;
uint32_t o_tmp_row_stride_in_elts;
uint32_t o_tmp_head_stride_in_elts;
    // The pointer to the O_tmp matrix, which holds the intermediate value of O
    // during the loop.
void *__restrict__ o_tmp_ptr;
// The pointer to the S matrix.
void * __restrict__ s_ptr;
// The stride between rows of the S matrix.
// int64_t s_stride_in_bytes;
uint32_t s_stride_in_bytes;
// The pointer to the softmax sum.
void * __restrict__ softmax_lse_ptr;
// The dimensions.
int b, seqlen_q, seqlen_k, d;
// The scaling factors for the kernel.
float scale_bmm1f;
uint32_t scale_bmm1;
    // Arrays of length b+1 holding the starting offset of each sequence.
int * __restrict__ cu_seqlens_q;
int * __restrict__ cu_seqlens_k;
int *__restrict__ blockmask;
// The dropout probability (probability of keeping an activation).
float p_dropout;
uint32_t p_dropout_in_uint;
uint16_t p_dropout_in_uint16_t;
// Scale factor of 1 / (1 - p_dropout).
float rp_dropout;
float scale_bmm1_rp_dropout;
// Scale factor of 1 / (1 - p_dropout), in half2.
uint32_t scale_dropout;
// Random state.
at::PhiloxCudaState philox_args;
int64_t * extragraph_offset;
int64_t * seed;
bool is_bf16;
bool is_causal;
int num_splits; // How many SMs per attention matrix.
};
////////////////////////////////////////////////////////////////////////////////////////////////////
struct FMHA_dgrad_params : public FMHA_fprop_params {
// The dQKV matrices.
void *__restrict__ dq_ptr;
void *__restrict__ dk_ptr;
void *__restrict__ dv_ptr;
// // To accumulate dK and dV in case we're splitting the bwd along seqlen_q dimension
// void *__restrict__ dk_accum_ptr;
// void *__restrict__ dv_accum_ptr;
// The stride between rows of the dQ, dK and dV matrices.
// TD [2022-04-16]: We're using 32-bit indexing to save registers.
// The code probably won't work for arrays larger than 2GB.
uint32_t dq_row_stride_in_elts;
uint32_t dk_row_stride_in_elts;
uint32_t dv_row_stride_in_elts;
uint32_t dq_head_stride_in_elts;
uint32_t dk_head_stride_in_elts;
uint32_t dv_head_stride_in_elts;
// The dO matrix. We assume it is contiguous.
void * __restrict__ do_ptr;
// The pointer to the softmax d sum.
void * __restrict__ dsoftmax_sum;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename Kernel_params>
struct Launch_params{
Launch_params(cudaDeviceProp * props_,
cudaStream_t stream_,
bool is_dropout_,
bool return_softmax_)
: elts_per_thread(0)
, props(props_)
, stream(stream_)
, is_dropout(is_dropout_)
, return_softmax(return_softmax_) {
}
size_t elts_per_thread;
cudaDeviceProp * props;
cudaStream_t stream;
bool is_dropout;
bool return_softmax;
Kernel_params params;
int num_full_heads;
int num_main_groups;
int heads_last_wave;
int main_steps;
int rest_steps;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params);
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &launch_params);
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &launch_params);
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure);
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure);
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure);
void run_fmha_block_fp16_sm80(Launch_params<FMHA_fprop_params> &launch_params, const bool configure);
void run_fmha_block_dgrad_fp16_sm80(const FMHA_dgrad_params ¶ms, cudaStream_t stream);
} // namespace pytorch_fmha
| 7,298
| 32.948837
| 101
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/fmha_api.h
|
#pragma once
#include <cstddef>
#include <ATen/ATen.h>
#include <c10/util/Exception.h>
namespace pytorch_fmha {
TORCH_API
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
mha_fwd(const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
const at::Tensor &k, // total_k x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
const at::Tensor &v, // total_k x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
at::Tensor &out,
const at::Tensor &cu_seqlens_q, // b+1
const at::Tensor &cu_seqlens_k, // b+1
const int max_seqlen_q_,
const int max_seqlen_k_,
const float p_dropout,
const float softmax_scale,
const bool zero_tensors,
const bool is_causal,
const bool return_softmax,
const int num_splits);
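// Editorial note on the packed layout assumed above: sequences are concatenated
// along the first dimension, and cu_seqlens_{q,k} hold prefix sums of the
// per-batch lengths. E.g. for two sequences of lengths 3 and 5,
// cu_seqlens = {0, 3, 8} and total = 8 rows.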
TORCH_API
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
mha_bwd(const at::Tensor &dout,  // total_q x num_heads x head_size
const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
const at::Tensor &k, // total_k x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
const at::Tensor &v, // total_k x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
const at::Tensor &out, // total_q x num_heads x head_size
const at::Tensor &softmax_lse_, // b x h x s softmax logsumexp
at::Tensor &dq, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
at::Tensor &dk, // total_k x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
at::Tensor &dv, // total_k x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
const at::Tensor &cu_seqlens_q, // b+1
const at::Tensor &cu_seqlens_k, // b+1
const int max_seqlen_q_,
const int max_seqlen_k_, // max sequence length to choose the kernel
const float p_dropout, // probability to drop
const float softmax_scale,
const bool zero_tensors,
const bool is_causal,
const int num_splits,
at::Tensor philox_seed,
at::Tensor philox_offset
);
} // namespace pytorch_fmha
| 2,218
| 42.509804
| 102
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h
|
// Copyright (c) 2022, Tri Dao.
#pragma once
#include <vector>
#include <cuda_fp16.h>
#include <cuda_bf16.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/transformers/cuda/flash_attn/fmha.h>
#include <ATen/native/transformers/cuda/flash_attn/static_switch.h>
#include <ATen/native/transformers/cuda/flash_attn/fmha_fprop_kernel_1xN.h>
namespace pytorch_fmha {
// Find the number of splits that maximizes the occupancy. For example, if we have
// batch * n_heads = 48 and we have 108 SMs, having 2 splits (efficiency = 0.89) is
// better than having 3 splits (efficiency = 0.67). However, we also don't want too many
// splits as that would incur more HBM reads/writes.
// So we find the best efficiency, then find the smallest number of splits that gets 95%
// of the best efficiency.
// [2022-11-25] TD: Mark this as "inline" otherwise we get "multiple definition" error.
inline int num_splits_heuristic_fwd(int batch_nheads, int num_SMs, int ctas_per_sm, int max_splits) {
float max_efficiency = 0.f;
std::vector<float> efficiency;
efficiency.reserve(max_splits);
for (int num_splits = 1; num_splits <= max_splits; num_splits++) {
float n_waves = float(batch_nheads * num_splits) / (num_SMs * ctas_per_sm);
float eff = n_waves / ceil(n_waves);
// printf("num_splits = %d, eff = %f\n", num_splits, eff);
if (eff > max_efficiency) { max_efficiency = eff; }
efficiency.push_back(eff);
}
for (int num_splits = 1; num_splits <= max_splits; num_splits++) {
if (efficiency[num_splits - 1] > 0.95 * max_efficiency) {
// printf("num_splits chosen = %d\n", num_splits);
return num_splits;
}
}
return 1;
}
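// Worked example (editorial), matching the comment above: with
// batch_nheads = 48, num_SMs = 108 and ctas_per_sm = 1, num_splits = 2 gives
// n_waves = 96/108 -> efficiency ~0.89, while num_splits = 3 gives
// n_waves = 144/108 -> ceil = 2 waves -> efficiency ~0.67, so the loop above
// settles on 2 splits.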
template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Return_softmax>
__global__ void fmha_fwd_loop_kernel(FMHA_fprop_params params) {
fmha::device_1xN_loop<Kernel_traits, Is_dropout, Is_causal, Return_softmax>(params);
}
template<typename Kernel_traits>
void run_fmha_fwd_loop(Launch_params<FMHA_fprop_params> &launch_params) {
constexpr int blocksize_c = Kernel_traits::Cta_tile_p::N;
const int loop_steps = (launch_params.params.seqlen_k + blocksize_c - 1) / blocksize_c;
constexpr int smem_size_softmax_lse = Kernel_traits::Smem_dp_sum::BYTES_PER_TILE;
// Don't need smem_size_softmax_lse if we're not looping
const int smem_size = fmha::get_dynamic_smem_size<Kernel_traits>()
+ (loop_steps > 1 ? smem_size_softmax_lse : 0);
// Work-around for gcc 7. It doesn't like nested BOOL_SWITCH.
// https://github.com/kokkos/kokkos-kernels/issues/349
// https://github.com/HazyResearch/flash-attention/issues/21
BOOL_SWITCH(launch_params.is_dropout, IsDropoutConst, ([&] {
auto kernel = launch_params.params.is_causal
? (launch_params.return_softmax
? &fmha_fwd_loop_kernel<Kernel_traits, IsDropoutConst, true, true>
: &fmha_fwd_loop_kernel<Kernel_traits, IsDropoutConst, true, false>)
: (launch_params.return_softmax
? &fmha_fwd_loop_kernel<Kernel_traits, IsDropoutConst, false, true>
: &fmha_fwd_loop_kernel<Kernel_traits, IsDropoutConst, false, false>);
if( smem_size >= 48 * 1024 ) {
FMHA_CHECK_CUDA(cudaFuncSetAttribute(
kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
// Automatically set num_splits to maximize occupancy
if (launch_params.params.num_splits <= 0) {
int ctas_per_sm;
cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&ctas_per_sm, kernel, Kernel_traits::THREADS, smem_size);
auto dprops = at::cuda::getCurrentDeviceProperties();
// printf("CTAS_PER_SM = %d, nSMs = %d\n", ctas_per_sm, dprops->multiProcessorCount);
constexpr int M = Kernel_traits::Cta_tile_p::M;
launch_params.params.num_splits = num_splits_heuristic_fwd(
launch_params.params.b * launch_params.params.h, dprops->multiProcessorCount,
ctas_per_sm,
                /*max_splits=*/std::min(30, (launch_params.params.seqlen_q + M - 1) / M)
);
}
// printf("smem_size = %d\n", smem_size);
dim3 grid(launch_params.params.b, launch_params.params.h, launch_params.params.num_splits);
kernel<<<grid, Kernel_traits::THREADS, smem_size, launch_params.stream>>>(
launch_params.params);
FMHA_CHECK_CUDA(cudaPeekAtLastError());
}));
}
} // namespace pytorch_fmha
| 4,607
| 46.505155
| 101
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/fmha_kernel.h
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/transformers/cuda/flash_attn/fmha.h>
#include <ATen/native/transformers/cuda/flash_attn/utils.h>
#include <ATen/native/transformers/cuda/flash_attn/smem_tile.h>
#include <ATen/native/transformers/cuda/flash_attn/gmem_tile.h>
#include <ATen/native/transformers/cuda/flash_attn/mask.h>
#include <ATen/native/transformers/cuda/flash_attn/softmax.h>
#include <ATen/native/transformers/cuda/flash_attn/philox.cuh>
namespace fmha {
////////////////////////////////////////////////////////////////////////////////////////////////////
template<int THREADS_PER_CTA>
struct BlockInfoPadded {
template<typename Params>
__device__ BlockInfoPadded(const Params ¶ms,
const int bidb,
const int bidh,
const int tidx)
: bidb(bidb), bidh(bidh), h(params.h) {
// The block index.
sum_s_k = params.cu_seqlens_k[bidb];
actual_seqlen_k = params.cu_seqlens_k[bidb + 1] - sum_s_k;
sum_s_q = params.cu_seqlens_q[bidb];
actual_seqlen_q = params.cu_seqlens_q[bidb + 1] - sum_s_q;
tidx_global = (bidb * params.h + bidh) * THREADS_PER_CTA + tidx;
}
__device__ bool stop_early(const int start_col = 0) const {
return actual_seqlen_k <= start_col;
}
int actual_seqlen_q;
int actual_seqlen_k;
int sum_s_q;
int sum_s_k;
int bidh;
int bidb;
int tidx_global;
int h;
};
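// Editorial example: with cu_seqlens_q = {0, 3, 8} (two sequences of lengths
// 3 and 5), the block with bidb = 1 sees sum_s_q = 3 and actual_seqlen_q = 5.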
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace fmha
| 3,399
| 41.5
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/fmha_utils.h
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_fp16.h>
#include <cuda_bf16.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define FMHA_CHECK_CUDA( call ) \
do { \
cudaError_t status_ = call; \
if( status_ != cudaSuccess ) { \
fprintf( stderr, \
"CUDA error (%s:%d): %s\n", \
__FILE__, \
__LINE__, \
cudaGetErrorString( status_ ) ); \
exit( 1 ); \
} \
} while( 0 )
////////////////////////////////////////////////////////////////////////////////////////////////////
enum Data_type { DATA_TYPE_FP16, DATA_TYPE_BF16, DATA_TYPE_FP32, DATA_TYPE_INT32, DATA_TYPE_INT8 };
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline void set_alpha( uint32_t &alpha, float norm, Data_type dtype ) {
if( dtype == DATA_TYPE_FP16 ) {
half x = __float2half_rn( norm );
uint16_t h = reinterpret_cast<const uint16_t &>( x );
ushort2 h2 = { h, h };
alpha = reinterpret_cast<const uint32_t &>( h2 );
} else if( dtype == DATA_TYPE_BF16 ) {
__nv_bfloat16 x = __float2bfloat16( norm );
uint16_t h = reinterpret_cast<const uint16_t &>( x );
ushort2 h2 = { h, h };
alpha = reinterpret_cast<const uint32_t &>( h2 );
} else if( dtype == DATA_TYPE_FP32 ) {
alpha = reinterpret_cast<const uint32_t &>( norm );
} else if( dtype == DATA_TYPE_INT32 ) {
int32_t inorm = static_cast<int32_t>( norm );
alpha = reinterpret_cast<const uint32_t &>( inorm );
} else {
assert( false );
}
}
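// Editorial example: for DATA_TYPE_FP16 and norm = 0.125f, the half-precision
// bit pattern 0x3000 is replicated into both 16-bit lanes, so alpha == 0x30003000.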
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline size_t get_size_in_bytes( size_t n, Data_type dtype ) {
switch( dtype ) {
case DATA_TYPE_FP32:
return n * 4;
case DATA_TYPE_FP16:
return n * 2;
case DATA_TYPE_BF16:
return n * 2;
case DATA_TYPE_INT32:
return n * 4;
case DATA_TYPE_INT8:
return n;
default:
assert( false );
return 0;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| 4,921
| 47.732673
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/gemm.h
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <ATen/native/transformers/cuda/flash_attn/utils.h>
#include <cutlass/cutlass.h>
#include <cutlass/gemm/warp/default_mma_tensor_op.h>
#include <cutlass/layout/layout.h>
#include <cutlass/arch/mma.h>
#include <cutlass/array.h>
#include <cutlass/numeric_types.h>
namespace fmha {
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Data_type_, int NUM_ELTS_, int BITS_PER_ELT_, int ALIGNMENT_ >
struct Fragment_base_ {
// The data type.
using Data_type = Data_type_;
// default input type
using Input_type_ = Data_type_;
// Does it store the array of elements.
static constexpr bool HAS_ELTS = BITS_PER_ELT_ >= 8;
// The number of elements.
static constexpr int NUM_ELTS = NUM_ELTS_;
// The size of element in bits.
static constexpr int BITS_PER_ELT = BITS_PER_ELT_;
// The size of byte of a single register.
static constexpr int BYTES_PER_REG = 4;
// The size in bits.
static constexpr int BITS_PER_REG = BYTES_PER_REG * 8;
// The number of registers needed to store the fragment.
static constexpr int NUM_REGS = DivUpConstexpr(NUM_ELTS * BITS_PER_ELT, BITS_PER_REG);
// The size in bytes (as returned by sizeof(Fragment_base<>).
static constexpr int SIZE_IN_BYTES = NUM_REGS * BYTES_PER_REG;
// The alignment.
static constexpr int ALIGNMENT = ALIGNMENT_ > 0 ? ALIGNMENT_ : MinConstexpr(NUM_REGS * BYTES_PER_REG, 16);
};
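// Editorial example: Fragment_base_<uint16_t, 8, 16, 0> describes 8 fp16
// elements = 128 bits, so NUM_REGS = 4, SIZE_IN_BYTES = 16 and ALIGNMENT = 16.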
////////////////////////////////////////////////////////////////////////////////////////////////////
template<
// The type of the elements.
typename Data_type_,
// The number of elements.
int NUM_ELTS_,
// The alignment if you want to force a value -- use 0 otherwise.
int ALIGNMENT_ = 0,
// The base class.
typename Base_ = Fragment_base_<Data_type_, NUM_ELTS_, 8 * sizeof(Data_type_), ALIGNMENT_>
>
struct alignas(static_cast<int>(Base_::ALIGNMENT)) Fragment : public Base_ {
// The size of a load/store.
static constexpr int BYTES_PER_LOAD_STORE = Base_::NUM_REGS * sizeof(uint32_t);
// Clear the fragment. Using PTX in that code seems to produce better SASS...
inline __device__ void clear() {
#pragma unroll
for( int ii = 0; ii < Base_::NUM_REGS; ++ii ) {
asm volatile("mov.u32 %0, 0; \n" : "=r"(this->reg(ii)) : );
}
}
// Immutable access to a register.
inline __device__ const uint32_t& reg(int ii) const {
return this->regs_[ii];
}
// Mutable access to a register.
inline __device__ uint32_t& reg(int ii) {
return this->regs_[ii];
}
uint32_t regs_[Base_::NUM_REGS];
// Immutable access to the elements.
inline __device__ const Data_type_& elt(int ii) const {
return reinterpret_cast<const Data_type_*>(&this->regs_[0])[ii];
}
// Mutable access to the elements.
inline __device__ Data_type_& elt(int ii) {
return reinterpret_cast<Data_type_*>(&this->regs_[0])[ii];
}
// Immutable access to the elements with a cast.
template< typename Cast_type >
inline __device__ const Cast_type& elt_as(int ii) const {
return reinterpret_cast<const Cast_type*>(&this->regs_[0])[ii];
}
    // Mutable access to the elements with a cast.
template< typename Cast_type >
inline __device__ Cast_type& elt_as(int ii) {
return reinterpret_cast<Cast_type*>(&this->regs_[0])[ii];
}
// Add another fragment.
inline __device__ void add(const Fragment &other) {
// TODO (TD 2022-04-09): Shouldn't this be NUM_REGS instead of NUM_ELTS?
// Also are we doing int addition or __half2 addition?
#pragma unroll
for( int ii = 0; ii < NUM_ELTS_; ++ii ) {
this->elt(ii) += other.elt(ii);
}
}
// Multiply by another fragment.
inline __device__ void hmul(const Fragment &other) {
#pragma unroll
for( int ii = 0; ii < Base_::NUM_REGS; ++ii ) {
this->reg(ii) = fmha::hmul2(this->reg(ii), other.reg(ii));
}
}
template <typename elem_type>
inline __device__ void hrelu_() {
#pragma unroll
for( int ii = 0; ii < Base_::NUM_REGS; ++ii ) {
this->reg(ii) = fmha::hrelu2<elem_type>(this->reg(ii));
}
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout >
struct Fragment_a : public Fragment<uint16_t, 8> {
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Layout >
struct Fragment_b : public Fragment<uint16_t, 8> {
};
////////////////////////////////////////////////////////////////////////////////////////////////////
struct Fragment_accumulator : public Fragment<float, 8> {
// The base class.
using Base = Fragment<float, 8>;
// Add two fragments.
template< typename Other_fragment_ >
inline __device__ void add(const Other_fragment_ &other) {
for( int ii = 0; ii < Base::NUM_ELTS; ++ii ) {
this->elt(ii) = this->elt(ii) + other.elt(ii);
}
}
inline __device__ void mul_(const float other) {
for( int ii = 0; ii < Base::NUM_ELTS; ++ii ) {
this->elt(ii) *= other;
}
}
// Do the HMMA.
template< typename Layout_a, typename Layout_b >
inline __device__ void mma(const Fragment_a<Layout_a> &a,
const Fragment_b<Layout_b> &b) {
asm volatile( \
"mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 \n" \
" {%0, %1, %2, %3}, \n" \
" {%4, %5, %6, %7}, \n" \
" {%8, %9}, \n" \
" {%0, %1, %2, %3}; \n" \
: "+f"( elt(0)), "+f"( elt(1)), "+f"( elt(2)), "+f"( elt(3))
: "r"(a.reg(0)), "r"(a.reg(1)), "r"(a.reg(2)), "r"(a.reg(3))
, "r"(b.reg(0)), "r"(b.reg(1)));
asm volatile( \
"mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 \n" \
" {%0, %1, %2, %3}, \n" \
" {%4, %5, %6, %7}, \n" \
" {%8, %9}, \n" \
" {%0, %1, %2, %3}; \n" \
: "+f"( elt(4)), "+f"( elt(5)), "+f"( elt(6)), "+f"( elt(7))
: "r"(a.reg(0)), "r"(a.reg(1)), "r"(a.reg(2)), "r"(a.reg(3))
, "r"(b.reg(2)), "r"(b.reg(3)));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Fragment, int M, int N >
inline __device__ void clear(Fragment (&frag)[M][N]) {
#pragma unroll
for( int mi = 0; mi < M; ++mi ) {
#pragma unroll
for( int ni = 0; ni < N; ++ni ) {
frag[mi][ni].clear();
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Accumulator_type, int WARPS_K >
struct Clear_accumulator {
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template< int WARPS_K >
struct Clear_accumulator<float, WARPS_K> {
template< typename Acc, int M, int N >
static inline __device__ void apply(Acc (&acc)[M][N], bool = false) {
fmha::clear(acc);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename Acc, typename A, typename B, int M, int N>
inline __device__ void gemm(Acc (&acc)[M][N], const A (&a)[M], const B (&b)[N]) {
#pragma unroll
for( int mi = 0; mi < M; ++mi ) {
#pragma unroll
for( int ni = 0; ni < N; ++ni ) {
acc[mi][ni].mma(a[mi], b[ni]);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically maps half types => cutlass data types
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Type_>
struct HalfTypeToCutlassType { using Type = Type_; };
/// Statically maps __half => cutlass::half_t
template <> struct HalfTypeToCutlassType<__half> {
using Type = cutlass::half_t;
};
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
template <> struct HalfTypeToCutlassType<__nv_bfloat16> {
using Type = cutlass::bfloat16_t;
};
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename elem_type, typename Acc, typename A, typename B, int M, int N>
inline __device__ void gemm_cl(Acc (&acc)[M][N], const A (&a)[M], const B (&b)[N]) {
using Shape = cutlass::gemm::GemmShape<16 * M, 16 * N, 16>;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>;
#elif defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
#else
assert(0);
// THIS IS NOT CORRECT BUT THE ASSERT WILL STOP THIS
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
// TD [2022-06-02] We don't support Volta (SM70) yet.
#endif
using Element = typename HalfTypeToCutlassType<elem_type>::Type;
using ElementC = float;
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using WarpMma = typename cutlass::gemm::warp::DefaultMmaTensorOp<
Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC,
cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd, 1, true>::Type;
constexpr int kIters = Shape::kK / InstructionShape::kK;
// using FragmentA = typename WarpMma::FragmentA;
// using FragmentB = typename WarpMma::FragmentB;
using FragmentA = typename WarpMma::ArchMmaOperator::FragmentA;
using FragmentB = typename WarpMma::ArchMmaOperator::FragmentB;
using FragmentC = typename WarpMma::FragmentC;
// if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y) == 0) {
// printf("FragmentA::kStorageElements = %d\n", FragmentA::kStorageElements);
// printf("Archmma::FragmentA::kStorageElements = %d\n", WarpMma::ArchMmaOperator::FragmentA::kStorageElements);
// printf("FragmentB::kStorageElements = %d\n", FragmentB::kStorageElements);
// printf("Archmma::FragmentB::kStorageElements = %d\n", WarpMma::ArchMmaOperator::FragmentB::kStorageElements);
// printf("FragmentC::kStorageElements = %d\n", FragmentC::kStorageElements);
// printf("Archmma::FragmentC::kStorageElements = %d\n", WarpMma::ArchMmaOperator::FragmentC::kStorageElements);
// }
// static_assert(FragmentA::kStorageElements == M * a[0].NUM_REGS);
// static_assert(FragmentB::kStorageElements == N * b[0].NUM_REGS);
static_assert(FragmentA::kStorageElements * kIters == a[0].NUM_REGS);
static_assert(FragmentB::kStorageElements * kIters * 16 / InstructionShape::kN == b[0].NUM_REGS);
static_assert(FragmentC::kStorageElements == M * N * acc[0][0].NUM_REGS);
// const FragmentA a_cl = reinterpret_cast<const FragmentA (&)>(a);
// const FragmentB b_cl = reinterpret_cast<const FragmentB (&)>(b);
FragmentC c_cl = reinterpret_cast<FragmentC (&)>(acc);
FragmentA a_cl[kIters][M];
FragmentA b_cl[kIters][N];
constexpr int kRegs = InstructionShape::kK == 16 ? 4 : 2;
#pragma unroll
for (int iter = 0; iter < kIters; iter++) {
#pragma unroll
for (int mi = 0; mi < M; mi++) {
uint32_t *a_ptr = a_cl[iter][mi].raw_data();
#pragma unroll
for (int ki = 0; ki < kRegs; ki++) {
a_ptr[ki] = a[mi].regs_[iter * kRegs + ki];
}
}
}
#pragma unroll
for (int iter = 0; iter < kIters; iter++) {
#pragma unroll
for (int ni = 0; ni < N; ni++) {
uint32_t *b_ptr = b_cl[iter][ni].raw_data();
#pragma unroll
for (int ki = 0; ki < kRegs; ki++) {
// b_ptr[ki] = b[ni].regs_[iter * kRegs + ki];
// TD [2022-06-02] For some reason the order for frag_b is different.
b_ptr[ki] = b[ni].regs_[InstructionShape::kK == 16 ? iter * kRegs + ki : ki * kRegs + iter];
}
}
}
WarpMma mma_op;
// mma_op(c_cl, a_cl, b_cl, c_cl);
#pragma unroll
for (int iter = 0; iter < kIters; iter++) {
mma_op(c_cl, reinterpret_cast<const typename WarpMma::FragmentA (&)>(a_cl[iter]),
reinterpret_cast<const typename WarpMma::FragmentB (&)>(b_cl[iter]), c_cl);
}
    // c_cl is a by-value copy of acc (the reinterpret_cast result above is copied
    // into a new FragmentC), so the results have to be written back into acc explicitly.
#pragma unroll
for (int mi = 0; mi < M; mi++) {
#pragma unroll
for (int ni = 0; ni < N; ni++) {
#pragma unroll
for (int i =0; i < 8; i++) {
acc[mi][ni].elt(i) = c_cl[mi * N * 8 + ni * 8 + i];
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template<
// The number of rows in the CTA tile.
int M_,
// The number of cols in the CTA tile.
int N_,
    // The number of elements in the K dimension of the GEMM loop.
int K_,
// The number of rows of warps.
int WARPS_M_,
// The number of cols of warps.
int WARPS_N_,
// The number of warps in the K dimension of the GEMM loop.
int WARPS_K_>
struct Cta_tile_ {
static constexpr int M = M_, N = N_, K = K_;
// The number of warps.
static constexpr int WARPS_M = WARPS_M_, WARPS_N = WARPS_N_, WARPS_K = WARPS_K_;
// The number of warps per CTA.
static constexpr int WARPS_PER_CTA = WARPS_M * WARPS_N * WARPS_K;
// The number of threads per warp.
static constexpr int THREADS_PER_WARP = 32;
// The number of threads per CTA.
static constexpr int THREADS_PER_CTA = WARPS_PER_CTA * THREADS_PER_WARP;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename Cta_tile>
struct Hmma_tile {
// The number of elements computed with a single warp-MMA.
static constexpr int M_PER_MMA = 16, N_PER_MMA = 16, K_PER_MMA = 16;
// The number of elements computed with a single CTA-MMA.
static constexpr int M_PER_MMA_PER_CTA = M_PER_MMA * Cta_tile::WARPS_M,
N_PER_MMA_PER_CTA = N_PER_MMA * Cta_tile::WARPS_N,
K_PER_MMA_PER_CTA = K_PER_MMA * Cta_tile::WARPS_K;
// The number of MMAs needed to compute the GEMM.
static constexpr int MMAS_M = DivUpConstexpr(Cta_tile::M, M_PER_MMA_PER_CTA),
MMAS_N = DivUpConstexpr(Cta_tile::N, N_PER_MMA_PER_CTA),
MMAS_K = DivUpConstexpr(Cta_tile::K, K_PER_MMA_PER_CTA);
// // The number of elements computed per warp.
// static constexpr int M_PER_WARP = MMAS_M * M_PER_MMA,
// N_PER_WARP = MMAS_N * N_PER_MMA,
// K_PER_WARP = MMAS_K * K_PER_MMA;
};
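// Editorial example: for a hypothetical 128x128x64 CTA tile with 2x2x1 warps,
// each warp-MMA covers 16x16x16, so M_PER_MMA_PER_CTA = N_PER_MMA_PER_CTA = 32,
// K_PER_MMA_PER_CTA = 16, and MMAS_M = MMAS_N = 4, MMAS_K = 4.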
////////////////////////////////////////////////////////////////////////////////////////////////////
using A_type = uint16_t;
using B_type = uint16_t;
using C_type = uint16_t;
using Accumulator_type = float;
using Epilogue_type = float;
constexpr int BITS_PER_ELEMENT_A = sizeof(A_type) * 8;
constexpr int BITS_PER_ELEMENT_B = sizeof(B_type) * 8;
constexpr int BITS_PER_ELEMENT_C = sizeof(C_type) * 8;
////////////////////////////////////////////////////////////////////////////////////////////////////
template<int M, int N, int K, int WARPS_M, int WARPS_N, int WARPS_K>
using Cta_tile_extd = Cta_tile_<M, N, K, WARPS_M, WARPS_N, WARPS_K>;
////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename Cta_tile_>
using Cta_tile_with_k_with_padding = Cta_tile_extd<Cta_tile_::M,
Cta_tile_::N,
Next_power_of_two<Cta_tile_::K>::VALUE,
Cta_tile_::WARPS_M,
Cta_tile_::WARPS_N,
Cta_tile_::WARPS_K>;
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace fmha
| 18,091
| 38.93819
| 120
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/gmem_tile.h
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <cuda_fp16.h>
#include <cuda_bf16.h>
#include <ATen/native/transformers/cuda/flash_attn/utils.h>
namespace fmha {
template<
// The dimensions of the tile computed by the CTA.
typename Cta_tile_,
// The number of bits per element.
int BITS_PER_ELEMENT,
// The number of rows of Q, K or V loaded by this tile.
int ROWS_,
// The number of columns.
int COLS,
int BYTES_PER_LDGS_ = 16
>
struct Gmem_tile_qkv {
using Cta_tile = Cta_tile_;
static constexpr int BYTES_PER_ELEMENT = BITS_PER_ELEMENT / 8;
// The size of each LDG.
static constexpr int BYTES_PER_LDG = BYTES_PER_LDGS_;
// The size of a row in bytes.
static constexpr int BYTES_PER_ROW = COLS * BITS_PER_ELEMENT / 8;
// The number of threads to load a "row" of the matrix.
static constexpr int THREADS_PER_ROW = BYTES_PER_ROW / BYTES_PER_LDG;
static constexpr int ROWS = ROWS_;
// The number of "rows" loaded per LDG.
static constexpr int ROWS_PER_LDG = Cta_tile::THREADS_PER_CTA / THREADS_PER_ROW;
// The number of LDGs needed to load a chunk of the Q matrix.
static constexpr int LDGS = DivUpConstexpr(ROWS, ROWS_PER_LDG);
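    // Worked example (illustrative numbers, not tied to a particular instantiation):
    // with BITS_PER_ELEMENT = 16 (fp16), COLS = 64, BYTES_PER_LDG = 16 and 128 threads per CTA:
    //   BYTES_PER_ROW   = 64 * 16 / 8 = 128
    //   THREADS_PER_ROW = 128 / 16    = 8
    //   ROWS_PER_LDG    = 128 / 8     = 16
    //   LDGS            = ceil(ROWS / 16)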
// Ctor.
template< typename BInfo >
inline __device__ Gmem_tile_qkv(void *ptr_, const uint32_t row_stride_in_elts,
const uint32_t head_stride_in_elts, const int headdim,
const BInfo &binfo, const int tidx, bool use_seqlen_q)
: row_stride_in_bytes(row_stride_in_elts * BYTES_PER_ELEMENT)
, actual_seqlen(use_seqlen_q ? binfo.actual_seqlen_q : binfo.actual_seqlen_k)
, ptr(reinterpret_cast<char *>(ptr_))
, tidx_(tidx)
, col_predicate((tidx % THREADS_PER_ROW) * (BYTES_PER_LDG / BYTES_PER_ELEMENT) < headdim) {
// Compute the position in the sequence (within the CTA for the moment).
int row = tidx / THREADS_PER_ROW;
// Compute the position of the thread in the row.
int col = tidx % THREADS_PER_ROW;
// Store the row as we need it to disable the loads.
// TD [2022-04-16]: To minimize registers, we'll recompute row_ instead of storing it
// row_ = row;
// The row offset in the batched GEMM. For each seq element, we store QKV in that order.
// int64_t row_offset = (int64_t)row * params.qkv_stride_in_bytes;
uint32_t row_offset = (uint32_t)(((use_seqlen_q ? binfo.sum_s_q : binfo.sum_s_k) + row) * row_stride_in_bytes);
// Add the block index.
// row_offset += (int64_t)((binfo.sum_s * NUM_MATS + qkv_offset) * binfo.h + binfo.bidh) * BYTES_PER_ROW;
row_offset += (uint32_t)(binfo.bidh * head_stride_in_elts * BYTES_PER_ELEMENT);
// Assemble the final pointer.
ptr += row_offset + col * BYTES_PER_LDG;
}
// Store data to shared memory.
template< typename Smem_tile >
inline __device__ void commit(Smem_tile &smem_tile) {
smem_tile.store(fetch_);
}
inline __device__ void load() {
int row_ = tidx_ / THREADS_PER_ROW;
const void *ptrs[LDGS];
uint32_t preds[LDGS];
#pragma unroll
for( int ii = 0; ii < LDGS; ++ii ) {
// ptrs[ii] = ptr + (int64_t)ii * ROWS_PER_LDG * row_stride_in_bytes;
ptrs[ii] = ptr + (uint32_t)ii * ROWS_PER_LDG * row_stride_in_bytes;
preds[ii] = col_predicate && ((row_ + ii * ROWS_PER_LDG) < min(ROWS, actual_seqlen));
fetch_[ii] = make_uint4(0, 0, 0, 0);
}
// not packing predicates removes restrictions (e.g. FP16 384, 4 warps)
Ldg_functor<uint4, LDGS> fct(fetch_, ptrs);
#pragma unroll
for( int ii = 0; ii < LDGS; ++ii ) {
fct.load(ii, preds[ii]);
}
}
// Store data to memory.
inline __device__ void store(const uint4 (&data)[LDGS]) {
int row_ = tidx_ / THREADS_PER_ROW;
#pragma unroll
for( int ii = 0; ii < LDGS; ++ii ) {
// char *ptr_ = ptr + (int64_t)ii * ROWS_PER_LDG * row_stride_in_bytes;
char *ptr_ = ptr + (uint32_t)ii * ROWS_PER_LDG * row_stride_in_bytes;
if (col_predicate && (row_ + ii * ROWS_PER_LDG) < min(ROWS, actual_seqlen)) {
fmha::stg(ptr_, data[ii]);
}
}
}
inline __device__ void move(const int steps = 1) {
// ptr += (int64_t)ROWS * row_stride_in_bytes * steps;
ptr += (uint32_t)ROWS * row_stride_in_bytes * steps;
actual_seqlen -= ROWS * steps;
}
    // The stride between rows for the QKV matrices.
// int64_t row_stride_in_bytes;
const uint32_t row_stride_in_bytes;
// The pointer.
char *ptr;
// The fetch registers.
uint4 fetch_[LDGS];
// Keep track of the row the thread is processing as we move the tile.
// int row_;
const int tidx_;
// The length of the sequence loaded by that memory tile.
int actual_seqlen;
const bool col_predicate;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template<
typename Cta_tile,
int BYTES_PER_ELEMENT = 2
>
struct Gmem_tile_o {
static_assert(BYTES_PER_ELEMENT == 2 || BYTES_PER_ELEMENT == 4);
// The mma tile.
using Mma_tile = fmha::Hmma_tile<Cta_tile>;
// The size of each element.
// static constexpr int BYTES_PER_ELEMENT = 2;
// The size of each STG.
static constexpr int BYTES_PER_STG = BYTES_PER_ELEMENT * 4;
static constexpr int COLS = Cta_tile::N;
// The size of a row in bytes.
static constexpr int BYTES_PER_ROW = COLS * BYTES_PER_ELEMENT;
// The number of threads to store a "row" of the matrix.
static constexpr int THREADS_PER_ROW = BYTES_PER_ROW / BYTES_PER_STG;
    // The total number of "rows" in the CTA tile.
static constexpr int ROWS = Cta_tile::M;
// The number of "rows" stored per iteration of the loop. The output of 1 MMA.
static constexpr int ROWS_PER_LOOP = ROWS <= 64 ? ROWS : (int)Mma_tile::M_PER_MMA_PER_CTA;
    // The number of outer loops for the stores.
static constexpr int LOOPS = ROWS / ROWS_PER_LOOP;
// The number of "rows" stored per STG.
static constexpr int ROWS_PER_STG = Cta_tile::THREADS_PER_CTA / THREADS_PER_ROW;
// Do we have to guard against partial writes/reads.
static constexpr bool HAS_INCOMPLETE_STG = Cta_tile::M % ROWS_PER_STG != 0;
// The number of STGs needed to store a chunk of the Q matrix.
static constexpr int STGS_PER_LOOP = DivUpConstexpr(ROWS_PER_LOOP, ROWS_PER_STG);
// The number of STGs needed to store a chunk of the Q matrix in total.
static constexpr int STGS = STGS_PER_LOOP * LOOPS;
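    // Worked example (illustrative, for BYTES_PER_ELEMENT = 2, Cta_tile::N = 64 and 128 threads per CTA):
    //   BYTES_PER_STG   = 2 * 4    = 8
    //   BYTES_PER_ROW   = 64 * 2   = 128
    //   THREADS_PER_ROW = 128 / 8  = 16
    //   ROWS_PER_STG    = 128 / 16 = 8
    // ROWS_PER_LOOP, LOOPS and STGS additionally depend on the MMA tile shape.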
// Ctor.
template<typename BInfo>
// inline __device__ Gmem_tile_o(void *ptr, const size_t row_stride_in_elts, const BInfo &binfo, const int tidx)
inline __device__ Gmem_tile_o(void *ptr, const uint32_t row_stride_in_elts,
const uint32_t head_stride_in_elts, const int headdim,
const BInfo &binfo, const int tidx)
: row_stride_in_bytes(row_stride_in_elts * BYTES_PER_ELEMENT)
, actual_seqlen_q(binfo.actual_seqlen_q)
, ptr_(reinterpret_cast<char *>(ptr))
, tidx_(tidx)
, col_predicate((tidx % THREADS_PER_ROW) * (BYTES_PER_STG / BYTES_PER_ELEMENT) < headdim) {
// Compute the position in the sequence (within the CTA for the moment).
int row = tidx / THREADS_PER_ROW;
// Compute the position of the thread in the row.
int col = tidx % THREADS_PER_ROW;
// Store the row as we need it to disable loads.
// row_ = row;
// The row offset in the batched GEMM.
// int64_t row_offset = (int64_t)row * row_stride_in_bytes + binfo.bidx * BYTES_PER_ROW;
uint32_t row_offset = (uint32_t)((binfo.sum_s_q + row) * row_stride_in_bytes);
row_offset += (uint32_t)(binfo.bidh * head_stride_in_elts * BYTES_PER_ELEMENT);
// Assemble the final pointer.
ptr_ += row_offset + col * BYTES_PER_STG;
// Is that thread active on the last STG?
if( HAS_INCOMPLETE_STG ) {
is_active_for_last_stg_ = row + (STGS - 1) * ROWS_PER_STG < Cta_tile::M;
}
}
// Store data to global memory.
template<typename elem_type=__half>
inline __device__ void store(const uint4 (&src)[STGS_PER_LOOP], int mi) {
int row_ = tidx_ / THREADS_PER_ROW;
#pragma unroll
for( int ii = 0; ii < STGS_PER_LOOP; ++ii ) {
int jj = mi * STGS_PER_LOOP + ii;
if ((!col_predicate) || (row_ + jj * ROWS_PER_STG >= this->actual_seqlen_q)) {
break;
}
if (BYTES_PER_ELEMENT == 4) {
if( !HAS_INCOMPLETE_STG || (jj < STGS - 1 || this->is_active_for_last_stg_) ) {
fmha::stg(this->ptr_ + jj * ROWS_PER_STG * this->row_stride_in_bytes, src[ii]);
}
} else if (BYTES_PER_ELEMENT == 2) {
float x = reinterpret_cast<const float &>(src[ii].x);
float y = reinterpret_cast<const float &>(src[ii].y);
float z = reinterpret_cast<const float &>(src[ii].z);
float w = reinterpret_cast<const float &>(src[ii].w);
uint2 out = fmha::float4_pack<elem_type>(x, y, z, w);
if( !HAS_INCOMPLETE_STG || (jj < STGS - 1 || this->is_active_for_last_stg_) ) {
fmha::stg(this->ptr_ + jj * ROWS_PER_STG * this->row_stride_in_bytes, out);
}
}
}
}
// Store data to global memory with atomicAdd.
inline __device__ void atomic_add(const uint4 (&src)[STGS_PER_LOOP], int mi) {
static_assert(BYTES_PER_ELEMENT == 4); // Only do atomic add on floats
int row_ = tidx_ / THREADS_PER_ROW;
#pragma unroll
for( int ii = 0; ii < STGS_PER_LOOP; ++ii ) {
int jj = mi * STGS_PER_LOOP + ii;
if ((!col_predicate) || (row_ + jj * ROWS_PER_STG >= this->actual_seqlen_q)) {
break;
}
if( !HAS_INCOMPLETE_STG || (jj < STGS - 1 || this->is_active_for_last_stg_) ) {
float *ptr_ = reinterpret_cast<float *>(this->ptr_ + jj * ROWS_PER_STG * this->row_stride_in_bytes);
#pragma unroll
                for (int kk = 0; kk < 4; ++kk) {
                    atomicAdd(ptr_ + kk, reinterpret_cast<const float(&)[4]>(src[ii])[kk]);
}
}
}
}
// Load data from global memory.
inline __device__ void load(uint4 (&dst)[STGS_PER_LOOP], int mi) {
static_assert(BYTES_PER_ELEMENT == 4);
int row_ = tidx_ / THREADS_PER_ROW;
#pragma unroll
for( int ii = 0; ii < STGS_PER_LOOP; ++ii ) {
int jj = mi * STGS_PER_LOOP + ii;
if ((!col_predicate) || (row_ + jj * ROWS_PER_STG >= this->actual_seqlen_q)) {
break;
}
if( !HAS_INCOMPLETE_STG || (jj < STGS - 1 || this->is_active_for_last_stg_) ) {
fmha::ldg(dst[ii], this->ptr_ + jj * ROWS_PER_STG * this->row_stride_in_bytes);
}
}
}
inline __device__ void move(const int steps = 1) {
// row_ += ROWS * steps;
// ptr_ += (int64_t)ROWS * row_stride_in_bytes * steps;
ptr_ += (uint32_t)ROWS * row_stride_in_bytes * steps;
actual_seqlen_q -= ROWS * steps;
}
    // The stride between rows for the O matrix.
// int64_t row_stride_in_bytes;
const uint32_t row_stride_in_bytes;
// The pointer.
char *ptr_;
// Is the thread active for the last STG?
int is_active_for_last_stg_;
// The length of the sequence loaded by that memory tile.
int actual_seqlen_q;
const int tidx_;
const bool col_predicate;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Cta_tile, int BYTES_PER_ELEMENT >
struct Gmem_tile_mma_sd {
// The mma tile.
using Mma_tile = fmha::Hmma_tile<Cta_tile>;
// Each STG stores 8 elements.
static constexpr int BYTES_PER_STG = BYTES_PER_ELEMENT * 8;
// The number of MMAs in the M dimension.
static constexpr int MMAS_M = Mma_tile::MMAS_M;
// The number of MMAs in the N dimension.
static constexpr int MMAS_N = Mma_tile::MMAS_N;
// The number of rows computed per MMA per thread block.
static constexpr int M_PER_MMA_PER_CTA = Mma_tile::M_PER_MMA_PER_CTA;
// The number of cols computed per MMA per thread block.
static constexpr int N_PER_MMA_PER_CTA = Mma_tile::N_PER_MMA_PER_CTA;
// The number of threads per block.
static constexpr int THREADS_PER_CTA = Cta_tile::THREADS_PER_CTA;
// The size of each row in bytes. I.e. how many bytes are stored per STG.
static constexpr int BYTES_PER_ROW = THREADS_PER_CTA * BYTES_PER_STG;
// The distance between elements stored per loop (in bytes).
static constexpr int LOOP_STRIDE_BYTES = MMAS_M * MMAS_N * BYTES_PER_ROW;
// The type of elements stored per STG.
using Type = typename fmha::Uint_from_size_in_bytes<BYTES_PER_STG>::Type;
// Ctor.
template<typename Params>
inline __device__ Gmem_tile_mma_sd(void *ptr, const Params ¶ms, const int bidb, const int bidh, const int tidx)
: ptr_(static_cast<char *>(ptr)) {
// The block index.
// size_t bidx = bidb * params.h + bidh;
uint32_t bidx = bidb * params.h + bidh;
// The distance between two blocks (in bytes).
// const size_t block_stride_bytes = params.seqlen_q * params.seqlen_k * BYTES_PER_ELEMENT;
const uint32_t block_stride_bytes = params.seqlen_q * params.seqlen_k * BYTES_PER_ELEMENT;
// Set store location for each thread at the beginning of the loop
ptr_ += bidx * block_stride_bytes + tidx * BYTES_PER_STG;
}
// Store to global memory.
inline __device__ void store(const Type &data, const int mi, const int ni) {
// size_t offset = (mi * MMAS_N + ni) * BYTES_PER_ROW;
uint32_t offset = (mi * MMAS_N + ni) * BYTES_PER_ROW;
fmha::stg(ptr_ + offset, data);
}
// Load from global memory.
inline __device__ void load(Type &data, const int mi, const int ni) {
// size_t offset = (mi * MMAS_N + ni) * BYTES_PER_ROW;
uint32_t offset = (mi * MMAS_N + ni) * BYTES_PER_ROW;
fmha::ldg(data, ptr_ + offset);
}
// Move to the next tile.
inline __device__ void move(const int steps = 1) {
ptr_ += LOOP_STRIDE_BYTES * steps;
}
// The pointer in global memory.
char *ptr_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Cta_tile, typename Base = Gmem_tile_mma_sd<Cta_tile, sizeof(uint16_t)> >
struct Gmem_tile_mma_s : public Base {
// The number of mmas in the vertical dimension.
static constexpr int M = Base::MMAS_M;
// The number of mmas in the horizontal dimension.
static constexpr int N = Base::MMAS_N;
// The type of the vectors stored by each STG.
using Type = typename Base::Type;
// Ctor.
template< typename Params, typename Block_info >
inline __device__ Gmem_tile_mma_s(const Params ¶ms, const Block_info& binfo, const int tidx)
: Base(params.s_ptr, params, binfo.bidb, binfo.bidh, tidx) {
}
// Store to global memory.
template<typename Mask, typename Fragment>
inline __device__ void store(const Fragment (&frag)[N][M], const Mask& mask){
#pragma unroll
for( int mi = 0; mi < M; mi++ ) {
#pragma unroll
for( int ni = 0; ni < N; ni++ ) {
uint4 dst;
dst.x = frag[ni][mi].reg(0);
dst.y = frag[ni][mi].reg(2);
dst.z = frag[ni][mi].reg(1);
dst.w = frag[ni][mi].reg(3);
if( mask.any_valid(mi, ni) ) {
Base::store(dst, mi, ni);
}
}
}
}
// Load from global memory.
template<typename Mask>
inline __device__ void load(uint4 (®s)[M][N], const Mask &mask) {
#pragma unroll
for( int mi = 0; mi < M; mi++ ) {
#pragma unroll
for( int ni = 0; ni < N; ni++ ) {
regs[mi][ni] = make_uint4(0, 0, 0, 0);
if( mask.any_valid(mi, ni) ) {
Base::load(regs[mi][ni], mi, ni);
}
}
}
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
template<
// The dimensions of the tile computed by the CTA.
typename Cta_tile
>
struct Gmem_summary_stats {
// The Mma tile.
using Mma_tile = fmha::Hmma_tile<Cta_tile>;
// The number of MMAs in M/N dimensions.
static constexpr int MMAS_M = Mma_tile::MMAS_M;
// The size of each element.
static constexpr int BYTES_PER_ELEMENT = 4;
static constexpr int BYTES_PER_MMA = (Cta_tile::THREADS_PER_WARP / 4) * 2 * BYTES_PER_ELEMENT;
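    // With 32 threads per warp this is (32 / 4) * 2 * 4 = 64 bytes per MMA, i.e. one float
    // per row for the 16 rows a warp covers per MMA (assuming the usual m16n8 HMMA layout).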
static constexpr int ROWS = Cta_tile::M;
// Ctor.
template<typename Params>
inline __device__ Gmem_summary_stats(void *ptr, const Params ¶ms, const int tidx)
: ptr_(reinterpret_cast<char *>(ptr)), tidx_(tidx) {
// The block index for the batch.
const int bidb = blockIdx.x;
// The block index for the head.
const int bidh = blockIdx.y;
// The block index.
// size_t bidx = bidb * params.h + bidh;
uint32_t bidx = bidb * params.h + bidh;
// Extract the position in the warp.
int warp = tidx / Cta_tile::THREADS_PER_WARP;
int lane = tidx % Cta_tile::THREADS_PER_WARP;
// The distance between two blocks (in bytes).
// size_t block_stride_bytes = params.seqlen_q * BYTES_PER_ELEMENT;
uint32_t block_stride_bytes = params.seqlen_q * BYTES_PER_ELEMENT;
// Set store location for each thread at the beginning of the loop
ptr_row_ = ptr_ + bidx * block_stride_bytes;
ptr_ += bidx * block_stride_bytes + (lane / 4) * BYTES_PER_ELEMENT;
}
// Store data to global memory.
inline __device__ void store(const uint32_t (&data)[MMAS_M * 2]) {
int warp = tidx_ / Cta_tile::THREADS_PER_WARP;
int lane = tidx_ % Cta_tile::THREADS_PER_WARP;
if ((warp == 0) && (lane % 4 == 0)) {
#pragma unroll
for (int mi = 0; mi < MMAS_M; ++mi) {
// TODO: Not sure if it's right for MMAS_M > 1
fmha::stg(ptr_ + mi * BYTES_PER_MMA + 0 * BYTES_PER_ELEMENT, data[mi * 2 + 0]);
fmha::stg(ptr_ + mi * BYTES_PER_MMA + 8 * BYTES_PER_ELEMENT, data[mi * 2 + 1]);
}
}
}
// Store data to global memory.
inline __device__ void store_row(const uint32_t (&data)[MMAS_M], const int row) {
#pragma unroll
for (int mi = 0; mi < MMAS_M; ++mi) {
// TODO: Not sure if it's right for MMAS_M > 1
fmha::stg(ptr_row_ + mi * BYTES_PER_MMA + row * BYTES_PER_ELEMENT, data[mi]);
}
}
// Load from global memory.
inline __device__ void load(uint32_t (&data)[MMAS_M * 2]) {
#pragma unroll
for (int mi = 0; mi < MMAS_M; ++mi) {
// TODO: Not sure if it's right for MMAS_M > 1
fmha::ldg(data[mi * 2 + 0], ptr_ + mi * BYTES_PER_MMA + 0 * BYTES_PER_ELEMENT);
fmha::ldg(data[mi * 2 + 1], ptr_ + mi * BYTES_PER_MMA + 8 * BYTES_PER_ELEMENT);
}
}
// Load from global memory.
inline __device__ void load_next(uint32_t (&data)[MMAS_M * 2], int move_steps=1) {
char *ptr_next = ptr_ + move_steps * ROWS * BYTES_PER_ELEMENT;
#pragma unroll
for (int mi = 0; mi < MMAS_M; ++mi) {
// TODO: Not sure if it's right for MMAS_M > 1
fmha::ldg(data[mi * 2 + 0], ptr_next + mi * BYTES_PER_MMA + 0 * BYTES_PER_ELEMENT);
fmha::ldg(data[mi * 2 + 1], ptr_next + mi * BYTES_PER_MMA + 8 * BYTES_PER_ELEMENT);
}
}
    // Load data from global memory at the given rows.
template <int N>
inline __device__ void load_row(uint32_t (&data)[N], const int row[N]) {
#pragma unroll
for (int ni = 0; ni < N; ++ni) {
fmha::ldg(data[ni], ptr_row_ + row[ni] * BYTES_PER_ELEMENT);
}
}
// Move the pointer to the next location.
inline __device__ void move() {
ptr_ += ROWS * BYTES_PER_ELEMENT;
ptr_row_ += ROWS * BYTES_PER_ELEMENT;
}
// Move the pointer to the next location.
inline __device__ void move(const int steps) {
ptr_ += ROWS * BYTES_PER_ELEMENT * steps;
ptr_row_ += ROWS * BYTES_PER_ELEMENT * steps;
}
// The pointer.
char *ptr_;
char *ptr_row_;
const int tidx_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace fmha
| 22,937
| 40.32973
| 119
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/kernel_traits.h
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <ATen/cuda/CUDAContext.h>
#include <cuda_fp16.h>
#include <ATen/native/transformers/cuda/flash_attn/gemm.h>
#include <ATen/native/transformers/cuda/flash_attn/gmem_tile.h>
#pragma once
////////////////////////////////////////////////////////////////////////////////////////////////////
template<int S, int D, int STEP, int WARPS_M, int WARPS_N, uint32_t FLAGS = 0x08u, typename elem_type_=__half>
struct FMHA_kernel_traits {
// The CTA description for the 1st GEMM.
using Cta_tile_p = fmha::Cta_tile_extd<STEP, S, D, WARPS_M, WARPS_N, 1>;
// The CTA description for the 2nd GEMM.
using Cta_tile_o = fmha::Cta_tile_extd<STEP, D, S, WARPS_M, 1, WARPS_N>;
// Do we use one buffer for K and V.
static constexpr bool SHARE_SMEM_FOR_K_AND_V = (FLAGS & 0x08u) != 0u;
// Do we keep K in registers.
static constexpr bool K_IN_REGS = (FLAGS & 0x10u) == 0u;
// Do we keep V in registers.
static constexpr bool V_IN_REGS = (FLAGS & 0x100u) == 0u;
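    // With the default FLAGS = 0x08u: K and V share a single smem buffer, and both K and V
    // are kept in registers.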
// The global memory tile to load Q.
using Gmem_tile_q = fmha::Gmem_tile_qkv<Cta_tile_p, fmha::BITS_PER_ELEMENT_A, STEP, D>;
// The shared memory tile to swizzle Q.
// using Smem_tile_q = fmha::Smem_tile_a<Cta_tile_p, fmha::Row, Gmem_tile_q::BYTES_PER_LDG, 1>;
using Smem_tile_q = fmha::Smem_tile_a<Cta_tile_p, fmha::Row, Gmem_tile_q::BYTES_PER_LDG, 2>;
// The global memory tile to load K.
using Gmem_tile_k = fmha::Gmem_tile_qkv<Cta_tile_p, fmha::BITS_PER_ELEMENT_B, S, D>;
// The shared memory tile to swizzle K.
using Smem_tile_k = fmha::Smem_tile_b<Cta_tile_p, fmha::Col>;
// The global memory tile to load V.
using Gmem_tile_v = fmha::Gmem_tile_qkv<Cta_tile_o, fmha::BITS_PER_ELEMENT_B, S, D>;
// The shared memory tile to swizzle V.
using Smem_tile_v = fmha::Smem_tile_v<Cta_tile_o>;
// The global memory tile to store O.
using Gmem_tile_o = fmha::Gmem_tile_o<Cta_tile_o>;
// The shared memory tile for O.
    using Smem_tile_o = fmha::Smem_tile_o<Cta_tile_o>;
// The global memory tile to load/store S.
using Gmem_tile_s = fmha::Gmem_tile_mma_s<Cta_tile_p>;
// The shared memory tile to transpose S.
using Smem_tile_st = fmha::Smem_tile_mma_transposed<Cta_tile_p>;
using Gmem_tile_do = fmha::Gmem_tile_qkv<Cta_tile_p, fmha::BITS_PER_ELEMENT_A, STEP, D>;
// // The global memory tile to store the accumulated dK and dV
// // Hack: we set BYTES_PER_LDGS=32 to emulate the access pattern of dK and dV
    // // where there are 16 bits per element and 16 bytes per load. In reality we won't
    // // be issuing any load or store of size 32 bytes.
// using Gmem_tile_dkv_accum = fmha::Gmem_tile_qkv<Cta_tile_o, 32, S, D, 32>;
// The global memory tile to store the softmax sum.
using Gmem_softmax_sum = fmha::Gmem_summary_stats<Cta_tile_p>;
// The shared memory tile to store dp sum.
using Smem_dp_sum = fmha::Smem_tile_dp_sum<Gmem_tile_q, 2>;
using elem_type = elem_type_;
    // Make sure the number of threads matches.
static_assert((int)Gmem_tile_o::THREADS_PER_ROW == (int)Smem_tile_o::THREADS_PER_ROW, "");
// The number of threads.
static constexpr int THREADS = Cta_tile_p::THREADS_PER_CTA;
// Make sure the number of threads matches both CTAs.
static_assert(THREADS == Cta_tile_o::THREADS_PER_CTA, "");
// The amount of shared memory needed to load Q and K.
static constexpr int BYTES_PER_SMEM_QK = Smem_tile_q::BYTES_PER_TILE + Smem_tile_k::BYTES_PER_TILE;
// The extra amount of shared memory needed to load V.
static constexpr int BYTES_PER_SMEM_V = SHARE_SMEM_FOR_K_AND_V ? 0u : Smem_tile_v::BYTES_PER_TILE;
    // The amount of shared memory needed for Q, K and V.
static constexpr int BYTES_PER_SMEM_QKV = BYTES_PER_SMEM_QK + BYTES_PER_SMEM_V;
// The amount of shared memory needed to load Q and store O.
static constexpr int BYTES_PER_SMEM_QO = Smem_tile_q::BYTES_PER_TILE + Smem_tile_o::BYTES_PER_TILE;
// The amount of shared memory needed for Q, K, V and O.
static constexpr int BYTES_PER_SMEM = fmha::MaxConstexpr(BYTES_PER_SMEM_QKV, BYTES_PER_SMEM_QO);
// Make sure we have enough shared memory.
static_assert(Smem_tile_q::BYTES_PER_TILE + Smem_tile_o::BYTES_PER_TILE <= BYTES_PER_SMEM, "");
};
////////////////////////////////////////////////////////////////////////////////////////////////////
| 6,155
| 49.459016
| 110
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/flash_attn/static_switch.h
|
// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
// and https://github.com/facebookresearch/xformers/blob/main/xformers/csrc/attention/cuda/fmha/gemm_kernel_utils.h#L8
#pragma once
/// @param COND - a boolean expression to switch by
/// @param CONST_NAME - a name given for the constexpr bool variable.
/// @param ... - code to execute for true and false
///
/// Usage:
/// ```
/// BOOL_SWITCH(flag, BoolConst, ([&] {
/// some_function<BoolConst>(...);
/// }));
/// ```
/// We need "({" and "})" to make sure that the code is a single argument being passed to the macro.
#define BOOL_SWITCH(COND, CONST_NAME, F) \
{ \
if (COND) { \
constexpr bool CONST_NAME = true; \
F(); \
} else { \
constexpr bool CONST_NAME = false; \
F(); \
} \
}
// modified from BOOL_SWITCH
// because MSVC cannot handle std::conditional with constexpr variable
#define FP16_SWITCH(COND, F) \
{ \
if (COND) { \
using elem_type = __nv_bfloat16; \
F(); \
} else { \
using elem_type = __half; \
F(); \
} \
}
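// Illustrative usage (hypothetical call site; the kernel-traits arguments and launcher name
// are examples only):
//   FP16_SWITCH(params.is_bf16, ([&] {
//     using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;
//     run_fmha_fwd_loop<Kernel_traits>(params, stream);
//   }));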
| 1,686
| 40.146341
| 118
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/debug_utils.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cfloat>
#include <cstdio>
#include <cmath>
////////////////////////////////////////////////////////////////////////////////
// Debugging functions
////////////////////////////////////////////////////////////////////////////////
// Nans & inf detection
#define NANCHECK(frag) \
{ \
for (int _i = 0; _i < frag.size(); ++_i) { \
assert(std::isfinite(float(frag[_i]))); \
assert(!std::isnan(float(frag[_i]))); \
} \
}
// Print on the first thread of the first block
#if 1
#define PRINT_WARP_ID 0
#define PRINT_LANE_ID 0
#define PRINT_B0_T0(msg, ...) \
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && \
threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \
threadIdx.z == 0) { \
printf(msg "\n", ##__VA_ARGS__); \
}
#define PRINT_T0(msg, ...) \
if (threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \
threadIdx.z == 0) { \
printf(msg "\n", ##__VA_ARGS__); \
}
#define PRINT_TX_LX(msg, ...) \
for (int bx = 0; bx < gridDim.x; ++bx) { \
for (int by = 0; by < gridDim.y; ++by) { \
for (int bz = 0; bz < gridDim.z; ++bz) { \
for (int tx = 0; tx < blockDim.x; ++tx) { \
for (int ty = 0; ty < blockDim.y; ++ty) { \
for (int tz = 0; tz < blockDim.z; ++tz) { \
__syncthreads(); \
if (blockIdx.x == bx && blockIdx.y == by && blockIdx.z == bz && \
threadIdx.x == tx && threadIdx.y == ty && \
threadIdx.z == tz) { \
printf( \
"[%d,%d,%d][%d,%d,%d]" msg "\n", \
bx, \
by, \
bz, \
tx, \
ty, \
tz, \
##__VA_ARGS__); \
} \
} \
} \
} \
} \
} \
}
#else
#define PRINT_B0_T0
#define PRINT_TX_LX
#endif
struct __string_view {
char const* data;
std::size_t size;
};
#if __cplusplus >= 201402L
template <class T>
constexpr __string_view __get_type_name() {
char const* p = __PRETTY_FUNCTION__;
while (*p++ != '=')
;
for (; *p == ' '; ++p)
;
char const* p2 = p;
int count = 1;
for (;; ++p2) {
switch (*p2) {
case '[':
++count;
break;
case ']':
--count;
if (!count)
return {p, std::size_t(p2 - p)};
}
}
return {};
}
#else
template <class T>
constexpr __string_view __get_type_name() {
return {"unsupported", 11};
}
#endif
// Print a given array
#define PRINT_ACCUM8_T0_L0_START(name, accum, start) \
PRINT_B0_T0( \
"%s[%d:%d] - {%f, %f, %f, %f, %f, %f, %f, %f}", \
name, \
int(start), \
int(start + 8), \
float(accum[start + 0]), \
float(accum[start + 1]), \
float(accum[start + 2]), \
float(accum[start + 3]), \
float(accum[start + 4]), \
float(accum[start + 5]), \
float(accum[start + 6]), \
float(accum[start + 7]));
#define PRINT_ACCUM8_T0_L0(name, accum) PRINT_ACCUM8_T0_L0_START(name, accum, 0)
#define PRINT_FRAG_T0_L0(name, frag) \
{ \
auto typeStr = __get_type_name<decltype(frag)>(); \
PRINT_B0_T0("printing %s (%s)", name, typeStr.data); \
for (int _start = 0; _start < frag.size(); _start += 8) { \
PRINT_ACCUM8_T0_L0_START(" ", frag, _start); \
} \
/*__syncthreads(); \
NANCHECK(frag); */ \
}
#define PRINT_ARRAY_T0_L0_INCR(name, array, length, incr) \
{ \
PRINT_B0_T0("printing %s (len=%d)", name, int(length)); \
for (int _start = 0; _start < length; _start += incr) { \
PRINT_ACCUM8_T0_L0_START(" ", array, _start); \
} \
}
#define PRINT_ARRAY_T0_L0(name, array, length) \
PRINT_ARRAY_T0_L0_INCR(name, array, length, 8)
// Print a 4x4 matrix
#define PRINT_TENSOR4x4_T0_L0_START(name, ref, start_x, start_y) \
PRINT_B0_T0( \
"%s[%d:%d, %d:%d]:\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f", \
name, \
int(start_x), \
int(start_x + 4), \
int(start_y), \
int(start_y + 4), \
float(ref.at({start_x + 0, start_y + 0})), \
float(ref.at({start_x + 0, start_y + 1})), \
float(ref.at({start_x + 0, start_y + 2})), \
float(ref.at({start_x + 0, start_y + 3})), \
float(ref.at({start_x + 1, start_y + 0})), \
float(ref.at({start_x + 1, start_y + 1})), \
float(ref.at({start_x + 1, start_y + 2})), \
float(ref.at({start_x + 1, start_y + 3})), \
float(ref.at({start_x + 2, start_y + 0})), \
float(ref.at({start_x + 2, start_y + 1})), \
float(ref.at({start_x + 2, start_y + 2})), \
float(ref.at({start_x + 2, start_y + 3})), \
float(ref.at({start_x + 3, start_y + 0})), \
float(ref.at({start_x + 3, start_y + 1})), \
float(ref.at({start_x + 3, start_y + 2})), \
float(ref.at({start_x + 3, start_y + 3})));
#define PRINT_TENSOR4x4_T0_L0(name, ref) \
PRINT_TENSOR4x4_T0_L0_START(name, ref, 0, 0)
#define PRINT_PROBLEM_SIZE(name, ps) \
PRINT_B0_T0( \
"%s.problem_size: {.m=%d, .n=%d, .k=%d}", \
name, \
int(ps.m()), \
int(ps.n()), \
int(ps.k()))
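// Illustrative usage of the macros above (hypothetical variables, debugging only):
//   PRINT_B0_T0("iter=%d m_prime=%f", iter, float(m_prime));
//   PRINT_FRAG_T0_L0("accum", accum);
//   PRINT_TENSOR4x4_T0_L0("Q smem", query_ref);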
template <typename LambdaIterator, typename LaneOffsetT, typename AccumT>
CUTLASS_DEVICE void print_warp_accum(
AccumT accum,
LaneOffsetT lane_offset,
int32_t num_rows,
int32_t num_cols) {
bool is_main = blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 &&
threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0;
for (int row = 0; row < num_rows; ++row) {
for (int col = 0; col < num_cols; ++col) {
if (col % 32 == 0) {
if (is_main) {
printf("\nmat[%3d, %3d:%3d]", row, col, col + 32);
}
__syncthreads();
}
LambdaIterator::iterateRows(
lane_offset,
[&](int accum_m) {},
[&](int accum_m, int accum_n, int idx) {
if (row == accum_m && col == accum_n &&
(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0)) {
printf(" %6.1f", float(accum[idx]));
}
},
[&](int accum_m) {});
__syncthreads();
}
if (is_main) {
printf("\n");
}
}
}
| 10,254
| 47.601896
| 108
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm_kernel_utils.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cutlass/arch/mma.h>
////////////////////////////////////////////////////////////////////////////////
// Some helper functions
////////////////////////////////////////////////////////////////////////////////
#define DISPATCH_TYPES(tensor, func) \
{ \
if (query.scalar_type() == at::ScalarType::Float) { \
using scalar_t = float; \
func(); \
} else if (query.scalar_type() == at::ScalarType::Half) { \
using scalar_t = cutlass::half_t; \
func(); \
} else if (query.scalar_type() == at::ScalarType::BFloat16) { \
using scalar_t = cutlass::bfloat16_t; \
func(); \
} else { \
TORCH_CHECK(false, "Only fp32, half & bf16 supported at the moment"); \
} \
}
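// Note: the macro above dispatches on `query.scalar_type()` regardless of the `tensor`
// argument, so a tensor named `query` must be in scope at the call site.
// Illustrative usage (hypothetical launcher name):
//   DISPATCH_TYPES(query, ([&]() { launch_attention_kernel<scalar_t>(params); }));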
#define DISPATCH_BOOL(BOOL_V, BOOL_NAME, F) \
{ \
if (BOOL_V) { \
constexpr bool BOOL_NAME = true; \
F(); \
} else { \
constexpr bool BOOL_NAME = false; \
F(); \
} \
}
#define DISPATCH_ARCHTAG(CC, func) \
{ \
if (CC >= 80) { \
using ArchTag = cutlass::arch::Sm80; \
func(); \
} else if (CC >= 75) { \
using ArchTag = cutlass::arch::Sm75; \
func(); \
} else if (CC >= 70) { \
using ArchTag = cutlass::arch::Sm70; \
func(); \
} else if (CC >= 50) { \
using ArchTag = cutlass::arch::Sm50; \
func(); \
} else { \
TORCH_CHECK( \
false, \
"Your device is too old. We require compute capability >= 50"); \
} \
}
#define CHECK_NOSPARSE_CONTIGUOUS_CUDA(TENSOR) \
TORCH_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
TORCH_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
TORCH_CHECK(TENSOR.is_contiguous());
#define CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(TENSOR) \
TORCH_CHECK(TENSOR.is_cuda(), #TENSOR " must be a CUDA tensor"); \
TORCH_CHECK(!TENSOR.is_sparse(), #TENSOR " must be a dense tensor"); \
TORCH_CHECK( \
TENSOR.stride(-1) == 1, #TENSOR ": last dimension must be contiguous");
#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \
TORCH_CHECK( \
uint64_t(PTR) % ALIGNMENT == 0, #PTR " is not correctly aligned")
#define ASSIGN_CHECK_OVERFLOW(A, B) \
{ \
A = B; \
TORCH_CHECK( \
B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
}
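// Illustrative usage (hypothetical parameter field): narrow a 64-bit stride into a 32-bit
// kernel parameter while checking that the value fits:
//   ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));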
namespace gemm_kernel_utils {
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
template <typename integer>
constexpr CUTLASS_HOST_DEVICE integer align_up(integer n, integer m) {
return ((n + m - 1) / m) * m;
}
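// For example: ceil_div(10, 4) == 3 and align_up(10, 4) == 12.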
////////////////////////////////////////////////////////////////////////////////
// Determine the type of GEMM we do (TensorCores or not, Shapes ...)
// TODO: Maybe we could rely on Cutlass's DefaultGemm templates
////////////////////////////////////////////////////////////////////////////////
// Fallback to Simt (FMA on cuda cores) if not in a special case below
template <typename ArchTag, typename scalar_t_, typename Enable = void>
struct DefaultGemmType {
static constexpr int ThreadK = 8;
static constexpr int WarpK = 8;
static constexpr int kMinimumAlignment = 1;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using OpClass = cutlass::arch::OpClassSimt;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f32
template <typename ArchTag>
struct DefaultGemmType<
ArchTag,
float,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 80>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAddFastF32;
};
// Specialization for tensorcores with f16/bf16 - Sm75+
template <typename ArchTag, typename scalar_t>
struct DefaultGemmType<
ArchTag,
scalar_t,
typename cutlass::platform::enable_if<
ArchTag::kMinComputeCapability >= 75 &&
cutlass::sizeof_bits<scalar_t>::value == 16>::type> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 4;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Specialization for tensorcores with f16 - Volta
template <>
struct DefaultGemmType<cutlass::arch::Sm70, cutlass::half_t, void> {
static constexpr int ThreadK = 32;
static constexpr int WarpK = 32;
static constexpr int kMinimumAlignment = 2;
using OpClass = cutlass::arch::OpClassTensorOp;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
using Operator = cutlass::arch::OpMultiplyAdd;
};
// Allows writing
// `auto x = kCondition ? fa(arg) : fb(arg)`
// when `fa` and `fb` have different types
template <bool kVal, typename TA, typename TB>
struct call_conditional;
template <typename TA, typename TB>
struct call_conditional<true, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(ta(arg)) {
return ta(arg);
}
};
template <typename TA, typename TB>
struct call_conditional<false, TA, TB> {
template <typename Arg>
static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg)
-> decltype(tb(arg)) {
return tb(arg);
}
};
////////////////////////////////////////////////////////////////////////////////
// Mark a variable as warp-uniform - enables some compiler optimizations
// The cheapest way to do it is just to broadcast it from lane 0
////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_DEVICE T warp_uniform(T value) {
struct {
union {
T value;
uint32_t asInt;
};
} p;
p.value = value;
p.asInt = __shfl_sync(0xffffffff, (unsigned)p.asInt, 0);
return p.value;
}
template <typename T>
CUTLASS_DEVICE T* warp_uniform(T* ptr) {
struct {
union {
T* ptr;
uint32_t asInt[2];
};
} p;
p.ptr = ptr;
p.asInt[0] = warp_uniform(p.asInt[0]);
p.asInt[1] = warp_uniform(p.asInt[1]);
return p.ptr;
}
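// Illustrative usage (hypothetical variable): after a potentially divergent computation,
// broadcast lane 0's value so the compiler can treat it as warp-uniform:
//   smem_ptr = warp_uniform(smem_ptr);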
} // namespace gemm_kernel_utils
| 8,608
| 39.995238
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <ATen/core/TensorBody.h>
#include <c10/util/Exception.h>
#include <cutlass/bfloat16.h>
#include <cutlass/half.h>
/**
* kernels expect 4D bias/bias.grad with shape
* (batch_sz, n_heads, n_queries, n_keys). common bias shapes users may pass
* are:
* - (n_queries, n_keys)
* - (batch_sz * n_heads, n_queries, n_keys)
* - (batch_sz, n_heads, n_queries, n_keys)
*
* expand the bias as needed - be careful to only create a view with different
* shape/strides, no copies allowed.
*/
inline at::Tensor get_bias_4d_view(
const at::Tensor& bias,
int batch_sz,
int n_heads,
int n_queries,
int n_keys) {
TORCH_CHECK(
bias.size(-2) == n_queries,
"bias.size(-2) != n_queries: ",
bias.size(-2),
" != ",
n_queries);
TORCH_CHECK(
bias.size(-1) == n_keys,
"bias.size(-1) != n_keys: ",
bias.size(-1),
" != ",
n_keys);
switch (bias.dim()) {
case 2: // (n_queries, n_keys) - broadcast across all batches and heads
return bias.unsqueeze(0).unsqueeze(0).expand(
{batch_sz, n_heads, n_queries, n_keys});
case 3: // (batch_sz * n_heads, n_queries, n_keys) - just reshape
TORCH_CHECK(bias.size(0) == batch_sz * n_heads);
return bias.view({batch_sz, n_heads, n_queries, n_keys});
case 4: // (batch_sz, n_heads, n_queries, n_keys) - do nothing
TORCH_CHECK(bias.size(0) == batch_sz);
      TORCH_CHECK(bias.size(1) == n_heads);
return bias;
default:
TORCH_CHECK(false, "bias can only have ndims in {2, 3, 4}");
}
}
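// Illustrative usage (hypothetical shapes): expand a 2D bias to a 4D view without copying:
//   at::Tensor bias = at::randn({n_queries, n_keys}, query.options());
//   at::Tensor bias4d = get_bias_4d_view(bias, batch_sz, n_heads, n_queries, n_keys);
//   // bias4d.sizes() == (batch_sz, n_heads, n_queries, n_keys)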
template <typename scalar_t>
struct CutlassToAtenDtype;
template <>
struct CutlassToAtenDtype<cutlass::half_t> {
using scalar_t = cutlass::half_t;
static constexpr __host__ at::ScalarType atScalarType() {
return at::ScalarType::Half;
}
};
template <>
struct CutlassToAtenDtype<cutlass::bfloat16_t> {
using scalar_t = cutlass::bfloat16_t;
static constexpr __host__ at::ScalarType atScalarType() {
return at::ScalarType::BFloat16;
}
};
template <>
struct CutlassToAtenDtype<float> {
using scalar_t = float;
static constexpr __host__ at::ScalarType atScalarType() {
return at::ScalarType::Float;
}
};
| 2,425
| 25.955556
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/epilogue/epilogue_pipelined.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
File copied from <cutlass/epilogue/threadblock/epilogue.h>
then modified to:
(1) load 2 source fragments at the same time (pipelining)
(2) support reading from a different dtype
(3) pass the row id to the OutputOp if it takes it
(see MemoryEfficientAttentionNormalize)
Note that in general the fragment passed to the OutputOp could
span multiple rows but it does not happen with the configurations we have
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <cassert>
#endif
#include <ATen/cuda/CUDAContext.h>
#include <cutlass/aligned_buffer.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/functional.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/layout/vector.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_coord.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/regular_tile_iterator.h>
#include <cutlass/epilogue/threadblock/epilogue_base.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
#include <cutlass/numeric_types.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
template <typename Op>
struct ApplyEpilogueOp {
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
Op const& output_op,
int row_id,
typename Op::FragmentAccumulator const& accum,
typename Op::FragmentOutput const& source) {
return output_op(accum, source);
}
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
Op const& output_op,
int row_id,
typename Op::FragmentAccumulator const& accum) {
return output_op(accum);
}
};
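// The generic version above ignores `row_id`; output operators that need per-row state
// (e.g. MemoryEfficientAttentionNormalize, defined with the rescaling epilogue) are expected
// to provide a specialization of ApplyEpilogueOp that forwards it.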
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator
template <
typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
typename WarpMmaOperator_, ///< Warp-level MMA operator (concept:
///< gemm::warp::MmaTensorOp)
int PartitionsK, ///< Number of partitions of the K dimension
typename OutputTileIterator_, ///< Tile iterator writing output tensors
typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting
///< accumulators
typename WarpTileIterator_, ///< Warp-scoped tile iterator writing
///< accumulators to SMEM
typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading
///< from SMEM
typename OutputOp_, ///< Output operator
typename Padding_, ///< Padding added to SMEM allocation to avoid bank
///< conflicts (concept: MatrixShape)
int FragmentsPerPartition =
        1, ///< Used to coarsen the epilogue granularity
int IterationsUnroll = ///< Used to reduce binary size when epilogue op is
///< large
(!IsEpilogueFunctorHeavy<OutputOp_>::value),
typename OutputTileSourceIterator_ =
OutputTileIterator_ ///< Tile iterator reading tensors
>
class EpiloguePipelined : public EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition> {
public:
using Base = EpilogueBase<
Shape_,
typename WarpMmaOperator_::Shape,
PartitionsK,
AccumulatorFragmentIterator_,
WarpTileIterator_,
Padding_,
FragmentsPerPartition>;
using Shape = Shape_;
using WarpMmaOperator = WarpMmaOperator_;
static int const kPartitionsK = PartitionsK;
using OutputTileIterator = OutputTileIterator_;
using OutputTileSourceIterator = OutputTileSourceIterator_;
using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
using WarpTileIterator = WarpTileIterator_;
using SharedLoadIterator = SharedLoadIterator_;
using OutputOp = OutputOp_;
using Padding = Padding_;
using Layout = layout::RowMajor;
using LongIndex = typename Layout::LongIndex;
/// The complete warp-level accumulator tile
using AccumulatorTile = typename Base::AccumulatorTile;
/// Accumulator element
using ElementAccumulator = typename WarpTileIterator::Element;
/// Output element
using ElementOutput = typename OutputTileIterator::Element;
using ElementSource = typename OutputTileSourceIterator::Element;
/// Output access size
static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
/// Tensor reference to destination tensor
using TensorRef = typename OutputTileIterator::TensorRef;
/// Tensor reference to sync tensor
using SyncTensorRef =
typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
/// Const tensor reference to source tensor
using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
/// Array type used to output
using OutputAccessType = Array<
typename OutputTileIterator::Element,
OutputTileIterator::kElementsPerAccess>;
using SourceAccessType = Array<
typename OutputTileSourceIterator::Element,
OutputTileSourceIterator::kElementsPerAccess>;
/// Array type used by output functor
using AccumulatorAccessType = Array<
typename WarpTileIterator::Element,
OutputTileIterator::kElementsPerAccess>;
/// Number of warps
using WarpCount = typename Base::WarpCount;
static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1
? Base::kFragmentsPerIteration
: kPartitionsK;
static int constexpr kSmemPointerOffset =
Base::SharedStorage::StorageShape::kCount / kSmemTiles;
public:
static_assert(
OutputTileSourceIterator::Fragment::kElements ==
OutputTileIterator::Fragment::kElements,
"Mismatch between input tile and output tile iterator (kElements)");
static_assert(
OutputTileSourceIterator::kIterations == OutputTileIterator::kIterations,
"Mismatch between input tile and output tile iterator (kIterations)");
static_assert(
SharedLoadIterator::Fragment::kElements ==
OutputTileIterator::Fragment::kElements,
"Mismatch between shared load iterator and output tile iterator.");
static_assert(
OutputTileIterator::kElementsPerAccess,
"OutputTileIterator::kElementsPerAccess must not be zero.");
static_assert(
!(OutputTileIterator::Fragment::kElements %
OutputTileIterator::kElementsPerAccess),
"Divisibility");
private:
/// Loads fragment from shared memory aligned with output tensor
SharedLoadIterator shared_load_iterator_;
public:
/// Constructor
CUTLASS_DEVICE
EpiloguePipelined(
typename Base::SharedStorage& shared_storage, ///< Shared storage object
int thread_idx, ///< ID of a thread within the threadblock
int warp_idx, ///< ID of warp within threadblock
int lane_idx ///< Id of thread within warp
)
: Base(shared_storage, thread_idx, warp_idx, lane_idx),
shared_load_iterator_(shared_storage.reference(), thread_idx) {}
/// Streams the result to global memory
CUTLASS_DEVICE
void operator()(
OutputOp const& output_op, ///< Output operator
OutputTileIterator
destination_iterator, ///< Tile iterator for destination
AccumulatorTile const&
accumulators, ///< Complete warp-level accumulator tile
OutputTileSourceIterator
source_iterator) { ///< Threadblock tile coordinate in GEMM (in units
///< of threadblock tiles)
if (!output_op.is_source_needed()) {
compute_source_not_needed_(output_op, destination_iterator, accumulators);
} else {
compute_source_needed_(
output_op, destination_iterator, accumulators, source_iterator);
}
}
CUTLASS_DEVICE
void operator()(
OutputOp const& output_op, ///< Output operator
OutputTileIterator
destination_iterator, ///< Tile iterator for destination
AccumulatorTile const&
accumulators) { ///< Complete warp-level accumulator tile
compute_source_not_needed_(output_op, destination_iterator, accumulators);
}
private:
template <class Seq>
struct acc2smem_source_not_needed;
template <size_t... Seq>
struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(
AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator& warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
++accum_fragment_iterator;
warp_tile_iterator.store(accum_fragment);
if (p < Base::kFragmentsPerIteration - 1) {
warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
}
}
if (Base::kFragmentsPerIteration > 1) {
warp_tile_iterator.add_pointer_offset(
kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
CUTLASS_DEVICE
static void push(
size_t pos,
AccumulatorFragmentIterator const& iterator_begin,
WarpTileIterator& warp_tile_iterator) {
int dummy[] = {
(pos == (Seq * Base::kFragmentsPerIteration)) &&
(helper<Seq * Base::kFragmentsPerIteration>(
iterator_begin, warp_tile_iterator),
0)...};
CUTLASS_UNUSED(dummy[0]);
}
};
static_assert(
kPartitionsK == 1 || Base::kFragmentsPerIteration == 1,
"One of these must be exactly 1.");
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_not_needed_(
OutputOp const& output_op, ///< Output operator
OutputTileIterator
destination_iterator, ///< Tile iterator for destination
AccumulatorTile const&
accumulators ///< Complete warp-level accumulator tile
) {
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll( \
IterationsUnroll \
? OutputTileIterator::kIterations / Base::kFragmentsPerIteration \
: 1)
for (int iter = 0; iter < OutputTileIterator::kIterations;
iter += Base::kFragmentsPerIteration) {
//
// Convert and store fragment
//
__syncthreads();
acc2smem_source_not_needed<cutlass::make_index_sequence<
OutputTileIterator::kIterations / Base::kFragmentsPerIteration>>::
push(iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
typename SharedLoadIterator::Fragment
aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
if (p < Base::kFragmentsPerIteration - 1) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
} else if (kPartitionsK > 1) {
plus<typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(
aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset(
(1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator_source_not_needed_(
destination_iterator.thread_start_row(),
output_fragment,
output_op,
aligned_accum_fragment[0]);
//
// Store the final result
//
destination_iterator.store(output_fragment);
++destination_iterator;
}
if (Base::kFragmentsPerIteration > 1) {
shared_load_iterator_.add_pointer_offset(
kSmemPointerOffset * (1 - Base::kFragmentsPerIteration));
}
}
}
template <class Seq>
struct acc2smem_source_needed;
template <size_t... Seq>
struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
template <int Advance>
CUTLASS_DEVICE static void helper(
AccumulatorFragmentIterator accum_fragment_iterator,
WarpTileIterator& warp_tile_iterator) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Advance; i++) {
++accum_fragment_iterator;
}
typename AccumulatorFragmentIterator::Fragment accum_fragment;
accum_fragment_iterator.load(accum_fragment);
warp_tile_iterator.store(accum_fragment);
}
CUTLASS_DEVICE
static void push(
size_t pos,
AccumulatorFragmentIterator const& iterator_begin,
WarpTileIterator& warp_tile_iterator) {
int dummy[] = {
(pos == Seq) &&
(helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
}
};
/// Streams the result to global memory
CUTLASS_DEVICE
void compute_source_needed_(
OutputOp const& output_op, ///< Output operator
OutputTileIterator
destination_iterator, ///< Tile iterator for destination
AccumulatorTile const&
accumulators, ///< Complete warp-level accumulator tile
OutputTileSourceIterator
source_iterator ///< Threadblock tile coordinate in GEMM (in units of
///< threadblock tiles)
) {
typename OutputTileSourceIterator::Fragment source_fragment[2];
source_fragment[0].clear();
source_iterator.load(source_fragment[0]);
++source_iterator;
source_fragment[1].clear();
//
// Iterator over warp-level accumulator fragment
//
AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
//
// Iterate over accumulator tile
//
#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
if (iter > 0) {
__syncthreads();
}
//
// Load the source for next iteration (pipelining)
//
if (iter + 1 < OutputTileIterator::kIterations) {
source_iterator.load(source_fragment[(iter + 1) % 2]);
}
++source_iterator;
acc2smem_source_needed<
cutlass::make_index_sequence<OutputTileIterator::kIterations>>::
push(iter, accum_fragment_iterator, this->warp_tile_iterator_);
__syncthreads();
//
// Load fragments from shared memory
//
typename SharedLoadIterator::Fragment
aligned_accum_fragment[kPartitionsK];
shared_load_iterator_.load(aligned_accum_fragment[0]);
// If the number of k-slices is > 1 - perform a reduction amongst the
// k-slices
if (kPartitionsK > 1) {
plus<typename SharedLoadIterator::Fragment> add_fragments;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kPartitionsK; ++i) {
shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
shared_load_iterator_.load(aligned_accum_fragment[i]);
aligned_accum_fragment[0] = add_fragments(
aligned_accum_fragment[0], aligned_accum_fragment[i]);
}
shared_load_iterator_.add_pointer_offset(
(1 - kPartitionsK) * kSmemPointerOffset);
}
//
// Compute the output result
//
typename OutputTileIterator::Fragment output_fragment;
apply_output_operator_(
destination_iterator.thread_start_row(),
output_fragment,
output_op,
aligned_accum_fragment[0],
source_fragment[iter % 2]);
//
// Store the final result
//
destination_iterator.store(output_fragment);
++destination_iterator;
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_(
int begin_row,
typename OutputTileIterator::Fragment& output_fragment,
OutputOp const& output_op, ///< Output operator
typename SharedLoadIterator::Fragment const& aligned_accum_fragment,
typename OutputTileSourceIterator::Fragment const& source_fragment) {
OutputAccessType* output_frag_ptr =
reinterpret_cast<OutputAccessType*>(&output_fragment);
AccumulatorAccessType const* compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
SourceAccessType const* source_frag_ptr =
reinterpret_cast<SourceAccessType const*>(&source_fragment);
int const kOutputOpIterations = OutputTileIterator::Fragment::kElements /
OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
output_frag_ptr[i] = ApplyEpilogueOp<OutputOp>::apply(
output_op,
begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess),
compute_frag_ptr[i],
source_frag_ptr[i]);
}
}
/// Helper to invoke the output functor over each vector of output
CUTLASS_DEVICE
void apply_output_operator_source_not_needed_(
int begin_row,
typename OutputTileIterator::Fragment& output_fragment,
OutputOp const& output_op, ///< Output operator
typename SharedLoadIterator::Fragment const& aligned_accum_fragment) {
OutputAccessType* output_frag_ptr =
reinterpret_cast<OutputAccessType*>(&output_fragment);
AccumulatorAccessType const* compute_frag_ptr =
reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
int const kOutputOpIterations = OutputTileIterator::Fragment::kElements /
OutputTileIterator::kElementsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kOutputOpIterations; ++i) {
// Call the output operator
output_frag_ptr[i] = ApplyEpilogueOp<OutputOp>::apply(
output_op,
begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess),
compute_frag_ptr[i]);
}
}
  // Uses a loop inside a constexpr function, which relies on C++14 relaxed constexpr.
constexpr int CUTLASS_HOST_DEVICE getRowOffset(int i) {
using ThreadMap = typename OutputTileIterator::ThreadMap;
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
int frag_idx = ThreadMap::kElementsPerAccess *
(frag_row_idx * ThreadMap::Iterations::kColumn + column);
if (i < frag_idx + ThreadMap::kElementsPerAccess) {
return row_offset;
}
}
}
}
}
return -1;
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 22,387
| 34.256693
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/epilogue/epilogue_rescale_output.h
|
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory
to match canonical tensor layouts in global memory. Epilogues support
conversion and reduction operations.
This is a copy of cutlass/epilogue/threadblock/epilogue.h that can
handle "row_id" as a first argument, as uses it to get the corresponding
`m_prime` / `s_prime` to rescale the output.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#else
#include <cassert>
#endif
#include <ATen/cuda/CUDAContext.h>
#include <cutlass/aligned_buffer.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/functional.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/layout/vector.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_coord.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/regular_tile_iterator.h>
#include <cutlass/epilogue/threadblock/epilogue_base.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
#include <cutlass/numeric_types.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/thread/scale_type.h>
#include <cutlass/functional.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/epilogue/epilogue_pipelined.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies a linear combination operator to an array of elements.
// output <- alpha * accumulator + beta * source
// with:
// alpha = 1 / s_prime (to normalize when isLast=True, 1 otherwise)
// beta = alpha / m_prime (renormalize the output when the max changes)
// source is the current output
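//
// In scalar form, for each output row r (a sketch of the math implemented
// below, not additional functionality):
//   D[r] = alpha * Accum[r] + beta * O_prev[r]
//        = (1 / s_prime[r]) * Accum[r] + (m_prime[r] / s_prime[r]) * O_prev[r]
// where the 1 / s_prime factor is only applied when isLast is true.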
template <
typename ElementOutput_, ///< Data type used to store tensors
    typename ElementSource_, ///< Data type for source (usually matches
//`ElementOutput`)
int Count, ///< Number of elements computed per operation.
///< Usually it is 128/sizeof_bits<ElementOutput_>,
///< but we use 64 or 32 sometimes when there are not enough data
///< to store
typename ElementAccumulator_, ///< Accumulator data type
typename ElementCompute_, ///< Data type used to compute linear combination
bool isFirst,
bool isLast,
typename FragmentAlphaBeta_,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
class MemoryEfficientAttentionNormalize {
public:
using ElementOutput = ElementOutput_;
using ElementSource = ElementSource_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
static int const kCount = Count;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentSource = Array<ElementSource, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kCount>;
using ComputeFragment = Array<ElementCompute, kCount>;
using FragmentAlphaBeta = FragmentAlphaBeta_;
static FloatRoundStyle const kRound = Round;
private:
//
// Data members
//
FragmentAlphaBeta const& s_prime_;
FragmentAlphaBeta const& m_prime_;
public:
/// Constructs the function object, possibly loading from pointers in host
/// memory
CUTLASS_HOST_DEVICE
MemoryEfficientAttentionNormalize(
FragmentAlphaBeta const& s_prime,
FragmentAlphaBeta const& m_prime)
: s_prime_(s_prime), m_prime_(m_prime) {}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return !isFirst;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {}
/// Computes linear scaling: D = alpha * accumulator + beta * source
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
int row,
FragmentAccumulator const& accumulator,
FragmentSource const& source) const {
assert(!isFirst);
    // Convert source to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementSource, kCount, Round>
source_converter;
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
ComputeFragment converted_source = source_converter(source);
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
// Perform binary operations
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_add_source;
multiply_add<ComputeFragment> mul_add_accumulator;
ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
ElementCompute beta = alpha * m_prime_[row];
intermediate = mul_add_source(beta, converted_source); // X = beta * C
intermediate = mul_add_accumulator(
alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
return destination_converter(intermediate);
}
/// Computes linear scaling: D = alpha * accumulator
CUTLASS_HOST_DEVICE
FragmentOutput operator()(int row, FragmentAccumulator const& accumulator)
const {
assert(isFirst);
    // Convert accumulator to internal compute numeric type
NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
accumulator_converter;
// Convert to destination numeric type
NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
destination_converter;
ComputeFragment converted_accumulator = accumulator_converter(accumulator);
ComputeFragment intermediate;
multiplies<ComputeFragment> mul_accumulator;
ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
intermediate = mul_accumulator(
        alpha, converted_accumulator); // X = alpha * Accum
return destination_converter(intermediate);
}
};
} // namespace thread
namespace threadblock {
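// Adapter specialization of ApplyEpilogueOp for
// MemoryEfficientAttentionNormalize: the pipelined epilogue calls
// ApplyEpilogueOp<OutputOp>::apply with a row index, which is forwarded here
// so that the row-dependent s_prime / m_prime factors can be looked up.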
template <
typename EO,
typename ES,
int Count,
typename EA,
typename EC,
bool F,
bool L,
typename FAB,
FloatRoundStyle R>
struct ApplyEpilogueOp<thread::MemoryEfficientAttentionNormalize<
EO,
ES,
Count,
EA,
EC,
F,
L,
FAB,
R>> {
using Op = thread::
MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>;
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
Op const& output_op,
int row_id,
typename Op::FragmentAccumulator const& accum,
typename Op::FragmentSource const& source) {
return output_op(row_id, accum, source);
}
static CUTLASS_DEVICE typename Op::FragmentOutput apply(
Op const& output_op,
int row_id,
typename Op::FragmentAccumulator const& accum) {
return output_op(row_id, accum);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 7,426
| 30.604255
| 97
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/epilogue/epilogue_thread_apply_logsumexp.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Functor performing linear combination operations used by epilogues.
*/
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <cuda_fp16.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/thread/activation.h>
#include <cutlass/functional.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element, int ElementsPerAccess>
struct ArrayExponential {
CUTLASS_HOST_DEVICE
Array<Element, ElementsPerAccess> operator()(
Array<Element, ElementsPerAccess> const& input) const {
Array<Element, ElementsPerAccess> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ElementsPerAccess; ++i) {
result[i] = expf(input[i]);
}
return result;
}
};
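// Specialization for half_t: exponentiates two packed halves at a time with
// h2exp; ElementsPerAccess is assumed to be even here.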
template <int ElementsPerAccess>
struct ArrayExponential<half_t, ElementsPerAccess> {
CUTLASS_DEVICE
Array<half_t, ElementsPerAccess> operator()(
Array<half_t, ElementsPerAccess> const& input) const {
Array<half_t, ElementsPerAccess> result;
int const kVectorCount = ElementsPerAccess / 2;
__half2 const* input_ptr =
reinterpret_cast<__half2 const*>(input.raw_data());
__half2* res_ptr = reinterpret_cast<__half2*>(result.raw_data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kVectorCount; ++i) {
res_ptr[i] = h2exp(input_ptr[i]);
}
return result;
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Applies:
/// output <- (input - lse).exp()
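/// i.e. the identity softmax(x)_i = exp(x_i - logsumexp(x)), which recovers
/// normalized probabilities from a stored row-wise logsumexp without redoing
/// the softmax reduction.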
template <
typename ElementOutput_, // output
typename ElementLSE_, // accumulator from LSE
typename ElementAccumulator_, // accumulator from matmul
typename ElementCompute_, // intermediate compute (and exp calculation)
int ElementsPerAccess>
class ApplyLogSumExp {
public:
using ElementOutput = ElementOutput_;
using ElementAccumulator = ElementAccumulator_;
using ElementCompute = ElementCompute_;
using ElementLSE = ElementLSE_;
static int const kElementsPerAccess = ElementsPerAccess;
static int const kCount = kElementsPerAccess;
static const ScaleType::Kind kScale =
cutlass::epilogue::thread::ScaleType::NoBetaScaling;
using FragmentOutput = Array<ElementOutput, kCount>;
using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
using FragmentLSE = Array<ElementLSE, kElementsPerAccess>;
using FragmentScaleBias = FragmentLSE; // Used by epilogue_smem_accumulator.h
public:
//
// Methods
//
CUTLASS_HOST_DEVICE
ApplyLogSumExp() {}
/// Returns true if source is needed
CUTLASS_HOST_DEVICE
bool is_source_needed() const {
return true;
}
/// Functionally required for serial reduction in the epilogue
CUTLASS_HOST_DEVICE
void set_k_partition(int k_partition, int k_partition_count) {}
CUTLASS_HOST_DEVICE
FragmentOutput operator()(
FragmentAccumulator const& AB,
FragmentLSE const& scale_unused,
// bias used as LSE
FragmentLSE const& bias) const {
FragmentCompute frag_AB = NumericArrayConverter<
ElementCompute,
ElementAccumulator,
kElementsPerAccess>()(AB);
FragmentCompute frag_lse_compute =
NumericArrayConverter<ElementCompute, ElementLSE, kElementsPerAccess>()(
bias);
FragmentCompute frag_compute;
minus<FragmentCompute> minus_lse;
detail::ArrayExponential<ElementCompute, kElementsPerAccess> apply_exp;
frag_compute = minus_lse(frag_AB, frag_lse_compute);
frag_compute = apply_exp(frag_compute);
return NumericArrayConverter<
ElementOutput,
ElementCompute,
kElementsPerAccess>()(frag_compute);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,147
| 33.539326
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm/custom_mma.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <ATen/native/transformers/cuda/mem_eff_attention/gemm/custom_mma_multistage.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/gemm/custom_mma_pipelined.h>
#include <cutlass/gemm/threadblock/mma_multistage.h>
#include <cutlass/gemm/threadblock/mma_pipelined.h>
template <typename Mma, int kMaxK>
struct MakeCustomMma;
template <
typename Shape,
typename IteratorA,
typename SmemIteratorA,
cutlass::arch::CacheOperation::Kind CacheOpA,
typename IteratorB,
typename SmemIteratorB,
cutlass::arch::CacheOperation::Kind CacheOpB,
typename ElementC,
typename LayoutC,
typename Policy,
int Stages,
cutlass::gemm::SharedMemoryClearOption SharedMemoryClear,
int kMaxK>
struct MakeCustomMma<
cutlass::gemm::threadblock::MmaMultistage<
Shape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
ElementC,
LayoutC,
Policy,
Stages,
SharedMemoryClear>,
kMaxK> {
// Reduce the number of stages if we don't need that many
static int constexpr kStages =
kMaxK == cutlass::platform::numeric_limits<int>::max()
? Stages
: cutlass::const_min(
Stages,
(kMaxK + int(Shape::kK) - 1) / int(Shape::kK));
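  // Worked example (illustrative only): with Shape::kK == 32 and kMaxK == 64,
  // ceil(64 / 32) == 2, so a 3-stage mainloop would be shrunk to 2 stages.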
using Mma = cutlass::gemm::threadblock::CustomMmaMultistage<
Shape,
IteratorA,
SmemIteratorA,
CacheOpA,
IteratorB,
SmemIteratorB,
CacheOpB,
ElementC,
LayoutC,
Policy,
kStages,
SharedMemoryClear,
kMaxK>;
};
template <
typename Shape,
typename IteratorA,
typename SmemIteratorA,
typename IteratorB,
typename SmemIteratorB,
typename ElementC,
typename LayoutC,
typename Policy,
int kMaxK>
struct MakeCustomMma<
cutlass::gemm::threadblock::MmaPipelined<
Shape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
Policy>,
kMaxK> {
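  // MmaPipelined is always double-buffered (kStages == 2), so there is no
  // stage count to reduce here; we only swap in the Custom variant.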
using Mma = cutlass::gemm::threadblock::CustomMmaPipelined<
Shape,
IteratorA,
SmemIteratorA,
IteratorB,
SmemIteratorB,
ElementC,
LayoutC,
Policy>;
};
| 2,491
| 23.673267
| 87
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm/custom_mma_base.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include <cutlass/aligned_buffer.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/gemm/threadblock/mma_base.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Number of stages,
int Stages,
/// Used for partial specialization
typename Enable = bool>
class CustomMmaBase {
public:
///< Size of the Gemm problem - concept: gemm::GemmShape<>
using Shape = Shape_;
///< Policy describing tuning details
using Policy = Policy_;
//
// Dependent types
//
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Shape describing the overall GEMM computed from shared memory
/// by each warp.
using WarpGemm = typename Policy::Operator::Shape;
/// Shape describing the number of warps filling the CTA
using WarpCount = GemmShape<
Shape::kM / WarpGemm::kM,
Shape::kN / WarpGemm::kN,
Shape::kK / WarpGemm::kK>;
  /// Number of warp-level GEMM operations
static int const kWarpGemmIterations =
(WarpGemm::kK / Operator::Policy::MmaShape::kK);
/// Number of stages
static int const kStages = Stages;
//
// Nested structs
//
/// Shared storage object needed by threadblock-scoped GEMM
template <typename Element, typename OperandShape, typename OperandLayout>
struct OperandSharedStorage {
AlignedBuffer<Element, OperandShape::kCount> buffer;
using TensorRef = TensorRef<Element, OperandLayout>;
CUTLASS_DEVICE
static OperandLayout Layout() {
return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn});
}
/// Returns a TensorRef to the operand
CUTLASS_HOST_DEVICE
TensorRef ref() {
return TensorRef{buffer.data(), Layout()};
}
};
/// Shape of the A matrix operand in shared memory
using ShapeA = MatrixShape<
Shape::kM + Policy::SmemPaddingA::kRow,
Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
/// Shape of the B matrix operand in shared memory
using ShapeB = MatrixShape<
Shape::kK * kStages + Policy::SmemPaddingB::kRow,
Shape::kN + Policy::SmemPaddingB::kColumn>;
using SharedStorageA = OperandSharedStorage<
typename Operator::ElementA,
ShapeA,
typename Operator::LayoutA>;
using SharedStorageB = OperandSharedStorage<
typename Operator::ElementB,
ShapeB,
typename Operator::LayoutB>;
using TensorRefA = typename SharedStorageA::TensorRef;
using TensorRefB = typename SharedStorageB::TensorRef;
struct SharedStorage {
/// Buffer for A operand
SharedStorageA operand_A;
/// Buffer for B operand
SharedStorageB operand_B;
};
protected:
//
// Data members
//
/// Iterator to load a warp-scoped tile of A operand from shared memory
typename Operator::IteratorA warp_tile_iterator_A_;
/// Iterator to load a warp-scoped tile of B operand from shared memory
typename Operator::IteratorB warp_tile_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
CustomMmaBase(
///< Shared storage needed for internal use by threadblock-scoped GEMM
SharedStorageA& shared_storageA,
SharedStorageB& shared_storageB,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: warp_tile_iterator_A_(shared_storageA.ref(), lane_idx),
warp_tile_iterator_B_(shared_storageB.ref(), lane_idx) {}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,241
| 32.923913
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm/custom_mma_pipelined.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Template for a double-buffered threadblock-scoped GEMM kernel.
*/
#pragma once
#include <cutlass/aligned_buffer.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/gemm/gemm.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/gemm/custom_mma_base.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting CUDA cores and SIMT math
/// instructions.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Iterates over tiles of A operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorA_,
/// Iterates over tiles of A operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorA_,
/// Iterates over tiles of B operand in global memory
// (concept: ReadableTileIterator | ForwardTileIterator |
// MaskedTileIterator)
typename IteratorB_,
/// Iterates over tiles of B operand in shared memory
/// (concept: WriteableTileIterator | RandomAccessTileIterator)
typename SmemIteratorB_,
/// Data type of accumulator matrix
typename ElementC_,
/// Data type of accumulator matrix
typename LayoutC_,
/// Policy describing tuning details (concept: MmaPolicy)
typename Policy_,
/// Transformation applied to A operand
typename TransformA_ = NumericArrayConverter<
typename SmemIteratorA_::Element,
typename IteratorA_::Element,
IteratorA_::Fragment::kElements>,
///
/// Transformation applied to B operand
typename TransformB_ = NumericArrayConverter<
typename SmemIteratorB_::Element,
typename IteratorB_::Element,
IteratorB_::Fragment::kElements>,
/// Used for partial specialization
typename Enable = bool>
class CustomMmaPipelined : public CustomMmaBase<Shape_, Policy_, 2> {
public:
///< Base class
using Base = CustomMmaBase<Shape_, Policy_, 2>;
using Shape =
Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
using IteratorA =
IteratorA_; ///< Iterates over tiles of A operand in global memory
using IteratorB =
IteratorB_; ///< Iterates over tiles of B operand in global memory
using ElementC = ElementC_; ///< Data type of accumulator matrix
using LayoutC = LayoutC_; ///< Layout of accumulator matrix
using Policy = Policy_; ///< Policy describing tuning details
using SmemIteratorA = SmemIteratorA_;
using SmemIteratorB = SmemIteratorB_;
using TransformA = TransformA_;
using TransformB = TransformB_;
//
// Dependent types
//
/// Fragment of operand A loaded from global memory
using FragmentA = typename IteratorA::Fragment;
/// Fragment of operand B loaded from global memory
using FragmentB = typename IteratorB::Fragment;
/// Fragment of accumulator tile
using FragmentC = typename Policy::Operator::FragmentC;
/// Warp-level Mma
using Operator = typename Policy::Operator;
/// Obtain the arch tag from the warp-level operator
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = Operator::kTransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = Operator::kTransformB;
  // statically assert that kStages for MmaPipelined is two (double-buffered pipeline)
static_assert(
(Base::kStages == 2),
"MmaPipelined requires kStages set to value 2");
static bool const kSmemContainsEntireMat = false;
private:
using WarpFragmentA = typename Operator::FragmentA;
using WarpFragmentB = typename Operator::FragmentB;
protected:
/// Iterator to write threadblock-scoped tile of A operand to shared memory
SmemIteratorA smem_iterator_A_;
/// Iterator to write threadblock-scoped tile of B operand to shared memory
SmemIteratorB smem_iterator_B_;
public:
/// Construct from tensor references
CUTLASS_DEVICE
CustomMmaPipelined(
typename Base::SharedStorageA& shared_storageA,
typename Base::SharedStorageB& shared_storageB,
int thread_idx, ///< ID within the threadblock
int warp_idx, ///< ID of warp
int lane_idx ///< ID of each thread within a warp
)
: Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx),
smem_iterator_A_(shared_storageA.ref(), thread_idx),
smem_iterator_B_(shared_storageB.ref(), thread_idx) {
// Compute warp location within threadblock tile by mapping the warp_id to
// three coordinates:
// _m: the warp's position within the threadblock along the M dimension
// _n: the warp's position within the threadblock along the N dimension
// _k: the warp's position within the threadblock along the K dimension
int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
// Add per-warp offsets in units of warp-level tiles
this->warp_tile_iterator_A_.add_tile_offset(
{warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
this->warp_tile_iterator_B_.add_tile_offset(
{Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
}
CUTLASS_DEVICE
CustomMmaPipelined(
///< Shared storage needed for internal use by threadblock-scoped GEMM
typename Base::SharedStorage& st,
///< ID within the threadblock
int thread_idx,
///< ID of warp
int warp_idx,
///< ID of each thread within a warp
int lane_idx)
: CustomMmaPipelined(
st.operand_A,
st.operand_B,
thread_idx,
warp_idx,
lane_idx) {}
CUTLASS_DEVICE
bool set_prologue_done(bool value) {
// NOT IMPLEMENTED FOR PIPELINED
}
CUTLASS_DEVICE
bool set_zero_outside_bounds(bool value) {
// NOT NEEDED FOR PIPELINED
// shared memory will always be zero-filled
}
template <bool kLoadA = true, bool kLoadB = true>
CUTLASS_DEVICE static void prologue(
typename Base::SharedStorage& shared_storage,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
int thread_idx,
int problem_size_k) {
prologue<kLoadA, kLoadB>(
shared_storage.operand_A,
shared_storage.operand_B,
iterator_A,
iterator_B,
thread_idx,
problem_size_k);
}
template <bool kLoadA = true, bool kLoadB = true>
CUTLASS_DEVICE static void prologue(
typename Base::SharedStorageA& shared_storageA,
typename Base::SharedStorageB& shared_storageB,
///< iterator over A operand in global memory
IteratorA iterator_A,
///< iterator over B operand in global memory
IteratorB iterator_B,
int thread_idx,
int problem_size_k) {
// NOT IMPLEMENTED FOR PIPELINED
}
/// Perform a threadblock-scoped matrix multiply-accumulate
CUTLASS_DEVICE
void operator()(
int gemm_k_iterations, ///< number of iterations of the mainloop
FragmentC& accum, ///< destination accumulator tile
IteratorA iterator_A, ///< iterator over A operand in global memory
IteratorB iterator_B, ///< iterator over B operand in global memory
FragmentC const& src_accum, ///< source accumulator tile
TransformA transform_A =
TransformA(), ///< transformation applied to A fragment
TransformB transform_B =
TransformB()) { ///< transformation applied to B fragment
//
// Prologue
//
// Perform accumulation in the 'd' output operand
accum = src_accum;
FragmentA tb_frag_A;
FragmentB tb_frag_B;
tb_frag_A.clear();
tb_frag_B.clear();
// The last kblock is loaded in the prolog
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
++this->smem_iterator_A_;
++this->smem_iterator_B_;
__syncthreads();
// Pair of fragments used to overlap shared memory loads and math
// instructions
WarpFragmentA warp_frag_A[2];
WarpFragmentB warp_frag_B[2];
this->warp_tile_iterator_A_.set_kgroup_index(0);
this->warp_tile_iterator_B_.set_kgroup_index(0);
this->warp_tile_iterator_A_.load(warp_frag_A[0]);
this->warp_tile_iterator_B_.load(warp_frag_B[0]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
Operator warp_mma;
int smem_write_stage_idx = 1;
// Avoid reading out of bounds
iterator_A.clear_mask(gemm_k_iterations <= 1);
iterator_B.clear_mask(gemm_k_iterations <= 1);
// Issue loads during the first warp-level matrix multiply-add *AFTER*
    // issuing shared memory loads (which have the tightest latency requirement).
//
// Mainloop
//
// Note: The main loop does not support Base::kWarpGemmIterations == 2.
CUTLASS_GEMM_LOOP
for (; gemm_k_iterations > 0; --gemm_k_iterations) {
//
// Loop over GEMM K dimension
//
CUTLASS_PRAGMA_UNROLL
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations;
++warp_mma_k) {
// Load warp-level tiles from shared memory, wrapping to k offset if
// this is the last group as the case may be.
if (warp_mma_k == Base::kWarpGemmIterations - 1) {
// Write fragments to shared memory
this->smem_iterator_A_.store(transform_A(tb_frag_A));
this->smem_iterator_B_.store(transform_B(tb_frag_B));
__syncthreads();
++this->smem_iterator_A_;
++this->smem_iterator_B_;
// Add negative offsets to return iterators to the 'start' of the
// circular buffer in shared memory
if (smem_write_stage_idx == 1) {
this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
} else {
this->warp_tile_iterator_A_.add_tile_offset(
{0,
-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations});
this->warp_tile_iterator_B_.add_tile_offset(
{-Base::kStages * Policy::kPartitionsK *
Base::kWarpGemmIterations,
0});
}
smem_write_stage_idx ^= 1;
}
this->warp_tile_iterator_A_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_B_.set_kgroup_index(
(warp_mma_k + 1) % Base::kWarpGemmIterations);
this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
++this->warp_tile_iterator_A_;
++this->warp_tile_iterator_B_;
if (warp_mma_k == 0) {
iterator_A.load(tb_frag_A);
iterator_B.load(tb_frag_B);
++iterator_A;
++iterator_B;
// Avoid reading out of bounds if this was the last loop iteration
iterator_A.clear_mask(gemm_k_iterations <= 2);
iterator_B.clear_mask(gemm_k_iterations <= 2);
}
warp_mma(
accum,
warp_frag_A[warp_mma_k % 2],
warp_frag_B[warp_mma_k % 2],
accum);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,144
| 34.099256
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm/find_default_mma.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
/*! \file
\brief Cutlass provides helper template functions to figure out the right
data structures to instantiate to run a GEMM with various parameters (see
`cutlass/gemm/threadblock/default_mma.h`). However, due to template
instantiation priority rules, it will only create an MmaMultiStage with
kStages=3 (otherwise creates an MmePipelined - which is not compatible with
FastF32). kStages=3 uses too much shared memory and we want to use kStages=2,
so we just copy-pasted some code from `default_mma.h` and
`default_mma_core.h` files and wrapped this template to allow our use case.
This is really only for the FastF32 case - aka using TensorCores with fp32.
*/
#pragma once
#include <cutlass/gemm/threadblock/default_mma.h>
#include <cutlass/gemm/threadblock/default_mma_core_simt.h>
#include <cutlass/gemm/threadblock/default_mma_core_sm70.h>
#include <cutlass/gemm/threadblock/default_mma_core_sm75.h>
#include <cutlass/gemm/threadblock/default_mma_core_sm80.h>
namespace cutlass {
namespace gemm {
namespace threadblock {
template <
/// Element type for A matrix operand
typename ElementA,
/// Layout type for A matrix operand
typename LayoutA,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
/// Element type for B matrix operand
typename ElementB,
/// Layout type for B matrix operand
typename LayoutB,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
/// Element type for internal accumulation
typename ElementAccumulator,
/// Layout type for C and D matrix operand
typename LayoutC,
/// Operator class tag
typename OperatorClass,
/// Tag indicating architecture to tune for
typename ArchTag,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
/// Number of stages used in the pipelined mainloop
int Stages,
    /// Operation performed by GEMM
typename Operator,
typename Enable_ = void>
struct FindDefaultMma {
static constexpr bool AccumulatorsInRowMajor = false;
static constexpr SharedMemoryClearOption SharedMemoryClear =
SharedMemoryClearOption::kNone;
using DefaultMma = cutlass::gemm::threadblock::DefaultMma<
ElementA,
LayoutA,
kAlignmentA,
ElementB,
LayoutB,
kAlignmentB,
ElementAccumulator,
LayoutC,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
Stages,
Operator,
AccumulatorsInRowMajor,
SharedMemoryClear>;
};
/// Specialization for sm80 / FastF32 / multistage with kStages=2
template <
typename ElementA_,
/// Layout type for A matrix operand
typename LayoutA_,
/// Access granularity of A matrix in units of elements
int kAlignmentA,
typename ElementB_,
/// Layout type for B matrix operand
typename LayoutB_,
/// Access granularity of B matrix in units of elements
int kAlignmentB,
typename ElementAccumulator,
/// Threadblock-level tile size (concept: GemmShape)
typename ThreadblockShape,
/// Warp-level tile size (concept: GemmShape)
typename WarpShape,
/// Instruction-level tile size (concept: GemmShape)
typename InstructionShape,
int kStages,
typename Operator>
struct FindDefaultMma<
ElementA_,
LayoutA_,
kAlignmentA,
ElementB_,
LayoutB_,
kAlignmentB,
ElementAccumulator,
layout::RowMajor,
arch::OpClassTensorOp,
arch::Sm80,
ThreadblockShape,
WarpShape,
InstructionShape,
kStages,
Operator,
typename cutlass::platform::enable_if<(kAlignmentA > 1)>::type> {
using LayoutC = layout::RowMajor;
using OperatorClass = arch::OpClassTensorOp;
using ArchTag = arch::Sm80;
using DefaultMma_ = cutlass::gemm::threadblock::DefaultMma<
ElementA_,
LayoutA_,
kAlignmentA,
ElementB_,
LayoutB_,
kAlignmentB,
ElementAccumulator,
LayoutC,
OperatorClass,
ArchTag,
ThreadblockShape,
WarpShape,
InstructionShape,
3,
Operator>;
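  // DefaultMma_ above is instantiated with 3 stages only so that the stock
  // trait selects the multistage (rather than pipelined) code path; the
  // threadblock MMA below is rebuilt with the requested kStages (2 in the
  // FastF32 use case described at the top of this file).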
struct DefaultMma : DefaultMma_ {
using MmaCore_ = typename DefaultMma_::MmaCore;
// Define the threadblock-scoped multistage matrix multiply
using ThreadblockMma = cutlass::gemm::threadblock::MmaMultistage<
typename MmaCore_::Shape,
typename DefaultMma_::IteratorA,
typename MmaCore_::SmemIteratorA,
MmaCore_::kCacheOpA,
typename DefaultMma_::IteratorB,
typename MmaCore_::SmemIteratorB,
MmaCore_::kCacheOpB,
ElementAccumulator,
LayoutC,
typename MmaCore_::MmaPolicy,
kStages>;
};
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 5,175
| 29.809524
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm/mma_accum_lambda_iterator.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cutlass/functional.h>
#include <cutlass/gemm/warp/mma_simt_tile_iterator.h>
#include <cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h>
#include <cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h>
#include <cutlass/matrix_shape.h>
/*
TensorCores have different accumulator layouts.
This file provides a class to easily map the accumulator
i-th element with the corresponding matrix row/col.
*/
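//
// A minimal usage sketch (names here are illustrative, not part of the
// interface): given a warp-level accumulator fragment `accum` and a lane
// offset obtained from get_lane_offset(),
//
//   Iterator::iterateRows(
//       lane_offset,
//       [&](int row) { /* called once before each row */ },
//       [&](int row, int col, int idx) { visit(accum[idx], row, col); },
//       [&](int row) { /* called once after each row */ });
//
// where `idx` indexes the accumulator fragment and (row, col) are the matrix
// coordinates that element maps to.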
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm80 {
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
using Policy = typename T::Policy;
using InstructionShape = typename T::InstructionShape;
using OpDelta = typename T::OpDelta;
using Shape = typename T::Shape;
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
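  // Starting coordinate of this lane's accumulator elements: quad
  // (= lane_id / 4) selects the row and lane_in_quad selects a group of
  // kElementsPerAccess consecutive columns, following the sm80 mma.sync
  // accumulator layout.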
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
return cutlass::MatrixCoord(
quad + tile_offset.row() * Shape::kRow,
lane_in_quad * kElementsPerAccess +
tile_offset.column() * Shape::kColumn);
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
// See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn +
col + lane_offset.column();
int idx = mma_accum_start + row * kElementsPerAccess + col;
op(accum_m, accum_n, idx);
}
}
endRow(accum_m);
}
}
}
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
// In each warp, 4 threads will work on the same row
// - the ones with the same `quad`
auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1);
myValue = fn(myValue, otherV);
otherV = __shfl_xor_sync(0xffffffff, myValue, 2);
myValue = fn(myValue, otherV);
int lane_in_quad = (lane_id & 3);
return lane_in_quad == 0;
}
};
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSm70 {
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
using Policy = typename T::Policy;
using InstructionShape = typename T::InstructionShape;
using OpDelta = typename T::OpDelta;
using Shape = typename T::Shape;
using Element = accum_t;
static int const kElementsPerPartial = 4;
using EleShapePerPatial = typename cutlass::platform::conditional<
cutlass::platform::is_same<Element, float>::value,
cutlass::MatrixShape<2, 2>,
cutlass::MatrixShape<1, 4>>::type;
static int const kElementsPerMma = 8;
static int const kAccumulatorPatials = 2;
using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
int accum_m, accum_n;
if (cutlass::platform::is_same<Element, float>::value) {
// (quad[2],quad[0])+lane_in_quad[0]
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
// (quad[1])+lane_in_quad[1]
accum_n =
((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
(lane_in_quad & 2);
} else {
accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 +
lane_in_quad; // (quad[2],quad[0])
accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
}
return cutlass::MatrixCoord(
accum_m + tile_offset.row() * Shape::kRow,
accum_n + tile_offset.column() * Shape::kColumn);
}
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
static_assert(
cutlass::platform::is_same<Element, float>::value,
"update to support non-float accum");
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16
// T0 & T2 share same line within a quad
auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1);
myValue = fn(myValue, otherV);
// quad 0 and quad 2 are on the same lines
otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3);
myValue = fn(myValue, otherV);
return (lane_id & ((1 << 1) | (1 << 3))) == 0;
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
CUTLASS_PRAGMA_UNROLL
for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
int accum_m = tile_m * Policy::InterleavedTile::kRow +
mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn;
++tile_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn;
++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int p = 0; p < kAccumulatorPatials; ++p) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
int mma_accum_start =
(((tile_n * Policy::TileIterations::kRow + tile_m) *
Policy::MmaIterations::kColumn +
mma_n) *
Policy::MmaIterations::kRow +
mma_m) *
kElementsPerMma;
int accum_n = tile_n * Policy::InterleavedTile::kColumn +
mma_n * QuadShapePerPatialMma::kColumn +
p * Policy::InterleavedTile::kColumn / 2 + n +
lane_offset.column();
int idx = mma_accum_start + p * kElementsPerPartial +
m * EleShapePerPatial::kColumn + n;
op(accum_m, accum_n, idx);
}
}
}
}
endRow(accum_m);
}
}
}
}
};
template <typename T, typename accum_t, int kWarpSize>
struct AccumLambdaIteratorSimt {
using Policy = typename T::Policy;
using Iterations = typename T::Iterations;
using Element = typename T::Element;
using Delta = typename T::Delta;
using Shape = typename T::Shape;
static_assert(
cutlass::platform::
is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
"only RowMajor is supported");
template <typename DT, typename F>
CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) {
CUTLASS_PRAGMA_UNROLL
for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) {
auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit);
myValue = fn(myValue, otherV);
}
return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0;
}
template <typename FA, typename FB, typename FC>
CUTLASS_DEVICE static void iterateRows(
cutlass::MatrixCoord& lane_offset,
FA beginRow,
FB op,
FC endRow) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
int accum_m = mma_m * Delta::kRow + m + lane_offset.row();
beginRow(accum_m);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
int accum_n =
mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN +
lane_offset.column();
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
int idx = n +
Policy::LaneMmaShape::kN *
(mma_n +
Iterations::kColumn *
(m + mma_m * Policy::LaneMmaShape::kM));
op(accum_m, accum_n + n, idx);
}
}
endRow(accum_m);
}
}
}
static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset(
int8_t lane_id,
int8_t warp_id,
typename T::TensorCoord const& tile_offset) {
static_assert(
cutlass::platform::is_same<
typename Policy::LaneLayout,
cutlass::layout::RowMajorInterleaved<1>>::value,
"");
typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
cutlass::MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
cutlass::MatrixCoord(Policy::LaneMmaShape::kM,
Policy::LaneMmaShape::kN);
return lane_offset +
tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn);
}
};
template <typename T, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator;
// Simt
template <typename S, typename P, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaSimtTileIterator<
S,
cutlass::gemm::Operand::kC,
accum_t,
cutlass::layout::RowMajor,
P,
1,
1>,
accum_t,
kWarpSize> {
using WarpIterator = typename cutlass::gemm::warp::MmaSimtTileIterator<
S,
cutlass::gemm::Operand::kC,
accum_t,
cutlass::layout::RowMajor,
P,
1,
1>;
using Iterator = AccumLambdaIteratorSimt<WarpIterator, accum_t, kWarpSize>;
};
// TensorOp - Volta
template <typename S1, typename S2, typename accum_t, int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
cutlass::MatrixShape<1, 1>>,
accum_t,
kWarpSize> {
using WarpIterator =
typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
cutlass::MatrixShape<1, 1>>;
using Iterator = AccumLambdaIteratorSm70<WarpIterator, accum_t, kWarpSize>;
};
// TensorOp - Sm75+
template <
typename S1,
typename S2,
typename S3,
typename accum_t,
int kWarpSize>
struct DefaultMmaAccumLambdaIterator<
cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
S3>,
accum_t,
kWarpSize> {
using WarpIterator =
typename cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator<
S1,
accum_t,
cutlass::layout::RowMajor,
S2,
S3>;
using Iterator = AccumLambdaIteratorSm80<WarpIterator, accum_t, kWarpSize>;
};
| 12,350
| 33.791549
| 111
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/iterators/default_warp_iterator_from_smem.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Instantiates the right WarpIterator to read from shared memory
The class `DefaultWarpIteratorAFromSharedMemory` is useful when reading
data dumped with `B2bGemm::accumToSmem`.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h>
#include <cutlass/platform/platform.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/iterators/warp_iterator_from_smem.h>
namespace cutlass {
namespace gemm {
namespace threadblock {
template <
typename WarpShape,
typename InstructionShape,
typename RegularWarpIterator,
typename Policy,
typename Enable = void>
struct DefaultWarpIteratorAFromSharedMemory {};
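// The primary template is intentionally empty; only the specializations below
// are meant to be instantiated.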
// TensorOp - Ampere half
template <typename RegularWarpIterator, typename Policy, int kInstrK>
struct DefaultWarpIteratorAFromSharedMemory<
cutlass::gemm::GemmShape<32, 32, 32>,
cutlass::gemm::GemmShape<16, 8, kInstrK>,
RegularWarpIterator,
Policy,
typename platform::enable_if<(
sizeof_bits<typename RegularWarpIterator::Element>::value == 16 &&
Policy::Operator::Policy::OpDelta::kRow == 1)>::type> {
using OpDelta = typename Policy::Operator::Policy::OpDelta;
using WarpShape = cutlass::MatrixShape<32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<16, 8, kInstrK>;
using WarpIterator = cutlass::gemm::warp::WarpIteratorFromSmem<
cutlass::gemm::Operand::kA,
typename RegularWarpIterator::Element,
cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>>;
};
// TensorOp - Ampere f32
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
WarpShape,
cutlass::gemm::GemmShape<16, 8, 8>,
RegularWarpIterator,
Policy,
typename platform::enable_if<(
sizeof_bits<typename RegularWarpIterator::Element>::value != 16 ||
Policy::Operator::Policy::OpDelta::kRow != 1)>::type> {
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
static constexpr auto kWarpSize = 32;
using OpDelta = typename Policy::Operator::Policy::OpDelta;
using WarpIterator =
cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
cutlass::MatrixShape<WarpShape::kM, WarpShape::kK>,
cutlass::gemm::Operand::kA,
typename RegularWarpIterator::Element,
cutlass::layout::RowMajor,
cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>,
OpDelta::kRow,
kWarpSize>;
};
// TensorOp - Volta
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
WarpShape,
cutlass::gemm::GemmShape<16, 16, 4>,
RegularWarpIterator,
Policy> {
using InstructionShape = cutlass::gemm::GemmShape<16, 16, 4>;
static constexpr auto kWarpSize = 32;
using OpDelta = typename Policy::Operator::Policy::OpDelta;
using WarpIterator =
cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator<
cutlass::MatrixShape<32, 32>, // MatrixShape<WarpShape::kM,
// WarpShape::kK>,
cutlass::gemm::Operand::kA,
typename RegularWarpIterator::Element,
cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>,
cutlass::MatrixShape<16, 4>,
OpDelta::kRow,
kWarpSize>;
};
// Simt
template <typename WarpShape, typename RegularWarpIterator, typename Policy>
struct DefaultWarpIteratorAFromSharedMemory<
WarpShape,
cutlass::gemm::GemmShape<1, 1, 1>,
RegularWarpIterator,
Policy> {
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr auto kWarpSize = 32;
// We just use the same iterator, as we reproduced the same shared-memory
// schema. Just modify it to handle non-complete tiles.
using WarpIterator = RegularWarpIterator;
};
} // namespace threadblock
} // namespace gemm
} // namespace cutlass
| 5,827
| 39.472222
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/iterators/epilogue_predicated_tile_iterator.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue iterator that supports prefetching
Mostly copied from <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
*/
#pragma once
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in
/// epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator |
/// ForwardTileIterator
///
template <
typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
typename Element_, ///< Element data type
bool ScatterD = false, ///< Scatter D operand or not
bool UseCUDAStore = false>
class PredicatedTileIteratorPrefetch {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = layout::RowMajor;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert(
ThreadMap::Iterations::kRow > 0,
"ThreadMap::Iterations::kRow must be > 0");
static_assert(
ThreadMap::Iterations::kGroup > 0,
"ThreadMap::Iterations::kGroup must be > 0");
static_assert(
ThreadMap::Iterations::kCluster > 0,
"ThreadMap::Iterations::kCluster must be > 0");
static_assert(
ThreadMap::Iterations::kColumn > 0,
"ThreadMap::Iterations::kColumn must be > 0");
/// Fragment object
using Fragment = Array<
Element,
ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
ThreadMap::kElementsPerAccess>;
/// Memory access size
using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
struct Params : PredicatedTileIteratorParams {
using Base = PredicatedTileIteratorParams;
CUTLASS_HOST_DEVICE
Params() {}
CUTLASS_HOST_DEVICE
Params(Layout const& layout)
: PredicatedTileIteratorParams(
layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
make_OutputTileThreadMapDesc<ThreadMap>()) {}
CUTLASS_HOST_DEVICE
Params(Base const& base) : Base(base) {}
};
/// Mask object
struct Mask {
static int const kCount = ThreadMap::Iterations::kColumn;
/// Predicate state
bool predicates[kCount];
//
// Mask
//
CUTLASS_HOST_DEVICE
Mask() {
enable();
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = false;
}
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kCount; ++i) {
predicates[i] = true;
}
}
};
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
PredicatedTileIteratorParams params_;
/// Byte-level pointer
uint8_t* byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in columns
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have
/// been computed)
Index thread_start_row_;
/// A thread's starting column
Index thread_start_column_;
/// Internal state counter
int state_[3];
/// Scatter indices
int const* indices_;
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(
sizeof(PredicatedTileIteratorParams::stride) == 8,
"Expected 64b strides");
private:
//
// Methods
//
public:
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
PredicatedTileIteratorPrefetch(
PredicatedTileIteratorParams const& params,
Element* pointer,
TensorCoord extent,
int thread_idx,
TensorCoord threadblock_offset = TensorCoord(),
int const* indices = nullptr)
: params_(params), indices_(indices) {
TensorCoord thread_offset =
ThreadMap::initial_offset(thread_idx) + threadblock_offset;
extent_row_ = extent.row();
extent_column_ = extent.column();
thread_start_row_ = thread_offset.row();
thread_start_column_ = thread_offset.column();
// Initialize predicates
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
mask_.predicates[c] =
((thread_offset.column() + ThreadMap::Delta::kColumn * c) <
extent.column());
}
// Null pointer performs no accesses
if (!pointer) {
mask_.clear();
}
if (ScatterD && !indices) {
mask_.clear();
}
// Initialize pointer
byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
LongIndex(thread_offset.column()) * sizeof(AccessType) /
kElementsPerAccess;
if (ScatterD) {
byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
LongIndex(thread_offset.column()) * sizeof(AccessType) /
kElementsPerAccess;
}
// Initialize internal state counter
state_[0] = state_[1] = state_[2] = 0;
}
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset) {
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
CUTLASS_DEVICE
void prefetch_all() {
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < kIterations; ++iter) {
prefetch();
++(*this);
}
}
CUTLASS_DEVICE
void prefetch() {
uint8_t* byte_pointer = byte_pointer_;
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
// On Windows, using unsigned long here gives the error:
//   error: asm operand type size(4) does not match
//   type/size implied by constraint 'l'
uint64_t addr = (uint64_t)((void*)&memory_pointer
[column * ThreadMap::Delta::kColumn /
kElementsPerAccess]);
asm volatile("prefetch.global.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const {
uint8_t* byte_pointer = byte_pointer_;
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
if (ScatterD && row_guard) {
assert(indices_);
memory_pointer = reinterpret_cast<AccessType*>(
byte_pointer + byte_offset +
LongIndex(indices_[row_offset + thread_start_row_]) *
LongIndex(params_.stride));
}
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void load(Fragment& frag) const {
load_with_byte_offset(frag, 0);
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const {
uint8_t* byte_pointer = byte_pointer_;
AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
if (ScatterD && row_guard) {
assert(indices_);
memory_pointer = reinterpret_cast<AccessType*>(
byte_pointer + byte_offset +
LongIndex(indices_[row_offset + thread_start_row_]) *
LongIndex(params_.stride));
}
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
if (UseCUDAStore) {
if (guard) {
memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess] =
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn +
column];
}
} else {
cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
}
if (row + 1 < ThreadMap::Iterations::kRow) {
if (!ScatterD) {
byte_pointer += params_.increment_row;
}
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment const& frag) const {
store_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void downsample_load_with_byte_offset(
Fragment& frag,
int64_t byte_offset,
int convolution_P,
int convolution_Q,
int add_P,
int add_Q,
int problem_N) const {
uint8_t* byte_pointer = byte_pointer_;
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
int output_row = row_offset + thread_start_row_;
int output_N = output_row / (convolution_P * convolution_Q);
int output_PQ = output_row % (convolution_P * convolution_Q);
int output_P = output_PQ / convolution_Q;
int output_Q = output_PQ % convolution_Q;
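// Map the output (N, P, Q) coordinate to the matching row of a 2x-larger
// input feature map (stride-2 downsampling), shifted by (add_P, add_Q).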
int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
(2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q;
int64_t byte_offset =
(input_row - output_row) * problem_N * sizeof(float);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
/// Loads a fragment from memory
CUTLASS_DEVICE
void upsample_load_with_byte_offset(
Fragment& frag,
int64_t byte_offset,
int convolution_P,
int convolution_Q,
int add_P,
int add_Q,
int problem_N) const {
uint8_t* byte_pointer = byte_pointer_;
AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster;
++cluster) {
CUTLASS_PRAGMA_UNROLL
for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
int frag_row_idx =
(row +
ThreadMap::Iterations::kRow *
(group + ThreadMap::Iterations::kGroup * cluster));
int row_offset = row * ThreadMap::Delta::kRow +
group * ThreadMap::Delta::kGroup +
cluster * ThreadMap::Delta::kCluster;
bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
int output_row = row_offset + thread_start_row_;
int output_N = output_row / (convolution_P * convolution_Q);
int output_PQ = output_row % (convolution_P * convolution_Q);
int output_P = output_PQ / convolution_Q;
int output_Q = output_PQ % convolution_Q;
int row_add_P = add_P;
int row_add_Q = add_Q;
if (output_P > convolution_P - 2)
row_add_P = 0;
if (output_Q > convolution_Q - 2)
row_add_Q = 0;
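// Map the output (N, P, Q) coordinate to the matching row of a 2x-smaller
// input feature map (nearest-neighbor upsampling).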
int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) +
((output_P + row_add_P) / 2) * (convolution_Q / 2) +
(output_Q + row_add_Q) / 2;
int64_t byte_offset =
(input_row - output_row) * problem_N * sizeof(float);
AccessType* memory_pointer =
reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
CUTLASS_PRAGMA_UNROLL
for (int column = 0; column < ThreadMap::Iterations::kColumn;
++column) {
bool guard = row_guard && mask_.predicates[column];
cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
frag_ptr
[frag_row_idx * ThreadMap::Iterations::kColumn + column],
(void*)&memory_pointer
[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
guard);
}
if (row + 1 < ThreadMap::Iterations::kRow) {
byte_pointer += params_.increment_row;
}
}
if (group + 1 < ThreadMap::Iterations::kGroup) {
byte_pointer += params_.increment_group;
}
}
if (cluster + 1 < ThreadMap::Iterations::kCluster) {
byte_pointer += params_.increment_cluster;
}
}
}
CUTLASS_DEVICE
MatrixCoord thread_start() const {
return MatrixCoord(thread_start_row_, thread_start_column_);
}
/// Need to get the thread start row from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_row() const {
return thread_start_row_;
}
/// Need to get the thread start column from the tile iterator
CUTLASS_DEVICE
int32_t thread_start_column() const {
return thread_start_column_;
}
/// Extent of the matrix in rows
CUTLASS_DEVICE
Index extent_row() const {
return extent_row_;
}
/// Extent of the matrix in columns
CUTLASS_DEVICE
Index extent_column() const {
return extent_column_;
}
/// Advances to the next position to load or store
CUTLASS_HOST_DEVICE
PredicatedTileIteratorPrefetch& operator++() {
++state_[0];
if (!ScatterD) {
byte_pointer_ += params_.advance_row;
}
thread_start_row_ += ThreadMap::Shape::kRow;
if (state_[0] == ThreadMap::Count::kRow) {
state_[0] = 0;
++state_[1];
byte_pointer_ += params_.advance_group;
thread_start_row_ += (ThreadMap::Shape::kGroup - 1) *
ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
if (state_[1] == ThreadMap::Count::kGroup) {
state_[1] = 0;
++state_[2];
byte_pointer_ += params_.advance_cluster;
thread_start_row_ += ThreadMap::Count::kGroup *
ThreadMap::Shape::kGroup * ThreadMap::Count::kRow *
ThreadMap::Shape::kRow;
if (state_[2] == ThreadMap::Count::kCluster) {
state_[2] = 0;
byte_pointer_ += params_.advance_tile;
}
}
}
return *this;
}
///< Efficiently disables all accesses guarded by mask
CUTLASS_DEVICE void clear_mask() {
mask_.clear();
}
///< Efficiently enables all accesses guarded by mask
CUTLASS_DEVICE void enable_mask() {
mask_.enable();
}
///< Gets the mask
CUTLASS_DEVICE void get_mask(Mask& mask) const {
mask = mask_;
}
///< Sets the mask
CUTLASS_DEVICE void set_mask(Mask const& mask) {
mask_ = mask;
}
};
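// Minimal usage sketch (illustrative only; ThreadMap, params, ptr, extent and
// thread_idx are hypothetical). A separate iterator is used for prefetching so
// that the loading iterator starts from the first tile:
//
//   using Iterator = PredicatedTileIteratorPrefetch<ThreadMap, float>;
//   Iterator prefetch_it(params, ptr, extent, thread_idx);
//   prefetch_it.prefetch_all();   // issue prefetch.global.L1 for every tile
//   Iterator load_it(params, ptr, extent, thread_idx);
//   Iterator::Fragment frag;
//   load_it.load(frag);           // guarded global loads into registers
//   ++load_it;                    // advance to the next tile position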
template <typename IT>
struct MakePrefetchableIterator {
using Iterator = PredicatedTileIteratorPrefetch<
typename IT::ThreadMap,
typename IT::Element>;
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 23,855
| 30.681275
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/iterators/make_residual_last.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <ATen/native/transformers/cuda/mem_eff_attention/iterators/predicated_tile_access_iterator_residual_last.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/iterators/predicated_tile_iterator_residual_last.h>
namespace cutlass {
namespace transform {
namespace threadblock {
template <typename BaseIterator>
struct MakeIteratorResidualLast;
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
int AccessSize,
bool Gather>
struct MakeIteratorResidualLast<PredicatedTileIterator<
Shape,
Element,
Layout,
AdvanceRank,
ThreadMap,
AccessSize,
Gather>> {
using Iterator = PredicatedTileIteratorResidualLast<
Shape,
Element,
Layout,
AdvanceRank,
ThreadMap,
AccessSize,
Gather>;
};
template <
typename Shape,
typename Element,
typename Layout,
int AdvanceRank,
typename ThreadMap,
typename AccessType,
bool Gather>
struct MakeIteratorResidualLast<PredicatedTileAccessIterator<
Shape,
Element,
Layout,
AdvanceRank,
ThreadMap,
AccessType,
Gather>> {
using Iterator = PredicatedTileAccessIteratorResidualLast<
Shape,
Element,
Layout,
AdvanceRank,
ThreadMap,
AccessType,
Gather>;
};
} // namespace threadblock
} // namespace transform
} // namespace cutlass
| 1,650
| 21.013333
| 116
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/iterators/transpose_warp_iterator.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <ATen/native/transformers/cuda/mem_eff_attention/iterators/warp_iterator_from_smem.h>
template <typename WarpIterator>
struct TransposeWarpIterator {
using Iterator = char;
static bool constexpr kSupportsTranspose = false;
};
template <
/// Operand identity
cutlass::gemm::Operand Operand,
/// Data type of A elements
typename Element,
typename InstructionShape,
bool kTranspose>
struct TransposeWarpIterator<
cutlass::gemm::warp::
WarpIteratorFromSmem<Operand, Element, InstructionShape, kTranspose>> {
using Iterator = cutlass::gemm::warp::
WarpIteratorFromSmem<Operand, Element, InstructionShape, !kTranspose>;
static bool constexpr kSupportsTranspose = true;
};
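// Usage sketch (SomeWarpIterator is a hypothetical warp iterator type): the
// primary template reports kSupportsTranspose == false and exposes a dummy
// `char` Iterator, while the WarpIteratorFromSmem specialization flips
// kTranspose.
//
//   using T = TransposeWarpIterator<SomeWarpIterator>;
//   if constexpr (T::kSupportsTranspose) {
//     using TransposedIterator = typename T::Iterator;
//     // ... read the operand transposed ...
//   }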
| 961
| 29.0625
| 94
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/iterators/warp_iterator_from_smem.h
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
*reserved. SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
*ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
*LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
*INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
*CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
*POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Inspired from
"cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" Loads tiles of GEMM
operands from a RowMajor shared-memory layout into registers for use by A100
TensorCores.
The difference with "mma_tensor_op_tile_access_iterator.h" is that:
(1) We use "ldmatrix" to load tiles, rather than manual loads (slightly
faster)
(2) We support transposing the operand (e.g. reading `A.transpose()` when
the shared memory holds `A`)
This is only implemented for specific shapes.
*/
#pragma once
#include <cutlass/gemm/gemm.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
template <
/// Operand identity
Operand Operand_,
/// Data type of A elements
typename Element_,
typename InstructionShape_,
bool kTranspose = false>
class WarpIteratorFromSmem {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = cutlass::MatrixShape<32, 32>;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(
kOperand == Operand::kA,
"No support for OperandB at the moment");
/// Basic check
static_assert(
kOperand == Operand::kA || kOperand == Operand::kB,
"WarpIteratorFromSmem may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
static_assert(sizeof_bits<Element>::value == 16, "Only supported for half");
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
static_assert(InstructionShape::kRow == 16, "Only supports 16x8x8 / 16x8x16");
static_assert(
InstructionShape::kColumn == 8 || InstructionShape::kColumn == 16,
"Only supports 16x8x8 / 16x8x16");
/// Delta between *MMA operations (in units of *MMA operations, concept:
/// MatrixShape)
static int const kOpDelta = 1;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Number of elements accessed per Shared Memory load
static int const kElementsPerAccess =
(sizeof_bits<Element>::value >= 32 ? 1
: 32 / sizeof_bits<Element>::value);
using InstructionCount = MatrixShape<
Shape::kRow / InstructionShape::kRow,
Shape::kColumn / InstructionShape::kColumn>;
static int const kIterations = (kOperand == Operand::kA)
? InstructionCount::kColumn
: InstructionCount::kRow;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<
Element,
(kOperand == Operand::kA)
? (Shape::kRow* InstructionShape::kColumn / kThreads)
: (Shape::kColumn* InstructionShape::kRow / kThreads)>;
/// Memory access type
// using AccessType = AlignedArray<Element, kElementsPerAccess>;
using AccessType = Array<unsigned, 4>;
static int constexpr kWarpShapeDivisibleInner =
(kOperand == Operand::kA ? InstructionShape::kColumn
: InstructionShape::kRow);
static int constexpr kAccessesInner =
(kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
// Number of 32bits tiles to load per `ldmatrix`
static int const kTilesPerInstruction = InstructionShape::kRow / 8;
static_assert(kTilesPerInstruction == 2, "Only supports 16x8x16 and 16x8x8");
private:
/// Underlying tensor reference
TensorRef ref_;
/// Origin
MatrixCoord origin_;
/// Iterations in a tile
int iterations_;
public:
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem(TensorRef const& ref, int lane_id)
: WarpIteratorFromSmem(ref, {Shape::kRow, Shape::kColumn}, lane_id) {}
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem(TensorRef const& ref, TensorCoord extent, int lane_id)
: ref_(ref), iterations_(0) {
// See also:
// https://docs.nvidia.com/cuda/archive/11.7.1/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-1688
// 16x8x8: kAccessesInner = 1 (1 ldmatrix.x4)
// 16x8x16: kAccessesInner = 2 (2 ldmatrix.x4)
int ldsm_vec_num = (lane_id >> 3);
if (kOperand == Operand::kA) {
origin_ = MatrixCoord(lane_id % 8, 0);
static_assert(
InstructionCount::kRow * kTilesPerInstruction == 4,
"can't use ldmatrix.x4");
int access_m_idx = ldsm_vec_num % kTilesPerInstruction;
int inner_idx = (ldsm_vec_num / kTilesPerInstruction) % kAccessesInner;
int inst_m_idx = ldsm_vec_num / (kTilesPerInstruction * kAccessesInner);
MatrixCoord offset(
access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
inner_idx * 4 * kElementsPerAccess);
if (kTranspose) {
offset = MatrixCoord(offset.column(), offset.row());
}
origin_ += offset;
} else {
// XXX: This is not tested or used
origin_ = MatrixCoord(0, lane_id % 8);
static_assert(
InstructionCount::kColumn * kAccessesInner == 4,
"can't use ldmatrix.x4");
CUTLASS_PRAGMA_UNROLL
for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn;
++inst_n_idx) {
CUTLASS_PRAGMA_UNROLL
for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
int access_idx = inner_idx + kAccessesInner * inst_n_idx;
MatrixCoord offset(
inner_idx * 4 * kElementsPerAccess, inst_n_idx * 8);
if (access_idx == ldsm_vec_num) {
if (kTranspose) {
offset = MatrixCoord(offset.column(), offset.row());
}
origin_ += offset;
}
}
}
}
ref_.add_coord_offset(origin_);
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem& add_tile_offset(TensorCoord const& tile_offset) {
TensorCoord coord_offset(
tile_offset.row() * Shape::kRow, tile_offset.column() * Shape::kColumn);
if (kTranspose) {
coord_offset = TensorCoord{coord_offset.column(), coord_offset.row()};
}
origin_ += coord_offset;
ref_.add_coord_offset(coord_offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
void advance() {
if (kOperand == Operand::kA) {
add_tile_offset({0, 1});
} else {
add_tile_offset({1, 0});
}
iterations_ = 0;
}
/// increase iterations in a tile
CUTLASS_HOST_DEVICE
WarpIteratorFromSmem& operator++() {
iterations_++;
if (iterations_ >= kIterations)
advance();
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_DEVICE
void load(Fragment& frag) const {
AccessType* access_ptr = reinterpret_cast<AccessType*>(&frag);
using LoadLayout = typename platform::
conditional<kTranspose, layout::ColumnMajor, layout::RowMajor>::type;
CUTLASS_PRAGMA_UNROLL
for (int access_m_idx = 0; access_m_idx <
(InstructionCount::kRow * kTilesPerInstruction * kAccessesInner) / 4;
++access_m_idx) {
MatrixCoord offset;
if (kOperand == Operand::kA) {
offset = MatrixCoord(
access_m_idx * 16, iterations_ * InstructionShape::kColumn);
} else {
offset = MatrixCoord(iterations_ * InstructionShape::kRow, 0);
}
if (kTranspose) {
offset = MatrixCoord(offset.column(), offset.row());
}
cutlass::arch::ldsm<LoadLayout, 4>(
access_ptr[access_m_idx], ref_.data() + ref_.offset(offset));
}
}
};
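// Usage sketch (illustrative only; smem_ref and lane_id are assumed to come
// from the surrounding kernel): iterate over the instruction tiles of a 32x32
// operand A held in shared memory, loading one fragment per step via ldmatrix.
//
//   using Iterator = cutlass::gemm::warp::WarpIteratorFromSmem<
//       cutlass::gemm::Operand::kA,
//       cutlass::half_t,
//       cutlass::MatrixShape<16, 8>, // instruction tile (kRow x kColumn)
//       /*kTranspose=*/false>;
//   Iterator iter(smem_ref, lane_id);
//   Iterator::Fragment frag;
//   for (int k = 0; k < Iterator::kIterations; ++k, ++iter) {
//     iter.load(frag);
//     // ... feed frag into the tensor-core MMA ...
//   }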
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 10,055
| 34.284211
| 123
|
h
|
null |
pytorch-main/aten/src/ATen/native/transformers/cuda/mem_eff_attention/transform/tile_smem_loader.h
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/aligned_buffer.h>
#include <cutlass/array.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/pitch_linear.h>
#include <cutlass/numeric_types.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/predicated_tile_iterator.h>
#include <cutlass/transform/threadblock/regular_tile_iterator.h>
template <
typename scalar_t, // scalar type
typename ThreadblockTileShape, // size of tile to load
int Threads, // number of participating threads
int ElementsPerAccess> // thread access width in elements
class TileSmemLoader {
public:
using SmemTile =
cutlass::AlignedBuffer<scalar_t, ThreadblockTileShape::kCount>;
using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
cutlass::layout::PitchLinearShape<
ThreadblockTileShape::kColumn, // contiguous
ThreadblockTileShape::kRow>, // strided
Threads, // Threads
ElementsPerAccess>; // ElementsPerAccess
using GmemTileIterator =
cutlass::transform::threadblock::PredicatedTileIterator<
ThreadblockTileShape, // Shape
scalar_t, // Element
cutlass::layout::RowMajor, // Layout
0, // AdvanceRank
ThreadMap>; // ThreadMap
using SmemTileIterator = cutlass::transform::threadblock::RegularTileIterator<
ThreadblockTileShape, // Shape
scalar_t, // Element
cutlass::layout::RowMajor, // Layout
0, // AdvanceRank
ThreadMap>; // ThreadMap
using Fragment = typename GmemTileIterator::Fragment;
/// load a tile from global memory into shared memory
CUTLASS_DEVICE
static void load(
GmemTileIterator tile_load_iter,
SmemTileIterator tile_store_iter) {
Fragment tb_frag;
tb_frag.clear();
tile_load_iter.load(tb_frag);
tile_store_iter.store(tb_frag);
__syncthreads();
}
};
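// Instantiation sketch (assumed parameters, illustrative only): stage a 32x128
// half-precision tile from global memory into shared memory using 128 threads
// with 8-element (128-bit) accesses.
//
//   using Loader = TileSmemLoader<
//       cutlass::half_t,
//       cutlass::MatrixShape<32, 128>, // ThreadblockTileShape
//       128, // Threads
//       8>; // ElementsPerAccess
//   // __shared__ Loader::SmemTile smem;
//   // Loader::load(gmem_iterator, smem_iterator);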
| 2,152
| 31.134328
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/ufunc/add.h
|
#pragma once
#include <c10/macros/Macros.h>
#if !defined(__CUDACC__) && !defined(__HIPCC__)
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#endif
namespace at {
namespace native {
namespace ufunc {
template <typename T>
C10_HOST_DEVICE C10_ALWAYS_INLINE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
return self + alpha * other;
}
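// For example (illustrative only): add(2.0f, 3.0f, 0.5f) evaluates to
// 2.0f + 0.5f * 3.0f == 3.5f, matching torch.add(self, other, alpha=alpha).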
#if !defined(__CUDACC__) && !defined(__HIPCC__)
using vec::Vectorized;
template <typename T>
C10_ALWAYS_INLINE Vectorized<T> add(Vectorized<T> self, Vectorized<T> other, Vectorized<T> alpha) __ubsan_ignore_undefined__ {
return vec::fmadd(other, alpha, self);
}
#endif
}}} // namespace at::native::ufunc
| 680
| 23.321429
| 126
|
h
|
null |
pytorch-main/aten/src/ATen/native/utils/Factory.h
|
#pragma once
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace mobile {
Tensor allocate_padded_contiguous_if_needed(
const Tensor& input,
c10::MemoryFormat memory_format);
// TODO: Remove this function when at::native::empty() is modified to accept a
// custom memory allocator.
at::Tensor empty_with_tail_padding(
IntArrayRef size,
const caffe2::TypeMeta dtype,
c10::MemoryFormat memory_format,
c10::optional<DimnameList> maybe_names);
} // namespace mobile
} // namespace native
} // namespace at
| 553
| 21.16
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/utils/ParamUtils.h
|
#pragma once
#include <c10/util/ArrayRef.h>
#include <vector>
namespace at {
namespace native {
template <typename T>
inline std::vector<T> _expand_param_if_needed(
ArrayRef<T> list_param,
const char* param_name,
int64_t expected_dim) {
if (list_param.size() == 1) {
return std::vector<T>(expected_dim, list_param[0]);
} else if ((int64_t)list_param.size() != expected_dim) {
std::ostringstream ss;
ss << "expected " << param_name << " to be a single integer value or a "
<< "list of " << expected_dim << " values to match the convolution "
<< "dimensions, but got " << param_name << "=" << list_param;
AT_ERROR(ss.str());
} else {
return list_param.vec();
}
}
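// Illustrative example (not part of this header): for a 2-d convolution,
// expand_param_if_needed({3}, "stride", 2) returns {3, 3}, while a mismatched
// list such as {1, 2, 3} raises an error naming the parameter.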
inline std::vector<int64_t> expand_param_if_needed(
IntArrayRef list_param,
const char* param_name,
int64_t expected_dim) {
return _expand_param_if_needed(list_param, param_name, expected_dim);
}
inline std::vector<c10::SymInt> expand_param_if_needed(
SymIntArrayRef list_param,
const char* param_name,
int64_t expected_dim) {
return _expand_param_if_needed(list_param, param_name, expected_dim);
}
} // namespace native
} // namespace at
| 1,188
| 26.651163
| 76
|
h
|
null |
pytorch-main/aten/src/ATen/native/utils/ParamsHash.h
|
#pragma once
#include <memory>
#include <mutex>
namespace at::native {
// Hashing machinery for Params
// Fowler–Noll–Vo hash function
// see https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
template <typename Params>
struct ParamsHash {
// Params must be a POD because we read out its memory
// contents as char* when hashing
static_assert(std::is_standard_layout_v<Params>, "Params is not POD");
size_t operator()(const Params& params) const {
auto ptr = reinterpret_cast<const uint8_t*>(¶ms);
uint32_t value = 0x811C9DC5;
for (const auto i : c10::irange(sizeof(Params))) {
value ^= ptr[i];
value *= 0x01000193;
}
return (size_t)value;
}
};
template <typename Params>
struct ParamsEqual {
// Params must be a POD because we read out its memory
// contents as char* when comparing
static_assert(std::is_standard_layout_v<Params>, "Params is not POD");
bool operator()(const Params& a, const Params& b) const {
auto ptr1 = reinterpret_cast<const uint8_t*>(&a);
auto ptr2 = reinterpret_cast<const uint8_t*>(&b);
return memcmp(ptr1, ptr2, sizeof(Params)) == 0;
}
};
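// Usage sketch (ConvParams and Result are hypothetical types): ParamsHash and
// ParamsEqual are meant to be plugged into std::unordered_map as the hash and
// key-equality functors, e.g.
//
//   std::unordered_map<ConvParams, Result, ParamsHash<ConvParams>,
//                      ParamsEqual<ConvParams>> cache;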
// Provide explicit byte-for-byte constructors to avoid unwittingly leaving
// padding bytes uninitialized (e.g., when passing Params by value)
template <typename T>
struct ParamsWrapper {
T pod;
static_assert(std::is_standard_layout_v<T>, "ParamsWrapper cannot wrap non-POD data");
ParamsWrapper() {
memset(&(this->pod), 0, sizeof(this->pod));
}
ParamsWrapper(const ParamsWrapper &other) {
memcpy(&(this->pod), &(other.pod), sizeof(this->pod));
}
ParamsWrapper(ParamsWrapper &&other) {
memcpy(&(this->pod), &(other.pod), sizeof(this->pod));
}
ParamsWrapper& operator=(const ParamsWrapper &other) {
memcpy(&(this->pod), &(other.pod), sizeof(this->pod));
return *this;
}
ParamsWrapper& operator=(ParamsWrapper &&other) {
memcpy(&(this->pod), &(other.pod), sizeof(this->pod));
return *this;
}
inline friend bool operator==(const ParamsWrapper &lhs, const ParamsWrapper &rhs) {
auto ptr1 = reinterpret_cast<const uint8_t*>(&(lhs.pod));
auto ptr2 = reinterpret_cast<const uint8_t*>(&(rhs.pod));
return memcmp(ptr1, ptr2, sizeof(lhs.pod)) == 0;
}
};
// Wrapped version: this allows the outer struct to have custom copy and move
// constructors for additional safety
template <typename ParamsWrapper>
struct ParamsWrapperHash {
// Params must be a POD because we read out its memory
// contents as char* when hashing
static_assert(std::is_standard_layout_v<decltype(ParamsWrapper::pod)>, "ParamsWrapper cannot wrap non-POD data");
size_t operator()(const ParamsWrapper& params_wrapper) const {
auto ptr = reinterpret_cast<const uint8_t*>(&(params_wrapper.pod));
uint32_t value = 0x811C9DC5;
for (const auto i : c10::irange(sizeof(params_wrapper.pod))) {
value ^= ptr[i];
value *= 0x01000193;
}
return (size_t)value;
}
};
} // at::native
| 3,013
| 30.072165
| 115
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/VulkanOpaqueTensorImpl.h
|
#pragma once
#include <ATen/OpaqueTensorImpl.h>
namespace at {
// The only difference from OpaqueTensorImpl is faking strides(), stride(),
// is_contiguous(). The main intention is to be able to run a TorchScript
// model on the Vulkan backend. Strides are not supported on the Vulkan side
// yet; support is planned.
template <typename OpaqueHandle>
struct VulkanOpaqueTensorImpl : public OpaqueTensorImpl<OpaqueHandle> {
VulkanOpaqueTensorImpl(
at::DispatchKeySet key_set,
const caffe2::TypeMeta data_type,
c10::Device device,
OpaqueHandle opaque_handle,
c10::IntArrayRef sizes,
c10::IntArrayRef strides)
: OpaqueTensorImpl<OpaqueHandle>(
key_set,
data_type,
device,
opaque_handle,
sizes,
false),
strides_(strides.vec()) {}
IntArrayRef strides_custom() const override {
return strides_;
}
SymIntArrayRef sym_strides_custom() const override {
return c10::fromIntArrayRefKnownNonNegative(strides_);
}
bool is_contiguous_custom(c10::MemoryFormat memory_format) const override {
return true;
}
private:
const char* tensorimpl_type_name() const override {
return "VulkanOpaqueTensorImpl";
}
// TODO: storing strides separately is unnecessary, the base TensorImpl
// has space for them
SmallVector<int64_t, 5> strides_;
};
} // namespace at
| 1,405
| 26.568627
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Adapter.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Pipeline.h>
#include <ATen/native/vulkan/api/Shader.h>
#include <ATen/native/vulkan/api/Utils.h>
#include <mutex>
#include <ostream>
namespace at {
namespace native {
namespace vulkan {
namespace api {
struct PhysicalDevice final {
// Handle
VkPhysicalDevice handle;
// Properties obtained from Vulkan
VkPhysicalDeviceProperties properties;
VkPhysicalDeviceMemoryProperties memory_properties;
std::vector<VkQueueFamilyProperties> queue_families;
// Metadata
uint32_t num_compute_queues;
bool has_unified_memory;
bool has_timestamps;
float timestamp_period;
explicit PhysicalDevice(const VkPhysicalDevice);
};
class DeviceHandle final {
public:
explicit DeviceHandle(const VkDevice device);
DeviceHandle(const DeviceHandle&) = delete;
DeviceHandle& operator=(const DeviceHandle&) = delete;
DeviceHandle(DeviceHandle&&) noexcept;
DeviceHandle& operator=(DeviceHandle&&) = delete;
~DeviceHandle();
private:
VkDevice handle_;
friend class Adapter;
};
//
// A Vulkan Adapter represents a logical device and all its properties. It
// manages all relevant properties of the underlying physical device, a
// handle to the logical device, and a number of compute queues available to
// the device. It is primarily responsible for managing the VkDevice handle
// which points to the logical device object on the GPU.
//
// This class is primarily used by the Runtime class, which holds one Adapter
// instance for each physical device visible to the VkInstance. Upon
// construction, this class will populate the physical device properties, but
// will not create the logical device until specifically requested via the
// init_device() function.
//
// init_device() will create the logical device and obtain the VkDevice handle
// for it. It will also create a number of compute queues up to the amount
// requested when the Adapter instance was constructed.
//
// Contexts (which represent one thread of execution) will request a compute
// queue from an Adapter. The Adapter will then select a compute queue to
// assign to the Context, attempting to balance load between all available
// queues. This will allow different Contexts (which typically execute on
// separate threads) to run concurrently.
//
#define NUM_QUEUE_MUTEXES 4
class Adapter final {
public:
explicit Adapter(
const VkInstance instance,
const PhysicalDevice& physical_device,
const uint32_t num_queues);
Adapter(const Adapter&) = delete;
Adapter& operator=(const Adapter&) = delete;
Adapter(Adapter&&) = delete;
Adapter& operator=(Adapter&&) = delete;
~Adapter() = default;
struct Queue {
uint32_t family_index;
uint32_t queue_index;
VkQueueFlags capabilities;
VkQueue handle;
};
private:
// Use a mutex to manage queue usage info since
// it can be accessed from multiple threads
std::mutex queue_usage_mutex_;
// Physical Device Info
PhysicalDevice physical_device_;
// Queue Management
std::vector<Queue> queues_;
std::vector<uint32_t> queue_usage_;
std::array<std::mutex, NUM_QUEUE_MUTEXES> queue_mutexes_;
// Handles
VkInstance instance_;
DeviceHandle device_;
// Device-level resource caches
ShaderLayoutCache shader_layout_cache_;
ShaderCache shader_cache_;
PipelineLayoutCache pipeline_layout_cache_;
ComputePipelineCache compute_pipeline_cache_;
// Memory Management
SamplerCache sampler_cache_;
MemoryAllocator vma_;
public:
// Physical Device metadata
inline VkPhysicalDevice physical_handle() const {
return physical_device_.handle;
}
inline VkDevice device_handle() const {
return device_.handle_;
}
inline bool has_unified_memory() const {
return physical_device_.has_unified_memory;
}
inline uint32_t num_compute_queues() const {
return physical_device_.num_compute_queues;
}
inline bool timestamp_compute_and_graphics() const {
return physical_device_.has_timestamps;
}
inline float timestamp_period() const {
return physical_device_.timestamp_period;
}
// Queue Management
Queue request_queue();
void return_queue(Queue&);
// Caches
inline ShaderLayoutCache& shader_layout_cache() {
return shader_layout_cache_;
}
inline ShaderCache& shader_cache() {
return shader_cache_;
}
inline PipelineLayoutCache& pipeline_layout_cache() {
return pipeline_layout_cache_;
}
inline ComputePipelineCache& compute_pipeline_cache() {
return compute_pipeline_cache_;
}
// Memory Allocation
inline SamplerCache& sampler_cache() {
return sampler_cache_;
}
inline MemoryAllocator& vma() {
return vma_;
}
// Command Buffer Submission
void submit_cmd(
const Queue&,
const VkCommandBuffer,
const VkFence fence = VK_NULL_HANDLE);
void submit_cmds(
const Adapter::Queue&,
const std::vector<VkCommandBuffer>&,
const VkFence fence = VK_NULL_HANDLE);
// Miscellaneous
inline utils::uvec3 local_work_group_size() const {
return {
4u,
4u,
4u,
};
}
std::string stringize() const;
friend std::ostream& operator<<(std::ostream&, const Adapter&);
};
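// Usage sketch (illustrative only; adapter_ptr and cmd_buffer_handle are
// assumed to exist): a Context typically borrows a queue, submits work on it,
// and hands it back so the Adapter can balance load across compute queues.
//
//   Adapter::Queue queue = adapter_ptr->request_queue();
//   adapter_ptr->submit_cmd(queue, cmd_buffer_handle);
//   adapter_ptr->return_queue(queue);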
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 5,390
| 23.958333
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Allocator.h
|
#pragma once
//
// Do NOT include vk_mem_alloc.h directly.
// Always include this file (Allocator.h) instead.
//
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#define VMA_VULKAN_VERSION 1000000
#ifdef USE_VULKAN_WRAPPER
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#else
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0
#endif /* USE_VULKAN_WRAPPER */
#define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (32ull * 1024 * 1024)
#define VMA_SMALL_HEAP_MAX_SIZE (256ull * 1024 * 1024)
#define VMA_STATS_STRING_ENABLED 0
#ifdef VULKAN_DEBUG
#define VMA_DEBUG_ALIGNMENT 4096
#define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY 0
#define VMA_DEBUG_DETECT_CORRUPTION 1
#define VMA_DEBUG_GLOBAL_MUTEX 1
#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
#define VMA_DEBUG_MARGIN 64
#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY 256
#define VMA_RECORDING_ENABLED 1
#define VMA_DEBUG_LOG(format, ...)
/*
#define VMA_DEBUG_LOG(format, ...) do { \
printf(format, __VA_ARGS__); \
printf("\n"); \
} while(false)
*/
#endif /* VULKAN_DEBUG */
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnullability-completeness"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif /* __clang__ */
#include <include/vk_mem_alloc.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif /* __clang__ */
#endif /* USE_VULKAN_API */
| 1,338
| 22.491228
| 63
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Command.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Descriptor.h>
#include <ATen/native/vulkan/api/Pipeline.h>
#include <ATen/native/vulkan/api/Resource.h>
#include <ATen/native/vulkan/api/Shader.h>
#include <c10/util/ArrayRef.h>
namespace at {
namespace native {
namespace vulkan {
namespace api {
class CommandBuffer final {
public:
explicit CommandBuffer(
const VkCommandBuffer,
const VkCommandBufferUsageFlags);
CommandBuffer(const CommandBuffer&) = delete;
CommandBuffer& operator=(const CommandBuffer&) = delete;
CommandBuffer(CommandBuffer&&) noexcept;
CommandBuffer& operator=(CommandBuffer&&) noexcept;
~CommandBuffer() = default;
// The lifecycle of a command buffer is as follows:
enum State {
INVALID, // Used to indicate the command buffer has been moved from
NEW, // Set during constructor
RECORDING, // Set during call to begin(), dispatch(), and
// copy_*_to_*()
PIPELINE_BOUND, // Set during call to bind_pipeline()
DESCRIPTORS_BOUND, // Set during call to bind_descriptors()
BARRIERS_INSERTED, // Set during call to insert_barrier()
READY, // Set during call to end()
SUBMITTED, // Set during call to get_submit_handle()
};
struct Bound {
VkPipeline pipeline;
VkPipelineLayout pipeline_layout;
utils::uvec3 local_workgroup_size;
VkDescriptorSet descriptors;
explicit Bound()
: pipeline{VK_NULL_HANDLE},
pipeline_layout{VK_NULL_HANDLE},
local_workgroup_size{0u, 0u, 0u},
descriptors{VK_NULL_HANDLE} {}
inline void reset() {
pipeline = VK_NULL_HANDLE;
pipeline_layout = VK_NULL_HANDLE;
local_workgroup_size = {0u, 0u, 0u};
descriptors = VK_NULL_HANDLE;
}
};
private:
VkCommandBuffer handle_;
VkCommandBufferUsageFlags flags_;
State state_;
Bound bound_;
public:
inline bool is_reusable() {
return !(flags_ & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
}
inline void invalidate() {
handle_ = VK_NULL_HANDLE;
bound_.reset();
}
void begin();
void end();
void bind_pipeline(
const VkPipeline,
const VkPipelineLayout,
const utils::uvec3);
void bind_descriptors(const VkDescriptorSet);
void insert_barrier(const PipelineBarrier& pipeline_barrier);
void dispatch(const utils::uvec3&);
void copy_buffer_to_buffer(
const api::VulkanBuffer&,
const api::VulkanBuffer&,
const api::utils::uvec3&,
const api::utils::uvec3&,
const api::utils::uvec3&);
void copy_texture_to_texture(
const api::VulkanImage&,
const api::VulkanImage&,
const api::utils::uvec3&,
const api::utils::uvec3&,
const api::utils::uvec3&);
void copy_texture_to_buffer(
const api::VulkanImage&,
const api::VulkanBuffer&,
const api::utils::uvec3&,
const api::utils::uvec3&,
const api::utils::uvec3&);
void copy_buffer_to_texture(
const api::VulkanBuffer&,
const api::VulkanImage&,
const api::utils::uvec3&,
const api::utils::uvec3&,
const api::utils::uvec3&);
void write_timestamp(const VkQueryPool, const uint32_t) const;
void reset_querypool(const VkQueryPool, const uint32_t, const uint32_t) const;
VkCommandBuffer get_submit_handle(const bool final_use = false);
inline operator bool() const {
return VK_NULL_HANDLE != handle_;
}
};
struct CommandPoolConfig final {
uint32_t cmdPoolInitialSize;
uint32_t cmdPoolBatchSize;
};
class CommandPool final {
public:
explicit CommandPool(
const VkDevice,
const uint32_t,
const CommandPoolConfig&);
CommandPool(const CommandPool&) = delete;
CommandPool& operator=(const CommandPool&) = delete;
CommandPool(CommandPool&&) = delete;
CommandPool& operator=(CommandPool&&) = delete;
~CommandPool();
private:
VkDevice device_;
uint32_t queue_family_idx_;
VkCommandPool pool_;
CommandPoolConfig config_;
// New Buffers
std::mutex mutex_;
std::vector<VkCommandBuffer> buffers_;
size_t in_use_;
public:
CommandBuffer get_new_cmd(bool reusable = false);
void flush();
private:
void allocate_new_batch(const uint32_t);
};
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 4,362
| 23.931429
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Common.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <c10/util/Exception.h>
#include <utility>
#include <ATen/native/vulkan/api/vk_api.h>
/*
* Check that the return code of a Vulkan API call is VK_SUCCESS, throwing an
* error with the returned code if not. If STRIP_ERROR_MESSAGES is defined then
* only the return code will be preserved.
*/
#ifdef STRIP_ERROR_MESSAGES
#define VK_CHECK(function) \
do { \
const VkResult result = (function); \
if (VK_SUCCESS != result) { \
throw c10::Error( \
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
c10::str(result)); \
} \
} while (false)
#else
#define VK_CHECK(function) \
do { \
const VkResult result = (function); \
if (VK_SUCCESS != result) { \
throw c10::Error( \
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
c10::str( \
C10_STRINGIZE(__FILE__), \
"[", \
C10_STRINGIZE(__LINE__), \
"] Expected VK_SUCCESS, got VkResult of ", \
result)); \
} \
} while (false)
#endif /* STRIP_ERROR_MESSAGES */
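/*
 * Usage sketch (illustrative; `device` and `fence_create_info` are assumed to
 * be a valid VkDevice and VkFenceCreateInfo set up elsewhere):
 *
 *   VkFence fence{VK_NULL_HANDLE};
 *   VK_CHECK(vkCreateFence(device, &fence_create_info, nullptr, &fence));
 */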
#endif /* USE_VULKAN_API */
| 1,881
| 42.767442
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Context.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Adapter.h>
#include <ATen/native/vulkan/api/Command.h>
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Descriptor.h>
#include <ATen/native/vulkan/api/Pipeline.h>
#include <ATen/native/vulkan/api/QueryPool.h>
#include <ATen/native/vulkan/api/Resource.h>
#include <ATen/native/vulkan/api/Runtime.h>
#include <ATen/native/vulkan/api/Shader.h>
namespace at {
namespace native {
namespace vulkan {
namespace api {
struct ContextConfig final {
uint32_t cmdSubmitFrequency;
CommandPoolConfig cmdPoolConfig;
DescriptorPoolConfig descriptorPoolConfig;
QueryPoolConfig queryPoolConfig;
};
//
// Vulkan Context holds onto all relevant Vulkan state as it pertains to our
// use of Vulkan in PyTorch. A Context is associated with one, and only one,
// Adapter as a precursor to multi-GPU support. All Vulkan tensors in PyTorch
// are associated with a Context to make tensor <-> device affinity explicit.
// The context is currently a global object, but technically it does not need
// to be if we were to make it explicit to the user.
//
class Context final {
public:
explicit Context(size_t adapter_i, const ContextConfig&);
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;
Context(Context&&) = delete;
Context& operator=(Context&&) = delete;
~Context();
private:
// Config
ContextConfig config_;
// Important handles
Adapter* adapter_p_;
VkDevice device_;
Adapter::Queue queue_;
// Resource Pools
CommandPool command_pool_;
DescriptorPool descriptor_pool_;
FencePool fences_;
// Diagnostics
// TODO: remove USE_VULKAN_GPU_DIAGNOSTICS
bool enable_op_profiling_{false};
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
QueryPool querypool_;
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
// Command buffers submission
std::mutex cmd_mutex_;
CommandBuffer cmd_;
uint32_t submit_count_;
// Memory Management
std::mutex buffer_clearlist_mutex_;
std::vector<VulkanBuffer> buffers_to_clear_;
std::mutex image_clearlist_mutex_;
std::vector<VulkanImage> images_to_clear_;
public:
// Adapter access
inline Adapter* adapter_ptr() {
return adapter_p_;
}
inline void enable_op_profiling() {
enable_op_profiling_ = true;
}
inline void disable_op_profiling() {
enable_op_profiling_ = false;
}
inline bool op_profiling_enabled() {
return enable_op_profiling_;
}
inline VkDevice device() {
return device_;
}
inline VkQueue queue() {
return queue_.handle;
}
// Device Caches
inline ShaderLayoutCache& shader_layout_cache() {
return adapter_ptr()->shader_layout_cache();
}
inline ShaderCache& shader_cache() {
return adapter_ptr()->shader_cache();
}
inline PipelineLayoutCache& pipeline_layout_cache() {
return adapter_ptr()->pipeline_layout_cache();
}
inline ComputePipelineCache& pipeline_cache() {
return adapter_ptr()->compute_pipeline_cache();
}
// Resource Pools
inline DescriptorPool& descriptor_pool() {
return descriptor_pool_;
}
inline FencePool& fences() {
return fences_;
}
// Diagnostics
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
inline QueryPool& querypool() {
return querypool_;
}
inline void reset_querypool() {
set_cmd();
querypool_.reset(cmd_);
}
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
// Memory Management
void register_buffer_cleanup(VulkanBuffer& buffer) {
std::lock_guard<std::mutex> bufferlist_lock(buffer_clearlist_mutex_);
buffers_to_clear_.emplace_back(std::move(buffer));
}
void register_image_cleanup(VulkanImage& image) {
std::lock_guard<std::mutex> imagelist_lock(image_clearlist_mutex_);
images_to_clear_.emplace_back(std::move(image));
}
// GPU RPC
inline std::unique_lock<std::mutex> dispatch_lock() {
return std::unique_lock<std::mutex>(cmd_mutex_);
}
inline void set_cmd(bool reusable = false) {
if (!cmd_) {
cmd_ = command_pool_.get_new_cmd(reusable);
cmd_.begin();
}
}
private:
DescriptorSet submit_compute_prologue(
CommandBuffer&,
const ShaderInfo&,
const utils::uvec3&);
void submit_compute_epilogue(
CommandBuffer&,
const DescriptorSet&,
const PipelineBarrier&,
const utils::uvec3&);
public:
template <class S, class D>
void submit_copy(
const PipelineBarrier&,
const S&,
const D&,
const api::utils::uvec3&,
const api::utils::uvec3&,
const api::utils::uvec3&,
const VkFence fence_handle);
template <typename... Arguments>
void submit_compute_job(
const ShaderInfo&,
const PipelineBarrier&,
const utils::uvec3&,
const utils::uvec3&,
const VkFence fence_handle,
Arguments&&...);
void submit_cmd_to_gpu(
const VkFence fence_handle = VK_NULL_HANDLE,
const bool final_use = false);
void flush();
};
class UniformParamsBuffer final {
private:
Context* context_p_;
VulkanBuffer vulkan_buffer_;
public:
UniformParamsBuffer() : context_p_{nullptr}, vulkan_buffer_{} {}
template <typename Block>
UniformParamsBuffer(Context* context_p, const Block& block)
: context_p_(context_p),
vulkan_buffer_(
context_p_->adapter_ptr()->vma().create_params_buffer(block)) {}
UniformParamsBuffer(const UniformParamsBuffer&);
UniformParamsBuffer& operator=(const UniformParamsBuffer&);
UniformParamsBuffer(UniformParamsBuffer&&) = default;
UniformParamsBuffer& operator=(UniformParamsBuffer&&) = default;
~UniformParamsBuffer() {
if (vulkan_buffer_) {
context_p_->register_buffer_cleanup(vulkan_buffer_);
}
}
VulkanBuffer& buffer() {
return vulkan_buffer_;
}
};
class StorageBuffer final {
private:
Context* context_p_;
c10::ScalarType dtype_;
size_t numel_;
VulkanBuffer vulkan_buffer_;
public:
StorageBuffer(
Context* context_p,
const c10::ScalarType dtype,
const size_t numel,
const bool gpuonly = false)
: context_p_(context_p),
dtype_(dtype),
numel_(numel),
vulkan_buffer_(context_p_->adapter_ptr()->vma().create_storage_buffer(
c10::elementSize(dtype_) * numel_,
gpuonly)) {}
StorageBuffer(const StorageBuffer&) = delete;
StorageBuffer& operator=(const StorageBuffer&) = delete;
StorageBuffer(StorageBuffer&&) = default;
StorageBuffer& operator=(StorageBuffer&&) = default;
~StorageBuffer() {
context_p_->register_buffer_cleanup(vulkan_buffer_);
}
inline c10::ScalarType dtype() {
return dtype_;
}
inline VulkanBuffer& buffer() {
return vulkan_buffer_;
}
};
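// Usage sketch (illustrative only; `block` stands for any trivially copyable
// struct of shader parameters, `numel` for the element count): allocate a
// storage buffer for the output and wrap the parameters for a later dispatch.
//
//   StorageBuffer staging(context(), c10::ScalarType::Float, numel);
//   UniformParamsBuffer params(context(), block);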
bool available();
// The global runtime is retrieved using this function, where it is declared as
// a static local variable.
Context* context();
namespace detail {
template <size_t... Indices, typename... Arguments>
inline void bind(
DescriptorSet& descriptor_set,
const std::index_sequence<Indices...>,
Arguments&&... arguments) {
C10_UNUSED const int _[]{
0,
(descriptor_set.bind(Indices, std::forward<Arguments>(arguments)), 0)...,
};
}
} // namespace detail
template <class S, class D>
inline void record_copy(
CommandBuffer& cmd,
const S& source,
const D& destination,
const api::utils::uvec3& copy_range,
const api::utils::uvec3& src_offset,
const api::utils::uvec3& dst_offset) = delete;
template <>
inline void record_copy<VulkanBuffer, VulkanBuffer>(
CommandBuffer& cmd,
const VulkanBuffer& source,
const VulkanBuffer& destination,
const api::utils::uvec3& copy_range,
const api::utils::uvec3& src_offset,
const api::utils::uvec3& dst_offset) {
cmd.copy_buffer_to_buffer(
source, destination, copy_range, src_offset, dst_offset);
}
template <>
inline void record_copy<VulkanImage, VulkanImage>(
CommandBuffer& cmd,
const VulkanImage& source,
const VulkanImage& destination,
const api::utils::uvec3& copy_range,
const api::utils::uvec3& src_offset,
const api::utils::uvec3& dst_offset) {
cmd.copy_texture_to_texture(
source, destination, copy_range, src_offset, dst_offset);
}
template <>
inline void record_copy<VulkanImage, VulkanBuffer>(
CommandBuffer& cmd,
const VulkanImage& source,
const VulkanBuffer& destination,
const api::utils::uvec3& copy_range,
const api::utils::uvec3& src_offset,
const api::utils::uvec3& dst_offset) {
cmd.copy_texture_to_buffer(
source, destination, copy_range, src_offset, dst_offset);
}
template <>
inline void record_copy<VulkanBuffer, VulkanImage>(
CommandBuffer& cmd,
const VulkanBuffer& source,
const VulkanImage& destination,
const api::utils::uvec3& copy_range,
const api::utils::uvec3& src_offset,
const api::utils::uvec3& dst_offset) {
cmd.copy_buffer_to_texture(
source, destination, copy_range, src_offset, dst_offset);
}
template <class S, class D>
inline void Context::submit_copy(
const PipelineBarrier& pipeline_barrier,
const S& source,
const D& destination,
const api::utils::uvec3& copy_range,
const api::utils::uvec3& src_offset,
const api::utils::uvec3& dst_offset,
const VkFence fence_handle) {
// Serialize recording to the shared command buffer. Do not initialize with a
// mutex just yet, since in some cases it will be externally managed.
std::unique_lock<std::mutex> cmd_lock;
// Refer to comments in submit_compute_job for explanation.
if (fence_handle == VK_NULL_HANDLE) {
cmd_lock = std::unique_lock<std::mutex>(cmd_mutex_);
}
set_cmd();
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
uint32_t log_idx = UINT32_MAX;
if (enable_op_profiling_) {
std::string label = "cmd_copy";
log_idx = querypool_.shader_profile_begin(
cmd_, label, create_extent3d({0, 0, 0}), create_extent3d({0, 0, 0}));
}
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
cmd_.insert_barrier(pipeline_barrier);
record_copy(cmd_, source, destination, copy_range, src_offset, dst_offset);
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
if (enable_op_profiling_) {
querypool_.shader_profile_end(cmd_, log_idx);
}
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
submit_count_++;
if (fence_handle != VK_NULL_HANDLE ||
submit_count_ >= config_.cmdSubmitFrequency) {
submit_cmd_to_gpu(fence_handle);
}
}
template <typename... Arguments>
inline void Context::submit_compute_job(
const ShaderInfo& shader,
const PipelineBarrier& pipeline_barrier,
const utils::uvec3& global_work_group,
const utils::uvec3& local_work_group_size,
const VkFence fence_handle,
Arguments&&... arguments) {
// Serialize recording to the shared command buffer. Do not initialize with a
// mutex just yet, since in some cases it will be externally managed.
std::unique_lock<std::mutex> cmd_lock;
// If a fence was passed, then assume that the host intends to sync with
// the GPU, implying there will be imminent calls to fence.wait() and flush().
// We therefore assume the mutex is externally managed in this case, and the
// calling thread has already locked the mutex prior to calling the function,
// and will release the mutex manually after calling flush(). This will
// prevent more dispatches from being recorded until we have flushed the
// Context.
if (fence_handle == VK_NULL_HANDLE) {
cmd_lock = std::unique_lock<std::mutex>(cmd_mutex_);
}
set_cmd();
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
uint32_t log_idx = UINT32_MAX;
if (enable_op_profiling_) {
log_idx = querypool_.shader_profile_begin(
cmd_,
shader.kernel_name,
create_extent3d(global_work_group),
create_extent3d(local_work_group_size));
}
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
// Factor out template parameter independent code to minimize code bloat.
DescriptorSet descriptor_set =
submit_compute_prologue(cmd_, shader, local_work_group_size);
detail::bind(
descriptor_set,
std::index_sequence_for<Arguments...>{},
std::forward<Arguments>(arguments)...);
// Adjust the global workgroup size based on the output tile size
const utils::uvec3 effective_global_wg = {
utils::div_up(global_work_group.data[0u], shader.out_tile_size.data[0u]),
utils::div_up(global_work_group.data[1u], shader.out_tile_size.data[1u]),
utils::div_up(global_work_group.data[2u], shader.out_tile_size.data[2u]),
};
// Factor out template parameter independent code to minimize code bloat.
submit_compute_epilogue(
cmd_, descriptor_set, pipeline_barrier, effective_global_wg);
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
if (enable_op_profiling_) {
querypool_.shader_profile_end(cmd_, log_idx);
}
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
submit_count_++;
if (fence_handle != VK_NULL_HANDLE ||
submit_count_ >= config_.cmdSubmitFrequency) {
submit_cmd_to_gpu(fence_handle);
}
}
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 13,068
| 26.630021
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Descriptor.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Resource.h>
#include <ATen/native/vulkan/api/Shader.h>
#include <c10/util/flat_hash_map.h>
namespace at {
namespace native {
namespace vulkan {
namespace api {
class DescriptorSet final {
public:
explicit DescriptorSet(
const VkDevice,
const VkDescriptorSet,
const ShaderLayout::Signature&);
DescriptorSet(const DescriptorSet&) = delete;
DescriptorSet& operator=(const DescriptorSet&) = delete;
DescriptorSet(DescriptorSet&&) noexcept;
DescriptorSet& operator=(DescriptorSet&&) noexcept;
~DescriptorSet() = default;
struct ResourceBinding final {
uint32_t binding_idx;
VkDescriptorType descriptor_type;
bool is_image;
union {
VkDescriptorBufferInfo buffer_info;
VkDescriptorImageInfo image_info;
} resource_info;
};
private:
VkDevice device_;
VkDescriptorSet handle_;
ShaderLayout::Signature shader_layout_signature_;
c10::SmallVector<ResourceBinding, 6u> bindings_;
public:
DescriptorSet& bind(const uint32_t, const VulkanBuffer&);
DescriptorSet& bind(const uint32_t, const VulkanImage&);
VkDescriptorSet get_bind_handle() const;
private:
void add_binding(const ResourceBinding& resource);
};
class DescriptorSetPile final {
public:
DescriptorSetPile(
const uint32_t,
const VkDescriptorSetLayout,
const VkDevice,
const VkDescriptorPool);
DescriptorSetPile(const DescriptorSetPile&) = delete;
DescriptorSetPile& operator=(const DescriptorSetPile&) = delete;
DescriptorSetPile(DescriptorSetPile&&) = default;
DescriptorSetPile& operator=(DescriptorSetPile&&) = default;
~DescriptorSetPile() = default;
private:
uint32_t pile_size_;
VkDescriptorSetLayout set_layout_;
VkDevice device_;
VkDescriptorPool pool_;
std::vector<VkDescriptorSet> descriptors_;
size_t in_use_;
public:
VkDescriptorSet get_descriptor_set();
private:
void allocate_new_batch();
};
struct DescriptorPoolConfig final {
// Overall Pool capacity
uint32_t descriptorPoolMaxSets;
// DescriptorCounts by type
uint32_t descriptorUniformBufferCount;
uint32_t descriptorStorageBufferCount;
uint32_t descriptorCombinedSamplerCount;
uint32_t descriptorStorageImageCount;
// Pile size for pre-allocating descriptor sets
uint32_t descriptorPileSizes;
};
class DescriptorPool final {
public:
explicit DescriptorPool(const VkDevice, const DescriptorPoolConfig&);
DescriptorPool(const DescriptorPool&) = delete;
DescriptorPool& operator=(const DescriptorPool&) = delete;
DescriptorPool(DescriptorPool&&) = delete;
DescriptorPool& operator=(DescriptorPool&&) = delete;
~DescriptorPool();
private:
VkDevice device_;
VkDescriptorPool pool_;
DescriptorPoolConfig config_;
// New Descriptors
std::mutex mutex_;
ska::flat_hash_map<VkDescriptorSetLayout, DescriptorSetPile> piles_;
public:
DescriptorSet get_descriptor_set(
const VkDescriptorSetLayout handle,
const ShaderLayout::Signature& signature);
void flush();
};
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 3,220
| 23.037313
| 71
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Pipeline.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Resource.h>
#include <ATen/native/vulkan/api/Shader.h>
#include <c10/util/SmallVector.h>
#include <c10/util/flat_hash_map.h>
#include <mutex>
namespace at {
namespace native {
namespace vulkan {
namespace api {
struct PipelineBarrier final {
struct Stages final {
VkPipelineStageFlags src;
VkPipelineStageFlags dst;
} stage;
c10::SmallVector<BufferMemoryBarrier, 4u> buffers;
c10::SmallVector<ImageMemoryBarrier, 4u> images;
inline operator bool() const {
return (0u != stage.src) || (0u != stage.dst) || !buffers.empty() ||
!images.empty();
}
};
using PipelineStageFlags = uint8_t;
enum PipelineStage : PipelineStageFlags {
NO_STAGE = 0u << 0u,
COMPUTE = 1u << 0u,
HOST = 1u << 1u,
TRANSFER = 1u << 2u,
};
VkAccessFlags vk_access(const PipelineStageFlags, const MemoryAccessFlags);
VkPipelineStageFlags vk_stage(const PipelineStageFlags);
VkImageLayout vk_layout(const PipelineStageFlags, const MemoryAccessFlags);
class PipelineLayout final {
public:
explicit PipelineLayout(const VkDevice, const VkDescriptorSetLayout);
PipelineLayout(const PipelineLayout&) = delete;
PipelineLayout& operator=(const PipelineLayout&) = delete;
PipelineLayout(PipelineLayout&&) noexcept;
PipelineLayout& operator=(PipelineLayout&&) = delete;
~PipelineLayout();
private:
VkDevice device_;
VkPipelineLayout handle_;
public:
VkPipelineLayout handle() const {
return handle_;
}
// We need to define a custom swap function since this class
// does not allow for move assignment. The swap function will
// be used in the hash map.
friend void swap(PipelineLayout& lhs, PipelineLayout& rhs) noexcept;
};
class ComputePipeline final {
public:
struct Descriptor final {
VkPipelineLayout pipeline_layout;
VkShaderModule shader_module;
utils::uvec3 local_work_group;
};
explicit ComputePipeline(
const VkDevice device,
const Descriptor& descriptor,
const VkPipelineCache pipeline_cache);
ComputePipeline(const ComputePipeline&) = delete;
ComputePipeline& operator=(const ComputePipeline&) = delete;
ComputePipeline(ComputePipeline&&) noexcept;
ComputePipeline& operator=(ComputePipeline&&) = delete;
~ComputePipeline();
private:
VkDevice device_;
VkPipeline handle_;
public:
inline VkPipeline handle() const {
return handle_;
}
// We need to define a custom swap function since this class
// does not allow for move assignment. The swap function will
// be used in the hash map.
friend void swap(ComputePipeline& lhs, ComputePipeline& rhs) noexcept;
};
class PipelineLayoutCache final {
public:
explicit PipelineLayoutCache(const VkDevice device);
PipelineLayoutCache(const PipelineLayoutCache&) = delete;
PipelineLayoutCache& operator=(const PipelineLayoutCache&) = delete;
PipelineLayoutCache(PipelineLayoutCache&&) noexcept;
PipelineLayoutCache& operator=(PipelineLayoutCache&&) = delete;
~PipelineLayoutCache();
using Key = VkDescriptorSetLayout;
using Value = PipelineLayout;
struct Hasher {
inline size_t operator()(
const VkDescriptorSetLayout descriptor_layout) const {
return c10::get_hash(descriptor_layout);
}
};
private:
// Multiple threads could potentially be adding entries into the cache, so use
// a mutex to manage access
std::mutex cache_mutex_;
VkDevice device_;
ska::flat_hash_map<Key, Value, Hasher> cache_;
public:
VkPipelineLayout retrieve(const Key&);
void purge();
};
class ComputePipelineCache final {
public:
explicit ComputePipelineCache(const VkDevice device);
ComputePipelineCache(const ComputePipelineCache&) = delete;
ComputePipelineCache& operator=(const ComputePipelineCache&) = delete;
ComputePipelineCache(ComputePipelineCache&&) noexcept;
ComputePipelineCache& operator=(ComputePipelineCache&&) = delete;
~ComputePipelineCache();
using Key = ComputePipeline::Descriptor;
using Value = ComputePipeline;
struct Hasher {
inline size_t operator()(
const ComputePipeline::Descriptor& descriptor) const {
return c10::get_hash(
descriptor.pipeline_layout,
descriptor.shader_module,
descriptor.local_work_group.data[0u],
descriptor.local_work_group.data[1u],
descriptor.local_work_group.data[2u]);
};
};
private:
// Multiple threads could potentially be adding entries into the cache, so use
// a mutex to manage access
std::mutex cache_mutex_;
VkDevice device_;
VkPipelineCache pipeline_cache_;
ska::flat_hash_map<Key, Value, Hasher> cache_;
public:
VkPipeline retrieve(const Key&);
void purge();
};
//
// Impl
//
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 4,899
| 24.128205
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/QueryPool.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Adapter.h>
#include <ATen/native/vulkan/api/Command.h>
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Pipeline.h>
namespace at {
namespace native {
namespace vulkan {
namespace api {
struct QueryPoolConfig final {
uint32_t maxQueryCount;
uint32_t initialReserveSize;
};
struct ShaderDuration final {
uint32_t idx;
// Execution Properties
std::string kernel_name;
VkExtent3D global_workgroup_size;
VkExtent3D local_workgroup_size;
// Query indexes
uint32_t start_query_idx;
uint32_t end_query_idx;
// Timings
uint64_t start_time_ns;
uint64_t end_time_ns;
uint64_t execution_duration_ns;
};
class QueryPool final {
public:
explicit QueryPool(const QueryPoolConfig&, const Adapter* adapter_p);
QueryPool(const QueryPool&) = delete;
QueryPool& operator=(const QueryPool&) = delete;
QueryPool(QueryPool&&) = delete;
QueryPool& operator=(QueryPool&&) = delete;
~QueryPool();
private:
std::mutex mutex_;
VkDevice device_;
QueryPoolConfig config_;
VkQueryPool querypool_;
std::vector<std::vector<ShaderDuration>> shader_logs_;
size_t in_use_;
/** Total number of entries in shader logs from before the most recent reset */
size_t previous_shader_count_;
/**
* Indicates whether there are new log entries in the shader log since the
* most recent call to extract_results()
*/
bool results_pending_;
private:
size_t write_timestamp(const CommandBuffer&);
std::string generate_string_report();
/** Most recent shader log since the last time the QueryPool was reset */
inline std::vector<ShaderDuration>& shader_log() {
return shader_logs_[shader_logs_.size() - 1];
}
/** Total number of entries in all shader logs, but without locking the mutex */
size_t shader_logs_entry_count_thread_unsafe();
public:
inline bool is_enabled() const {
return VK_NULL_HANDLE != querypool_;
}
void reset(const CommandBuffer&);
uint32_t shader_profile_begin(
const CommandBuffer&,
const std::string&,
const VkExtent3D,
const VkExtent3D);
void shader_profile_end(const CommandBuffer&, const uint32_t);
void extract_results();
void print_results();
uint64_t get_total_op_ns(std::string op_name);
uint64_t ns_per_tick_;
void shader_log_for_each(std::function<void(const ShaderDuration&)> fn);
/**
* query_index is what number entry across all of the QueryPool's shader logs
* is being queried, regardless of resets. This may be different than
* ShaderDuration's idx field, which is what number entry it is since the last
* reset before it was added to the shader logs.
*/
std::tuple<std::string, uint64_t> get_shader_name_and_execution_duration_ns(
size_t query_index);
/** Total number of entries in all shader logs */
size_t shader_logs_entry_count();
};
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 3,019
| 24.166667
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Resource.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Allocator.h>
#include <ATen/native/vulkan/api/Utils.h>
#include <c10/core/ScalarType.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/typeid.h>
#include <stack>
namespace at {
namespace native {
namespace vulkan {
namespace api {
typedef uint8_t MemoryAccessFlags;
VkFormat vk_format(const at::ScalarType dtype);
c10::ScalarType c10_scalartype(const VkFormat image_format);
constexpr VmaAllocationCreateFlags DEFAULT_ALLOCATION_STRATEGY =
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
enum MemoryAccessType : MemoryAccessFlags {
NONE = 0u << 0u,
READ = 1u << 0u,
WRITE = 1u << 1u,
};
struct MemoryBarrier final {
VkMemoryBarrier handle;
MemoryBarrier(
const VkAccessFlags src_access_flags,
const VkAccessFlags dst_access_flags);
};
class VulkanBuffer final {
public:
struct MemoryProperties final {
VmaAllocationCreateFlags create_flags;
VmaMemoryUsage memory_usage;
VkMemoryPropertyFlags required_mem_flags;
VkMemoryPropertyFlags preferred_mem_flags;
VkBufferUsageFlags buffer_usage;
};
struct BufferProperties final {
VkDeviceSize size;
VkDeviceSize mem_offset;
VkDeviceSize mem_range;
};
explicit VulkanBuffer();
explicit VulkanBuffer(
const VmaAllocator,
const VkDeviceSize,
const MemoryProperties&);
VulkanBuffer(const VulkanBuffer&) = delete;
VulkanBuffer& operator=(const VulkanBuffer&) = delete;
VulkanBuffer(VulkanBuffer&&) noexcept;
VulkanBuffer& operator=(VulkanBuffer&&) noexcept;
~VulkanBuffer();
struct Package final {
VkBuffer handle;
VkDeviceSize buffer_offset;
VkDeviceSize buffer_range;
};
friend struct BufferMemoryBarrier;
private:
MemoryProperties memory_properties_;
BufferProperties buffer_properties_;
// The allocator object this was allocated from
VmaAllocator allocator_;
// Handles to the allocated memory
VmaAllocation allocation_;
VkBuffer handle_;
public:
inline VmaAllocator vma_allocator() const {
return allocator_;
}
inline VmaAllocation allocation() const {
return allocation_;
}
inline VkBuffer handle() const {
return handle_;
}
inline VkDeviceSize mem_offset() const {
return buffer_properties_.mem_offset;
}
inline VkDeviceSize mem_range() const {
return buffer_properties_.mem_range;
}
inline VkDeviceSize mem_size() const {
return buffer_properties_.size;
}
operator bool() const {
return (allocation_ != VK_NULL_HANDLE);
}
};
class MemoryMap final {
public:
explicit MemoryMap(
const VulkanBuffer& buffer,
const MemoryAccessFlags access);
MemoryMap(const MemoryMap&) = delete;
MemoryMap& operator=(const MemoryMap&) = delete;
MemoryMap(MemoryMap&&) noexcept;
MemoryMap& operator=(MemoryMap&&) = delete;
~MemoryMap();
private:
uint8_t access_;
VmaAllocator allocator_;
VmaAllocation allocation_;
void* data_;
VkDeviceSize data_len_;
public:
template <typename T>
T* data() {
return reinterpret_cast<T*>(data_);
}
inline size_t nbytes() {
return utils::safe_downcast<size_t>(data_len_);
}
void invalidate();
};
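// Typical mapping usage (illustrative sketch; `staging` is a placeholder
// host-visible VulkanBuffer):
//   MemoryMap mapping(staging, MemoryAccessType::READ);
//   mapping.invalidate(); // assumed to make device writes visible to the host
//   const float* data_ptr = mapping.data<float>();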
struct BufferMemoryBarrier final {
VkBufferMemoryBarrier handle;
BufferMemoryBarrier(
const VkAccessFlags src_access_flags,
const VkAccessFlags dst_access_flags,
const VulkanBuffer& buffer);
};
class ImageSampler final {
public:
struct Properties final {
VkFilter filter;
VkSamplerMipmapMode mipmap_mode;
VkSamplerAddressMode address_mode;
VkBorderColor border_color;
};
explicit ImageSampler(const VkDevice, const Properties&);
ImageSampler(const ImageSampler&) = delete;
ImageSampler& operator=(const ImageSampler&) = delete;
ImageSampler(ImageSampler&&) noexcept;
ImageSampler& operator=(ImageSampler&&) = delete;
~ImageSampler();
private:
VkDevice device_;
VkSampler handle_;
public:
VkSampler handle() const {
return handle_;
}
struct Hasher {
size_t operator()(const Properties&) const;
};
// We need to define a custom swap function since this class
// does not allow for move assignment. The swap function will
// be used in the hash map.
friend void swap(ImageSampler& lhs, ImageSampler& rhs) noexcept;
};
class VulkanImage final {
public:
struct MemoryProperties final {
VmaAllocationCreateFlags create_flags;
VmaMemoryUsage memory_usage;
VkMemoryPropertyFlags required_mem_flags;
VkMemoryPropertyFlags preferred_mem_flags;
VkImageUsageFlags image_usage;
};
struct ImageProperties final {
VkImageType image_type;
VkFormat image_format;
VkExtent3D image_extents;
};
struct ViewProperties final {
VkImageViewType view_type;
VkFormat view_format;
};
typedef ImageSampler::Properties SamplerProperties;
struct Handles final {
VkImage image;
VkImageView image_view;
VkSampler sampler;
};
explicit VulkanImage();
explicit VulkanImage(
const VmaAllocator,
const VkDevice,
const MemoryProperties&,
const ImageProperties&,
const ViewProperties&,
const SamplerProperties&,
const VkImageLayout layout,
const VkSampler);
VulkanImage(const VulkanImage&) = delete;
VulkanImage& operator=(const VulkanImage&) = delete;
VulkanImage(VulkanImage&&) noexcept;
VulkanImage& operator=(VulkanImage&&) noexcept;
~VulkanImage();
struct Package final {
VkImage handle;
VkImageLayout image_layout;
VkImageView image_view;
VkSampler image_sampler;
};
friend struct ImageMemoryBarrier;
private:
MemoryProperties memory_properties_;
ImageProperties image_properties_;
ViewProperties view_properties_;
SamplerProperties sampler_properties_;
// The allocator object this was allocated from
VmaAllocator allocator_;
// Handles to the allocated memory
VmaAllocation allocation_;
Handles handles_;
// Layout
VkImageLayout layout_;
public:
inline VmaAllocator vma_allocator() const {
return allocator_;
}
inline VmaAllocation allocation() const {
return allocation_;
}
inline VkFormat format() const {
return image_properties_.image_format;
}
inline VkExtent3D extents() const {
return image_properties_.image_extents;
}
inline VkImage handle() const {
return handles_.image;
}
inline VkImageView image_view() const {
return handles_.image_view;
}
inline VkSampler sampler() const {
return handles_.sampler;
}
Package package() const {
return {
handles_.image,
layout_,
handles_.image_view,
handles_.sampler,
};
}
inline VkImageLayout layout() const {
return layout_;
}
inline void set_layout(const VkImageLayout layout) {
layout_ = layout;
}
inline operator bool() const {
return (allocation_ != VK_NULL_HANDLE);
}
};
struct ImageMemoryBarrier final {
VkImageMemoryBarrier handle;
ImageMemoryBarrier(
const VkAccessFlags src_access_flags,
const VkAccessFlags dst_access_flags,
const VkImageLayout src_layout_flags,
const VkImageLayout dst_layout_flags,
const VulkanImage& image);
};
class SamplerCache final {
public:
explicit SamplerCache(const VkDevice device);
SamplerCache(const SamplerCache&) = delete;
SamplerCache& operator=(const SamplerCache&) = delete;
SamplerCache(SamplerCache&&) noexcept;
SamplerCache& operator=(SamplerCache&&) = delete;
~SamplerCache();
typedef ImageSampler::Properties Key;
typedef ImageSampler Value;
typedef ImageSampler::Hasher Hasher;
private:
// Multiple threads could potentially be adding entries into the cache, so use
// a mutex to manage access
std::mutex cache_mutex_;
VkDevice device_;
ska::flat_hash_map<Key, Value, Hasher> cache_;
public:
VkSampler retrieve(const Key&);
void purge();
};
class MemoryAllocator final {
public:
explicit MemoryAllocator(
const VkInstance instance,
const VkPhysicalDevice physical_device,
const VkDevice device);
MemoryAllocator(const MemoryAllocator&) = delete;
MemoryAllocator& operator=(const MemoryAllocator&) = delete;
MemoryAllocator(MemoryAllocator&&) noexcept;
MemoryAllocator& operator=(MemoryAllocator&&) = delete;
~MemoryAllocator();
private:
VkInstance instance_;
VkPhysicalDevice physical_device_;
VkDevice device_;
VmaAllocator allocator_;
public:
VulkanImage create_image(
const VkExtent3D&,
const VkFormat,
const VkImageType,
const VkImageViewType,
const VulkanImage::SamplerProperties&,
const VkSampler,
const bool allow_transfer = false);
VulkanBuffer create_storage_buffer(
const VkDeviceSize,
const bool gpu_only = true);
VulkanBuffer create_staging_buffer(const VkDeviceSize);
/*
* Create a uniform buffer with a specified size
*/
VulkanBuffer create_uniform_buffer(const VkDeviceSize);
/*
* Create a uniform buffer containing the data in an arbitrary struct
*/
template <typename Block>
VulkanBuffer create_params_buffer(const Block& block);
};
class VulkanFence final {
public:
// TODO: This is required for the lazy allocation pattern in api/Tensor.
// It will be disabled pending future refactors.
explicit VulkanFence();
explicit VulkanFence(const VkDevice);
VulkanFence(const VulkanFence&) = delete;
VulkanFence& operator=(const VulkanFence&) = delete;
VulkanFence(VulkanFence&&) noexcept;
VulkanFence& operator=(VulkanFence&&) noexcept;
~VulkanFence();
private:
VkDevice device_;
VkFence handle_;
bool waiting_;
public:
// Used to get the handle for a queue submission.
VkFence get_submit_handle() {
if (handle_ != VK_NULL_HANDLE) {
// Indicate we are now waiting for this fence to be signaled
waiting_ = true;
}
return handle_;
}
VkFence handle() {
return handle_;
}
// Trigger a synchronous wait for the fence to be signaled
void wait();
bool waiting() const {
return waiting_;
}
operator bool() const {
return (VK_NULL_HANDLE != handle_);
}
};
// A pool to track created Fences and reuse ones that are available.
// Only intended to be modified by one thread at a time.
struct FencePool final {
VkDevice device_;
std::stack<VulkanFence> pool_;
explicit FencePool(const VkDevice device) : device_(device), pool_{} {}
// Returns a fence by value, moving it out of the pool (or constructing a new
// one if the pool is empty)
inline VulkanFence get_fence() {
if (pool_.empty()) {
VulkanFence new_fence = VulkanFence(device_);
return new_fence;
}
VulkanFence top_fence = std::move(pool_.top());
pool_.pop();
return top_fence;
}
// Marks the fence as available
inline void return_fence(VulkanFence& fence) {
pool_.push(std::move(fence));
}
};
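// Illustrative fence lifecycle when a synchronous wait on the GPU is needed
// (sketch only; `fence_pool` is a placeholder FencePool instance):
//   VulkanFence fence = fence_pool.get_fence();
//   // ... submit work using fence.get_submit_handle() ...
//   fence.wait();
//   fence_pool.return_fence(fence);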
//
// Impl
//
template <typename Block>
inline VulkanBuffer MemoryAllocator::create_params_buffer(const Block& block) {
VulkanBuffer uniform_buffer = create_uniform_buffer(sizeof(Block));
// Fill the uniform buffer with data in block
{
MemoryMap mapping(uniform_buffer, MemoryAccessType::WRITE);
Block* data_ptr = mapping.template data<Block>();
*data_ptr = block;
}
return uniform_buffer;
}
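// Illustrative call (sketch only; Block and `allocator` are placeholders):
//   struct Block { uint32_t numel; float scale; };
//   VulkanBuffer params = allocator.create_params_buffer(Block{n, 0.5f});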
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 11,427
| 21.147287
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Runtime.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Adapter.h>
#include <ATen/native/vulkan/api/Common.h>
namespace at {
namespace native {
namespace vulkan {
namespace api {
//
// A Vulkan Runtime initializes a Vulkan instance and decouples the concept of
// Vulkan instance initialization from initialization of, and subsequent
// interactions with, Vulkan [physical and logical] devices as a precursor to
// multi-GPU support. The Vulkan Runtime can be queried for available Adapters
// (i.e. physical devices) in the system which in turn can be used for creation
// of a Vulkan Context (i.e. logical devices). All Vulkan tensors in PyTorch
// are associated with a Context to make tensor <-> device affinity explicit.
//
enum AdapterSelector {
First,
};
struct RuntimeConfiguration final {
bool enableValidationMessages;
bool initDefaultDevice;
AdapterSelector defaultSelector;
uint32_t numRequestedQueues;
};
class Runtime final {
public:
explicit Runtime(const RuntimeConfiguration);
// Do not allow copying. There should be only one global instance of this
// class.
Runtime(const Runtime&) = delete;
Runtime& operator=(const Runtime&) = delete;
Runtime(Runtime&&) noexcept;
Runtime& operator=(Runtime&&) = delete;
~Runtime();
using DeviceMapping = std::pair<PhysicalDevice, int32_t>;
using AdapterPtr = std::unique_ptr<Adapter>;
private:
RuntimeConfiguration config_;
VkInstance instance_;
std::vector<DeviceMapping> device_mappings_;
std::vector<AdapterPtr> adapters_;
uint32_t default_adapter_i_;
VkDebugReportCallbackEXT debug_report_callback_;
public:
inline VkInstance instance() const {
return instance_;
}
inline Adapter* get_adapter_p() {
TORCH_CHECK(
default_adapter_i_ >= 0 && default_adapter_i_ < adapters_.size(),
"Pytorch Vulkan Runtime: Default device adapter is not set correctly!");
return adapters_[default_adapter_i_].get();
}
inline Adapter* get_adapter_p(uint32_t i) {
TORCH_CHECK(
i >= 0 && i < adapters_.size(),
"Pytorch Vulkan Runtime: Adapter at index ",
i,
" is not available!");
return adapters_[i].get();
}
inline uint32_t default_adapter_i() const {
return default_adapter_i_;
}
using Selector =
std::function<uint32_t(const std::vector<Runtime::DeviceMapping>&)>;
uint32_t create_adapter(const Selector&);
};
// The global runtime is retrieved using this function, where it is declared as
// a static local variable.
Runtime* runtime();
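//
// Illustrative access pattern (sketch only): the global runtime owns the
// adapters from which a Context is subsequently created.
//
//   Runtime* const rt = runtime();
//   Adapter* const adapter_p = rt->get_adapter_p();
//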
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 2,667
| 25.156863
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Shader.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Types.h>
#include <ATen/native/vulkan/api/Utils.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/hash.h>
#include <mutex>
namespace at {
namespace native {
namespace vulkan {
namespace api {
class ShaderLayout final {
public:
using Signature = c10::SmallVector<VkDescriptorType, 6u>;
explicit ShaderLayout(const VkDevice, const Signature&);
ShaderLayout(const ShaderLayout&) = delete;
ShaderLayout& operator=(const ShaderLayout&) = delete;
ShaderLayout(ShaderLayout&&) noexcept;
ShaderLayout& operator=(ShaderLayout&&) = delete;
~ShaderLayout();
private:
VkDevice device_;
VkDescriptorSetLayout handle_;
public:
VkDescriptorSetLayout handle() const {
return handle_;
}
// We need to define a custom swap function since this class
// does not allow for move assignment. The swap function will
// be used in the hash map.
friend void swap(ShaderLayout& lhs, ShaderLayout& rhs) noexcept;
};
struct ShaderInfo final {
struct {
const uint32_t* bin;
uint32_t size;
} src_code;
std::string kernel_name{""};
ShaderLayout::Signature kernel_layout{};
// Shader Metadata
utils::uvec3 out_tile_size{1u, 1u, 1u};
c10::SmallVector<uint32_t, 4> tile_size;
StorageType bias_storage_type{StorageType::UNKNOWN};
StorageType weight_storage_type{StorageType::UNKNOWN};
explicit ShaderInfo();
explicit ShaderInfo(std::string, const char*);
explicit ShaderInfo(
std::string,
const uint32_t*,
const uint32_t,
const std::vector<VkDescriptorType>&);
explicit ShaderInfo(
std::string,
const uint32_t*,
const uint32_t,
const std::vector<VkDescriptorType>&,
const std::vector<uint32_t>& tile_size,
const StorageType bias_storage_type,
const StorageType weight_storage_type);
};
bool operator==(const ShaderInfo& _1, const ShaderInfo& _2);
class ShaderModule final {
public:
explicit ShaderModule(const VkDevice device, const ShaderInfo& source);
ShaderModule(const ShaderModule&) = delete;
ShaderModule& operator=(const ShaderModule&) = delete;
ShaderModule(ShaderModule&&) noexcept;
ShaderModule& operator=(ShaderModule&&) = delete;
~ShaderModule();
private:
VkDevice device_;
VkShaderModule handle_;
public:
inline VkShaderModule handle() const {
return handle_;
}
// We need to define a custom swap function since this class
// does not allow for move assignment. The swap function will
// be used in the hash map.
friend void swap(ShaderModule& lhs, ShaderModule& rhs) noexcept;
};
class ShaderLayoutCache final {
public:
explicit ShaderLayoutCache(const VkDevice device);
ShaderLayoutCache(const ShaderLayoutCache&) = delete;
ShaderLayoutCache& operator=(const ShaderLayoutCache&) = delete;
ShaderLayoutCache(ShaderLayoutCache&&) noexcept;
ShaderLayoutCache& operator=(ShaderLayoutCache&&) = delete;
~ShaderLayoutCache();
using Key = ShaderLayout::Signature;
using Value = ShaderLayout;
struct Hasher {
inline size_t operator()(const ShaderLayout::Signature& signature) const {
size_t hashed = 0u;
for (const VkDescriptorType type : signature) {
hashed = c10::hash_combine(hashed, c10::get_hash(type));
}
return hashed;
}
};
private:
// Multiple threads could potentially be adding entries into the cache, so use
// a mutex to manage access
std::mutex cache_mutex_;
VkDevice device_;
ska::flat_hash_map<Key, Value, Hasher> cache_;
public:
VkDescriptorSetLayout retrieve(const Key&);
void purge();
};
class ShaderCache final {
public:
explicit ShaderCache(const VkDevice device);
ShaderCache(const ShaderCache&) = delete;
ShaderCache& operator=(const ShaderCache&) = delete;
ShaderCache(ShaderCache&&) noexcept;
ShaderCache& operator=(ShaderCache&&) = delete;
~ShaderCache();
using Key = ShaderInfo;
using Value = ShaderModule;
struct Hasher {
inline size_t operator()(const ShaderInfo& source) const {
return c10::get_hash(source.src_code.bin, source.src_code.size);
}
};
private:
// Multiple threads could potentially be adding entries into the cache, so use
// a mutex to manage access
std::mutex cache_mutex_;
VkDevice device_;
ska::flat_hash_map<Key, Value, Hasher> cache_;
public:
VkShaderModule retrieve(const Key&);
void purge();
};
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
inline bool operator==(
const VkDescriptorSetLayoutBinding& _1,
const VkDescriptorSetLayoutBinding& _2) {
return (
_1.binding == _2.binding && _1.descriptorType == _2.descriptorType &&
_1.descriptorCount == _2.descriptorCount &&
_1.stageFlags == _2.stageFlags &&
_1.pImmutableSamplers == _2.pImmutableSamplers);
}
#endif /* USE_VULKAN_API */
| 4,942
| 23.839196
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Tensor.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Context.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/accumulate.h>
namespace at {
namespace native {
namespace vulkan {
struct LastAccess {
api::PipelineStageFlags stage;
api::MemoryAccessFlags access;
LastAccess()
: stage{api::PipelineStage::NO_STAGE},
access{api::MemoryAccessType::NONE} {}
LastAccess(
api::PipelineStageFlags stage_flags,
api::MemoryAccessFlags access_flags)
: stage{stage_flags}, access{access_flags} {}
};
class vTensorStorage final {
public:
// Do not allow empty vTensorStorage construction
vTensorStorage() = default;
vTensorStorage(
api::Context* context,
const api::StorageType storage_type,
const IntArrayRef sizes,
const at::ScalarType dtype);
vTensorStorage(const vTensorStorage&) = delete;
vTensorStorage& operator=(const vTensorStorage&) = delete;
vTensorStorage(vTensorStorage&&) = default;
vTensorStorage& operator=(vTensorStorage&&) = delete;
~vTensorStorage();
friend class vTensor;
private:
// Context
api::Context* context_;
api::StorageType storage_type_;
// Resource sizings
api::utils::uvec3 extents_;
int64_t buffer_length_;
// Image Texture
mutable api::VulkanImage image_;
mutable api::VulkanBuffer buffer_;
// Last Access - used to insert memory barriers
LastAccess last_access_;
private:
// Memory barrier insertion
void transition(
api::PipelineBarrier&,
const api::PipelineStageFlags,
const api::MemoryAccessFlags);
// Validation
void verify() const;
public:
inline VkFormat texture_format() {
return image_.format();
}
};
class vTensor final {
public:
// Do not allow empty vTensor construction
vTensor() = default;
// Default constructor
vTensor(
api::Context* context,
IntArrayRef sizes,
const c10::ScalarType dtype = c10::kFloat,
const api::StorageType storage_type = api::StorageType::TEXTURE_3D,
const c10::MemoryFormat memory_format = c10::MemoryFormat::Contiguous);
// Default constructor with quantization parameters
vTensor(
api::Context* const context,
const IntArrayRef sizes,
double q_scale,
int64_t q_zero_point,
const c10::ScalarType dtype = c10::kQUInt8,
const api::StorageType storage_type = api::StorageType::TEXTURE_3D,
const c10::MemoryFormat memory_format = c10::MemoryFormat::Contiguous);
// Copy Constructor and Assignment; Ideally copying would be disabled
// (see the reasoning for move assignment below) but it is required for
// compatibility with OpaqueTensorImpl
vTensor(const vTensor& other) = default;
vTensor& operator=(const vTensor& other) = default;
// Move Constructor and assignment
vTensor(vTensor&& other) = default;
vTensor& operator=(vTensor&& other) = default;
// Used for passing buffer sizes and strides data to shaders
struct BufferMetadata {
api::utils::uvec4 sizes;
api::utils::uvec4 strides;
uint32_t ndim;
uint32_t buffer_length;
};
private:
// Tensor Options
c10::ScalarType dtype_;
c10::MemoryFormat memory_format_;
// Sizes and Strides
c10::SmallVector<int64_t, 6u> sizes_;
c10::SmallVector<int64_t, 6u> strides_;
// Storage Dimensions. When stored on the GPU, one dimension will be aligned
// to the next multiple of 4 in order to take advantage of vec4 data types.
c10::SmallVector<int64_t, 6u> gpu_sizes_;
c10::SmallVector<int64_t, 6u> gpu_strides_;
// A Vulkan uniform buffer containing sizes and strides of the GPU buffer that
// can be passed into a shader.
api::UniformParamsBuffer metadata_uniform_;
// Quantization params
bool is_quantized_{false};
double q_scale_{1.0f};
int64_t q_zero_point_{0u};
// Even at the cost of a heap allocation plus the resulting negative impact
// on cache locality due to the subsequent pointer chasing, it is still
// critical to share the view across vTensor implementations to minimize
// programmer errors. Ideally this class should have been only made movable,
// and non-copyable - something we cannot do unfortunately due to the inner
// workings of at::TensorImpl requiring copy semantics in
// at::TensorImpl::release_resources() to function as expected. Now that this
// class is made copyable though, a new door to a whole new class of bugs is
// opened, in that there is now a chance of two [shallow] copies having their
// StorageState objects go out of sync as a result of an operation being
// performed on one shallow copy that is not reflected in the other.
// Technically, if the programmer is very careful, it is possible to avoid
// this trap and not pay the cost of indirection, but the resulting bugs of
// missing memory barriers will be so frustrating to hunt down for those
// unfamiliar with the internal mechanics of this class, that I decided to
// take the performance penalty of this extra layer of indirection in favor
// of making this class easier to use.
std::shared_ptr<vTensorStorage> view_;
public:
/*
Texture Access
*/
inline api::StorageType storage_type() const {
return view_->storage_type_;
}
api::VulkanImage& image(api::PipelineBarrier&, const api::PipelineStageFlags)
const&;
api::VulkanImage& image(
api::PipelineBarrier&,
const api::PipelineStageFlags,
const api::MemoryAccessFlags) &;
api::VulkanBuffer& buffer(
api::PipelineBarrier&,
const api::PipelineStageFlags) const&;
api::VulkanBuffer& buffer(
api::PipelineBarrier&,
const api::PipelineStageFlags,
const api::MemoryAccessFlags) &;
/*
Metadata
*/
inline const api::utils::uvec3& extents() const {
return view_->extents_;
}
/*
* Extract a ScalarType from the TensorOptions member
*/
inline c10::ScalarType dtype() const {
return dtype_;
}
/*
* Get a c10::ScalarType that corresponds to the image format of the texture
*/
inline c10::ScalarType texture_dtype() const {
return api::c10_scalartype(view_->texture_format());
}
inline c10::MemoryFormat memory_format() const {
return memory_format_;
}
inline IntArrayRef sizes() const {
return sizes_;
}
inline IntArrayRef strides() const {
return strides_;
}
inline IntArrayRef gpu_sizes() const {
return gpu_sizes_;
}
inline IntArrayRef gpu_strides() const {
return gpu_strides_;
}
/*
* Get a uniform buffer containing sizes and strides information of the GPU
* buffer
*/
inline api::VulkanBuffer& buffer_metadata() {
return metadata_uniform_.buffer();
}
/*
 * Constructs a BufferMetadata struct based on the original sizes and strides
* to pass into a shader.
*/
BufferMetadata get_cpu_buffer_metadata() const;
inline void set_is_quantized() {
is_quantized_ = true;
}
inline bool is_quantized() const {
return is_quantized_;
}
inline void set_scale(const double q_scale) {
q_scale_ = q_scale;
}
inline double get_scale() const {
return q_scale_;
}
inline float get_scale_float() const {
return api::utils::safe_downcast<float>(q_scale_);
}
inline void set_zero_point(const int64_t q_zero_point) {
q_zero_point_ = q_zero_point;
}
inline int64_t get_zero_point() const {
return q_zero_point_;
}
inline int32_t get_zero_point_int32() const {
return api::utils::safe_downcast<int32_t>(q_zero_point_);
}
inline size_t numel() const {
return c10::multiply_integers(sizes());
}
inline size_t nbytes() const {
return c10::elementSize(dtype()) * numel();
}
/*
* Returns numel but based on gpu_sizes_ instead of sizes_
*/
inline size_t gpu_numel() const {
return view_->buffer_length_;
}
/*
 * Returns nbytes but based on gpu_sizes_ instead of sizes_
*/
inline VkDeviceSize gpu_nbytes() const {
return c10::elementSize(dtype()) * gpu_numel();
}
};
void add_buffer_barrier(
api::PipelineBarrier&,
const api::VulkanBuffer&,
const api::PipelineStageFlags,
const api::MemoryAccessFlags,
const api::PipelineStageFlags,
const api::MemoryAccessFlags);
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 8,328
| 25.525478
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/Utils.h
|
#pragma once
#include <c10/util/ArrayRef.h>
#include <c10/util/Half.h> // For c10::overflows
#include <ATen/native/vulkan/api/Common.h>
#ifdef USE_VULKAN_API
namespace at {
namespace native {
namespace vulkan {
namespace api {
namespace utils {
//
// Alignment
//
template <typename Type>
inline constexpr Type align_down(const Type number, const Type multiple) {
return (number / multiple) * multiple;
}
template <typename Type>
inline constexpr Type align_up(const Type number, const Type multiple) {
return align_down(number + multiple - 1, multiple);
}
template <typename Type>
inline constexpr Type div_up(const Type numerator, const Type denominator) {
return (numerator + denominator - 1) / denominator;
}
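// e.g. (illustrative arithmetic): align_down(13, 4) == 12,
// align_up(13, 4) == 16, and div_up(13, 4) == 4.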
//
// Cast
//
namespace detail {
template <typename To, typename From>
inline constexpr To safe_downcast(const From v) {
TORCH_CHECK(!c10::overflows<To>(v), "Cast failed: out of range!");
return static_cast<To>(v);
}
template <typename To, typename From>
inline constexpr bool is_signed_to_unsigned() {
return std::is_signed<From>::value && std::is_unsigned<To>::value;
}
} // namespace detail
template <
typename To,
typename From,
std::enable_if_t<detail::is_signed_to_unsigned<To, From>(), bool> = true>
inline constexpr To safe_downcast(const From v) {
TORCH_CHECK(v >= From{}, "Cast failed: negative signed to unsigned!");
return detail::safe_downcast<To, From>(v);
}
template <
typename To,
typename From,
std::enable_if_t<!detail::is_signed_to_unsigned<To, From>(), bool> = true>
inline constexpr To safe_downcast(const From v) {
return detail::safe_downcast<To, From>(v);
}
//
// Vector Types
//
namespace detail {
template <typename Type, uint32_t N>
struct vec final {
Type data[N];
};
} // namespace detail
template <uint32_t N>
using ivec = detail::vec<int32_t, N>;
using ivec2 = ivec<2u>;
using ivec3 = ivec<3u>;
using ivec4 = ivec<4u>;
template <uint32_t N>
using uvec = detail::vec<uint32_t, N>;
using uvec2 = uvec<2u>;
using uvec3 = uvec<3u>;
using uvec4 = uvec<4u>;
template <uint32_t N>
using vec = detail::vec<float, N>;
using vec2 = vec<2u>;
using vec3 = vec<3u>;
using vec4 = vec<4u>;
//
// IntArrayRef Handling
//
/*
* Utility function to perform indexing on an IntArrayRef. Negative indexing is
* allowed. For instance, passing an index of -1 will retrieve the last element.
* If the requested index is out of bounds, then 1u will be returned.
*/
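// e.g. (illustrative): val_at(-1, {2, 3, 4, 5}) == 5u,
// val_at(1, {2, 3, 4, 5}) == 3u, and the out-of-range
// val_at(-5, {2, 3, 4, 5}) == 1u.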
inline uint32_t val_at(int32_t index, const IntArrayRef sizes) {
const int32_t ndim = sizes.size();
if (index >= 0) {
return index >= ndim ? 1 : safe_downcast<uint32_t>(sizes[index]);
} else {
return ndim + index < 0 ? 1 : safe_downcast<uint32_t>(sizes[ndim + index]);
}
}
inline ivec2 make_ivec2(IntArrayRef ints, bool reverse = false) {
TORCH_CHECK(ints.size() == 2);
if (reverse) {
return {safe_downcast<int32_t>(ints[1]), safe_downcast<int32_t>(ints[0])};
} else {
return {safe_downcast<int32_t>(ints[0]), safe_downcast<int32_t>(ints[1])};
}
}
inline ivec4 make_ivec4(IntArrayRef ints, bool reverse = false) {
TORCH_CHECK(ints.size() == 4);
if (reverse) {
return {
safe_downcast<int32_t>(ints[3]),
safe_downcast<int32_t>(ints[2]),
safe_downcast<int32_t>(ints[1]),
safe_downcast<int32_t>(ints[0]),
};
} else {
return {
safe_downcast<int32_t>(ints[0]),
safe_downcast<int32_t>(ints[1]),
safe_downcast<int32_t>(ints[2]),
safe_downcast<int32_t>(ints[3]),
};
}
}
inline ivec3 make_ivec3(uvec3 ints) {
return {
safe_downcast<int32_t>(ints.data[0u]),
safe_downcast<int32_t>(ints.data[1u]),
safe_downcast<int32_t>(ints.data[2u])};
}
/*
* Given an IntArrayRef of up to 4 elements, constructs a uvec4 containing those
* elements in reverse order.
*/
inline uvec4 make_nchw_uvec4(const IntArrayRef arr) {
uint32_t w = val_at(-1, arr);
uint32_t h = val_at(-2, arr);
uint32_t c = val_at(-3, arr);
uint32_t n = val_at(-4, arr);
return {w, h, c, n};
}
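// e.g. (illustrative): make_nchw_uvec4({N, C, H, W}) yields {W, H, C, N}, and
// make_nchw_uvec4({H, W}) yields {W, H, 1u, 1u} since missing dims default to
// 1u.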
} // namespace utils
inline bool operator==(const utils::uvec3& _1, const utils::uvec3& _2) {
return (
_1.data[0u] == _2.data[0u] && _1.data[1u] == _2.data[1u] &&
_1.data[2u] == _2.data[2u]);
}
inline VkOffset3D create_offset3d(const utils::uvec3& offsets) {
return VkOffset3D{
static_cast<int32_t>(offsets.data[0u]),
static_cast<int32_t>(offsets.data[1u]),
static_cast<int32_t>(offsets.data[2u])};
}
inline VkExtent3D create_extent3d(const utils::uvec3& extents) {
return VkExtent3D{extents.data[0u], extents.data[1u], extents.data[2u]};
}
} // namespace api
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 4,745
| 23.590674
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/api/api.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Common.h>
#include <ATen/native/vulkan/api/Adapter.h>
#include <ATen/native/vulkan/api/Command.h>
#include <ATen/native/vulkan/api/Context.h>
#include <ATen/native/vulkan/api/Descriptor.h>
#include <ATen/native/vulkan/api/Pipeline.h>
#include <ATen/native/vulkan/api/Resource.h>
#include <ATen/native/vulkan/api/Runtime.h>
#include <ATen/native/vulkan/api/Shader.h>
#include <ATen/native/vulkan/api/Tensor.h>
#include <ATen/native/vulkan/api/Utils.h>
#endif /* USE_VULKAN_API */
| 551
| 28.052632
| 46
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/glsl/texel_access.h
|
/*
* Texel access utility functions
*/
// Broadcasting: compute input texel position from broadcasted output position
ivec3 map_output_pos_to_input_pos(
ivec3 output_pos,
ivec4 output_sizes,
ivec4 input_sizes) {
ivec3 input_pos;
// HW: use modulo
input_pos.xy = output_pos.xy % input_sizes.xy;
if (output_sizes.w == input_sizes.w && output_sizes.z != input_sizes.z) {
// C: divide by ceil(C/4) to map to input tensor range
input_pos.z = output_pos.z / int(ceil(output_sizes.z / 4.0));
} else {
// N: use modulo. z-range of input is batch * ceil(channel/4)
input_pos.z =
output_pos.z % (input_sizes.w * int(ceil(input_sizes.z / 4.0)));
}
return input_pos;
}
// Broadcasting: load texel from an image texture, applying broadcasting
vec4 load_texel(
ivec3 mapped_pos,
ivec4 output_sizes,
ivec4 input_sizes,
sampler3D uInput) {
return (output_sizes.z != input_sizes.z)
? texelFetch(uInput, mapped_pos, 0).xxxx
: texelFetch(uInput, mapped_pos, 0);
}
| 1,029
| 29.294118
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/graph/Constant.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Context.h>
namespace at {
namespace native {
namespace vulkan {
/*
* Represents a reference to a tensor that has been serialized with the model,
* such as a serialized weight tensor. It contains some metadata as well as a
* raw pointer to the data of the tensor, which is assumed to be contiguous.
*/
struct TensorRef final {
std::vector<int64_t> sizes;
c10::ScalarType dtype;
const void* data;
explicit TensorRef(
const IntArrayRef t_sizes,
c10::ScalarType t_dtype,
const void* const t_data);
TensorRef(const TensorRef&) = default;
TensorRef& operator=(const TensorRef&) = default;
TensorRef(TensorRef&&) = default;
TensorRef& operator=(TensorRef&&) = default;
};
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 870
| 21.921053
| 78
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/graph/Copy.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/graph/Graph.h>
namespace at {
namespace native {
namespace vulkan {
void add_copy_node(ComputeGraph& graph, const ValueRef from, const ValueRef to);
ValueRef add_copy_node(ComputeGraph& graph, const ValueRef from);
class CopyNode : public virtual OpNode {
public:
explicit CopyNode(const ValueRef from, const ValueRef to);
void encode_execute(ComputeGraph* graph) const override;
};
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 550
| 20.192308
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/graph/Exception.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <exception>
#include <ostream>
namespace at {
namespace native {
namespace vulkan {
/*
* Same as c10::SourceLocation, represents a location in source code
*/
struct SourceLocation {
const char* func;
const char* file;
uint32_t line;
};
std::ostream& operator<<(std::ostream& out, const SourceLocation& loc);
/*
* Simple error class modeled after c10::Error
*/
class Error : public std::exception {
public:
// Constructors
Error(SourceLocation location, std::string msg);
private:
// The source location of the exception
SourceLocation location_;
// The actual error message
std::string msg_;
std::string what_;
public:
const char* what() const noexcept override {
return what_.c_str();
}
const std::string& msg() const {
return msg_;
}
private:
void refresh_what();
std::string compute_what(bool include_source) const;
};
} // namespace vulkan
} // namespace native
} // namespace at
#define VKGRAPH_THROW(...) \
throw ::at::native::vulkan::Error( \
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
c10::str(__VA_ARGS__));
#define VKGRAPH_CHECK(cond, ...) \
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
throw ::at::native::vulkan::Error( \
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
c10::str(__VA_ARGS__)); \
}
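// Illustrative usage (val and dtype are placeholders; the message pieces are
// concatenated via c10::str):
//   VKGRAPH_CHECK(val.isTensor(), "Expected a Tensor value, got ", val.type());
//   VKGRAPH_THROW("Unsupported dtype: ", dtype);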
#endif /* USE_VULKAN_API */
| 1,567
| 21.4
| 71
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/graph/Graph.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/Context.h>
#include <ATen/native/vulkan/api/Tensor.h>
#include <ATen/native/vulkan/graph/Config.h>
#include <ATen/native/vulkan/graph/Exception.h>
#include <ATen/native/vulkan/graph/Value.h>
namespace at {
namespace native {
namespace vulkan {
typedef int32_t ValueRef;
class ComputeGraph;
/*
 * Represents a single op in an ML model. In graph mode, an op is implemented by
 * introducing a derived class that overrides encode_execute, which encodes the
 * shader corresponding to the op into the command buffer of a ComputeGraph, as
 * well as encode_prepack, which encodes the shaders that transfer necessary
 * data (such as weights and biases) to the GPU, wherever prepacking is
 * necessary.
*/
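//
// A minimal derived node might look like the following sketch (illustrative
// only; see CopyNode in Copy.h and StagingNode in Staging.h for concrete
// examples):
//
//   class MyOpNode : public virtual OpNode {
//    public:
//     void encode_execute(ComputeGraph* graph) const override {
//       // record this op's shader dispatch into graph->context()
//     }
//   };
//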
class OpNode {
friend class ComputeGraph;
public:
virtual ~OpNode() {}
protected:
std::vector<ValueRef> inputs_;
std::vector<ValueRef> outputs_;
public:
virtual void encode_prepack(ComputeGraph* graph) const {}
virtual void encode_execute(ComputeGraph* graph) const {}
};
/*
* This is the core data structure used to execute Vulkan models in graph mode.
 * As opposed to ATen/eager mode, where a command buffer is encoded on every
 * inference (since ops are executed as the model runs), in graph mode the ops
 * that compose the model are intended to be parsed only once, upon which a
 * command buffer will be encoded. Model inference will then execute the cached
 * command buffer without needing to encode a new one.
*/
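//
// A rough end-to-end flow (illustrative sketch only; the op-building helpers,
// sizes, dtype and host pointers are placeholders):
//
//   ComputeGraph graph(config);
//   ValueRef in = graph.add_tensor(sizes, dtype);
//   ValueRef in_staging = graph.set_input_tensor(in);
//   // ... add OpNodes consuming `in` and producing an output ValueRef `out` ...
//   ValueRef out_staging = graph.set_output_tensor(out);
//   graph.encode_prepack();
//   graph.prepack();
//   graph.encode_execute();
//   graph.copy_into_staging(in_staging, input_data, numel);
//   graph.execute();
//   graph.copy_from_staging(out_staging, output_data, out_numel);
//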
class ComputeGraph final {
public:
explicit ComputeGraph(GraphConfig config);
ComputeGraph(ComputeGraph&&) = default;
ComputeGraph& operator=(ComputeGraph&&) = default;
~ComputeGraph();
private:
GraphConfig config_;
std::unique_ptr<api::Context> context_;
std::vector<Value> values_;
std::vector<std::unique_ptr<OpNode>> prepack_nodes_;
std::vector<std::unique_ptr<OpNode>> execute_nodes_;
std::vector<ValueRef> inputs_;
std::vector<ValueRef> outputs_;
public:
//
// Accessors
//
inline api::Context* context() {
return context_.get();
}
inline std::vector<ValueRef>& inputs() {
return inputs_;
}
inline std::vector<ValueRef>& outputs() {
return outputs_;
}
/*
* Returns the value at a particular reference
*/
inline Value& get_val(ValueRef idx) {
return values_[idx];
}
inline IntArrayRef get_val_sizes(ValueRef idx) {
Value& val = get_val(idx);
if (val.isTensor()) {
return val.toTensor().sizes();
} else if (val.isTensorRef()) {
return val.toTensorRef().sizes;
}
VKGRAPH_THROW("Could not get sizes of value with type ", val.type());
}
inline c10::ScalarType get_val_dtype(ValueRef idx) {
Value& val = get_val(idx);
if (val.isTensor()) {
return val.toTensor().dtype();
} else if (val.isTensorRef()) {
return val.toTensorRef().dtype;
}
VKGRAPH_THROW("Could not get dtype of value with type ", val.type());
}
inline std::vector<std::unique_ptr<OpNode>>& prepack_nodes() {
return prepack_nodes_;
}
inline std::vector<std::unique_ptr<OpNode>>& execute_nodes() {
return execute_nodes_;
}
//
// Graph Building
//
ValueRef add_tensor(const IntArrayRef sizes, const c10::ScalarType dtype);
ValueRef add_tensorref(
const IntArrayRef sizes,
const c10::ScalarType dtype,
const void* const data);
ValueRef add_staging(const c10::ScalarType dtype, const size_t numel);
ValueRef set_input_tensor(const ValueRef idx, const bool use_staging = true);
ValueRef set_output_tensor(const ValueRef idx, const bool use_staging = true);
//
// Input/Output
//
void copy_into_staging(
const ValueRef idx,
const void* data,
const size_t numel);
void copy_from_staging(const ValueRef idx, void* data, const size_t numel);
//
// Graph Prepacking
//
void encode_prepack();
void prepack() const;
//
// Graph Execution
//
void encode_execute();
void execute() const;
};
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 4,142
| 23.957831
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/graph/Staging.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/graph/Graph.h>
namespace at {
namespace native {
namespace vulkan {
//
// Functions to memcpy data into staging buffer
//
void memcpy_to_mapping(
const void* src,
api::MemoryMap& dst_mapping,
const size_t nbytes,
const c10::ScalarType dtype);
void memcpy_from_mapping(
const api::MemoryMap& src_mapping,
void* dst,
const size_t nbytes,
const c10::ScalarType dtype);
//
// Utility functions for memcpy
//
template <typename T>
void memcpy_to_mapping_impl(
const void* src,
api::MemoryMap& dst_mapping,
const size_t nbytes) {
T* data_ptr = dst_mapping.template data<T>();
memcpy(data_ptr, reinterpret_cast<const T*>(src), nbytes);
}
template <typename T>
void memcpy_from_mapping_impl(
api::MemoryMap& src_mapping,
void* dst,
const size_t nbytes) {
T* data_ptr = src_mapping.template data<T>();
memcpy(reinterpret_cast<T*>(dst), data_ptr, nbytes);
}
//
// Functions to copy data into and out of a staging buffer
//
void copy_ptr_to_staging(
const void* src,
api::StorageBuffer& staging,
const size_t nbytes);
void copy_staging_to_ptr(
api::StorageBuffer& staging,
void* dst,
const size_t nbytes);
//
// Functions to record copying data between a staging buffer and a vTensor
//
void encode_copy_to_vtensor(
api::Context* context,
api::StorageBuffer& staging,
vTensor& tensor);
void encode_copy_from_vtensor(
api::Context* context,
vTensor& tensor,
api::StorageBuffer& staging);
/*
* OpNode that allows copying data into and out of a staging buffer.
*/
class StagingNode : public virtual OpNode {
public:
explicit StagingNode(ValueRef from, ValueRef to);
void encode_execute(ComputeGraph* graph) const override;
};
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 1,901
| 20.370787
| 74
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/impl/Common.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/api/api.h>
#include <ATen/native/vulkan/impl/Registry.h>
#define VK_KERNEL(shader_name) \
::at::native::vulkan::get_shader_info(#shader_name)
#define VK_LOOKUP_KERNEL(op_name) \
::at::native::vulkan::look_up_shader_info(#op_name)
namespace at {
namespace native {
namespace vulkan {
/*
* Maps a semantic dimension name to an integer that corresponds to its
* innermost ordering in a 4D tensor in NCHW format. Width is the innermost
* dimension, so it corresponds to 1, height is the next innermost, so it
* corresponds to 2, and so on.
*/
struct Dim4D {
static constexpr uint32_t Width = 1u;
static constexpr uint32_t Height = 2u;
static constexpr uint32_t Channel = 3u;
static constexpr uint32_t Batch = 4u;
};
/*
* Semantic dimension names for a 1D tensor
*/
struct Dim1D {
static constexpr uint32_t Length = 1u;
};
/*
* Semantic dimension names for a 2D Convolution kernel.
*/
struct DimConv2DKernel {
static constexpr uint32_t Width = 1u;
static constexpr uint32_t Height = 2u;
static constexpr uint32_t InChannels = 3u;
static constexpr uint32_t OutChannels = 4u;
};
/*
* The same as the above, except for a 2D Transposed Convolution kernel.
*/
struct DimTConv2DKernel {
static constexpr uint32_t Width = 1u;
static constexpr uint32_t Height = 2u;
static constexpr uint32_t OutChannels = 3u;
static constexpr uint32_t InChannels = 4u;
};
/*
* The functions below safely return the size of the dimension at the N-th
* innermost index. If the dimensionality of the size array is not sufficient
* then 1 will be returned. The structs above are intended to be used with
* these functions.
*/
template <uint32_t N>
uint32_t dim_at(const IntArrayRef sizes) {
const uint32_t dims = sizes.size();
return dims < N ? 1 : api::utils::safe_downcast<uint32_t>(sizes[dims - N]);
}
template <uint32_t N>
uint32_t dim_at(const vTensor& v_in) {
return dim_at<N>(v_in.sizes());
}
/*
 * For most global work group sizes, returns {4, 4, 4}, but adjusts the size
 * for 2D global work group sizes. Always maintains a total of 64 invocations.
 */
api::utils::uvec3 adaptive_work_group_size(
const api::utils::uvec3& global_work_group);
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
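// Illustrative usage sketch (not part of the original header) of dim_at with
// the Dim4D names above; the concrete sizes are assumptions for the example.
#include <cstdint>
#include <vector>
inline void dim_at_example() {
  using namespace at::native::vulkan;
  const std::vector<int64_t> sizes{2, 3, 5, 7}; // NCHW
  const uint32_t width = dim_at<Dim4D::Width>(sizes); // 7 (innermost)
  const uint32_t height = dim_at<Dim4D::Height>(sizes); // 5
  const uint32_t channels = dim_at<Dim4D::Channel>(sizes); // 3
  const uint32_t batch = dim_at<Dim4D::Batch>(sizes); // 2 (outermost)
  // For a 2D size array such as {5, 7}, Channel and Batch would return 1.
  (void)width;
  (void)height;
  (void)channels;
  (void)batch;
}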
| 2,341
| 26.232558
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/impl/Packing.h
|
#include <ATen/native/vulkan/api/api.h>
namespace at {
namespace native {
namespace vulkan {
namespace packing {
api::ShaderInfo get_nchw_to_image_shader(const vTensor& v_dst);
api::ShaderInfo get_image_to_nchw_shader(const vTensor& v_src);
void record_nchw_to_image_op(
api::Context* const context,
api::ShaderInfo& compute_shader,
api::VulkanBuffer& src_buffer,
vTensor& v_dst,
api::PipelineBarrier pipeline_barrier,
const VkFence fence_handle);
void record_image_to_nchw_op(
api::Context* const context,
api::ShaderInfo& compute_shader,
vTensor& v_src,
api::VulkanBuffer& dst_buffer,
api::PipelineBarrier pipeline_barrier,
const VkFence fence_handle);
void record_nchw_to_buffer_op(
api::Context* const context,
api::VulkanBuffer& src_buffer,
vTensor& v_dst,
api::PipelineBarrier pipeline_barrier,
const VkFence fence_handle);
void record_buffer_to_nchw_op(
api::Context* const context,
vTensor& v_src,
api::VulkanBuffer& dst_buffer,
api::PipelineBarrier pipeline_barrier,
const VkFence fence_handle);
} // namespace packing
} // namespace vulkan
} // namespace native
} // namespace at
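// Illustrative sketch (not part of the original header) pairing the shader
// lookup with the recording helper above. The Context, source buffer, and
// destination vTensor are assumed to be provided by the caller; passing a
// default-constructed PipelineBarrier and VK_NULL_HANDLE for the fence is an
// assumption about typical usage.
inline void record_nchw_to_image_sketch(
    at::native::vulkan::api::Context* const context,
    at::native::vulkan::api::VulkanBuffer& src_buffer,
    at::native::vulkan::vTensor& v_dst) {
  at::native::vulkan::api::ShaderInfo shader =
      at::native::vulkan::packing::get_nchw_to_image_shader(v_dst);
  at::native::vulkan::api::PipelineBarrier pipeline_barrier{};
  at::native::vulkan::packing::record_nchw_to_image_op(
      context, shader, src_buffer, v_dst, pipeline_barrier, VK_NULL_HANDLE);
}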
| 1,185
| 25.355556
| 63
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Batchnorm.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/ops/Common.h>
#include <ATen/native/vulkan/ops/VulkanPackedContext.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
class BatchNormPackedContext final : virtual public VulkanPackedContext,
public torch::jit::CustomClassHolder {
private:
c10::impl::GenericList unpacked_;
public:
BatchNormPackedContext(
const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt,
const c10::optional<Tensor>& running_mean_opt,
const c10::optional<Tensor>& running_var_opt,
double eps);
/*
* Assigns a name to each index in the packed/unpacked list.
*/
struct ListArgs final {
static constexpr uint32_t kWeight = 0u;
static constexpr uint32_t kBias = 1u;
static constexpr uint32_t kRunningMean = 2u;
static constexpr uint32_t kRunningVar = 3u;
static constexpr uint32_t kEps = 4u;
static constexpr uint32_t kNumArgs = 5u;
};
static BatchNormPackedContext pack(c10::impl::GenericList);
const c10::impl::GenericList unpack() const override {
TORCH_CHECK(unpacked_.size() > 0u, "unpacked_ does not have any elements!");
return unpacked_;
}
};
c10::intrusive_ptr<BatchNormPackedContext> create_batchnorm_context(
c10::optional<Tensor>&& weight_opt,
c10::optional<Tensor>&& bias_opt,
c10::optional<Tensor>&& running_mean_opt,
c10::optional<Tensor>&& running_var_opt,
bool training,
double /* momentum */,
double eps,
bool /* cudnn_enable, deprecated */);
Tensor run_batchnorm_context(
const Tensor& input_arg,
const c10::intrusive_ptr<BatchNormPackedContext>& context);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
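// Illustrative sketch (not part of the original header): reading an entry back
// out of an unpacked context using the ListArgs indices above. The context is
// assumed to hold a populated unpacked list.
inline double read_batchnorm_eps_sketch(
    const at::native::vulkan::ops::BatchNormPackedContext& context) {
  const c10::impl::GenericList unpacked = context.unpack();
  return unpacked
      .get(at::native::vulkan::ops::BatchNormPackedContext::ListArgs::kEps)
      .toDouble();
}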
| 1,867
| 26.072464
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Common.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/core/List.h>
#include <ATen/core/Tensor.h>
#include <ATen/native/vulkan/api/api.h>
#include <ATen/native/vulkan/impl/Common.h>
#include <ATen/native/vulkan/ops/Convert.h>
#define VK_KERNEL(shader_name) \
::at::native::vulkan::get_shader_info(#shader_name)
#define VK_LOOKUP_KERNEL(op_name) \
::at::native::vulkan::look_up_shader_info(#op_name)
namespace at {
namespace native {
namespace vulkan {
namespace ops {
struct Layout final {
// 4D Activation Maps
struct Activation4D final {
static constexpr size_t batch = 0u;
static constexpr size_t channels = 1u;
static constexpr size_t height = 2u;
static constexpr size_t width = 3u;
};
// Convolution Filters
struct Filter final {
static constexpr size_t output = 0u;
static constexpr size_t input = 1u;
static constexpr size_t height = 2u;
static constexpr size_t width = 3u;
};
// Transposed Convolution Filters
struct TransposedFilter final {
static constexpr size_t input = 0u;
static constexpr size_t output = 1u;
static constexpr size_t height = 2u;
static constexpr size_t width = 3u;
};
// Parameters (Pooling Kernels, Dilation, Padding, Stride, etc.)
struct Parameter final {
static constexpr size_t height = 0u;
static constexpr size_t width = 1u;
};
};
/*
* The functions below safely return the size of the dimension at the N-th
* innermost index. If the dimensionality of the size array is not sufficient
* then 1 will be returned. The structs above are intended to be used with
* these functions.
*/
template <uint32_t N>
uint32_t get_dim(const IntArrayRef sizes) {
const uint32_t dims = sizes.size();
return dims < N ? 1 : api::utils::safe_downcast<uint32_t>(sizes[dims - N]);
}
template <uint32_t N>
uint32_t get_dim(const Tensor& t_in) {
return get_dim<N>(t_in.sizes());
}
template <uint32_t N>
uint32_t get_dim(const vTensor& v_in) {
return get_dim<N>(v_in.sizes());
}
inline c10::optional<Tensor> get_optional_tensor(
const c10::impl::GenericList& gen_list,
const uint32_t idx) {
return gen_list.get(idx).isTensor() ? gen_list.get(idx).toTensor()
: c10::optional<Tensor>();
}
inline c10::optional<Scalar> get_optional_scalar(
const c10::impl::GenericList& gen_list,
const uint32_t idx) {
return gen_list.get(idx).isScalar() ? gen_list.get(idx).toScalar()
: c10::optional<Scalar>();
}
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
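// Illustrative sketch (not part of the original header): pulling optional
// entries out of a GenericList with the helpers above. The index values are
// assumptions for the example.
inline void optional_unpack_sketch(const c10::impl::GenericList& gen_list) {
  // Both helpers fall back to c10::nullopt when the entry has a different type.
  const c10::optional<at::Tensor> maybe_bias =
      at::native::vulkan::ops::get_optional_tensor(gen_list, 1u);
  const c10::optional<at::Scalar> maybe_output_min =
      at::native::vulkan::ops::get_optional_scalar(gen_list, 9u);
  (void)maybe_bias;
  (void)maybe_output_min;
}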
| 2,620
| 26.589474
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Convert.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/VulkanOpaqueTensorImpl.h>
#include <ATen/native/vulkan/api/Tensor.h>
#include <c10/util/accumulate.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
using vTensorImpl = VulkanOpaqueTensorImpl<vTensor>;
inline Tensor convert(const vTensor& tensor) {
return at::detail::make_tensor<vTensorImpl>(
DispatchKeySet(DispatchKey::Vulkan),
c10::scalarTypeToTypeMeta(tensor.dtype()),
at::Device(at::kVulkan),
tensor,
tensor.sizes(),
tensor.strides());
}
inline Tensor convert_quantized(const vTensor& tensor) {
TORCH_CHECK(tensor.is_quantized(), "Not a Quantized Tensor");
return at::detail::make_tensor<vTensorImpl>(
DispatchKeySet(DispatchKey::Vulkan),
c10::scalarTypeToTypeMeta(tensor.dtype()),
at::Device(at::kVulkan),
tensor,
tensor.sizes(),
tensor.strides());
}
inline vTensor& convert(const Tensor& tensor) {
TORCH_INTERNAL_ASSERT(tensor.is_vulkan(), "Vulkan tensor expected!");
vTensorImpl* const impl =
static_cast<vTensorImpl*>(tensor.unsafeGetTensorImpl());
return impl->unsafe_opaque_handle();
}
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 1,294
| 23.903846
| 71
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Convolution.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/ops/Common.h>
#include <ATen/native/vulkan/ops/VulkanPackedContext.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
enum Conv2dMethod {
Conv2dDepthwise,
Conv2dPointwise,
Conv2dSlidingWindow,
};
namespace conv2d {
Tensor rearrange_weights_dw(const Tensor& weight_in);
Tensor rearrange_weights_2d(const Tensor& weight_in, bool tconv);
Tensor rearrange_bias(
const c10::optional<Tensor>& bias_in,
const at::Tensor& weight_in,
bool tconv);
} // namespace conv2d
namespace qconv2d_vk {
struct QParams final {
api::utils::uvec3 out_extents;
int32_t ic4;
api::utils::ivec4 sizes_2d;
float output_scale;
float input_scale;
int32_t output_zero_point;
int32_t input_zero_point;
float weight_scale;
float bias_scale;
int32_t weight_zero_point;
int32_t bias_zero_point;
api::utils::ivec2 kernel_size;
api::utils::ivec2 stride;
api::utils::ivec2 padding;
api::utils::ivec2 dilate;
api::utils::vec2 clamp;
api::utils::ivec4 src_filter;
};
} // namespace qconv2d_vk
class Conv2dPackedContext final : virtual public VulkanPackedContext,
public torch::jit::CustomClassHolder {
private:
c10::impl::GenericList unpacked_;
api::ShaderInfo compute_shader_{};
public:
Conv2dPackedContext(
const Tensor& weight,
const c10::optional<Tensor>& bias,
const IntArrayRef stride_arg,
const IntArrayRef padding_arg,
const IntArrayRef dilation_arg,
const bool transposed,
const bool quantized,
const IntArrayRef output_padding_arg,
const int64_t groups,
const c10::optional<Scalar>& output_min = c10::nullopt,
const c10::optional<Scalar>& output_max = c10::nullopt);
/*
* Assigns a name to each index in the unpacked list.
*/
struct Unpacked final {
static constexpr uint32_t Weight = 0u;
static constexpr uint32_t Bias = 1u;
static constexpr uint32_t Stride = 2u;
static constexpr uint32_t Padding = 3u;
static constexpr uint32_t Dilation = 4u;
static constexpr uint32_t isTransposed = 5u;
static constexpr uint32_t isQuantized = 6u;
static constexpr uint32_t OutputPadding = 7u;
static constexpr uint32_t Groups = 8u;
static constexpr uint32_t OutputMin = 9u;
static constexpr uint32_t OutputMax = 10u;
static constexpr uint32_t NumArgs = 11u;
};
/*
* Assigns a name to each index in the packed list.
*/
struct Packed final {
static constexpr uint32_t Weight = 0u;
static constexpr uint32_t Bias = 1u;
static constexpr uint32_t OverlayRegion = 2u;
static constexpr uint32_t Stride = 3u;
static constexpr uint32_t Padding = 4u;
static constexpr uint32_t OutputPadding = 5u;
static constexpr uint32_t Dilation = 6u;
static constexpr uint32_t isTransposed = 7u;
static constexpr uint32_t isQuantized = 8u;
static constexpr uint32_t Groups = 9u;
static constexpr uint32_t OutputMin = 10u;
static constexpr uint32_t OutputMax = 11u;
static constexpr uint32_t ConvMethod = 12u;
static constexpr uint32_t WeightSizes = 13u;
static constexpr uint32_t NumArgs = 14u;
};
static Conv2dPackedContext pack(c10::impl::GenericList);
const c10::impl::GenericList unpack() const override {
TORCH_CHECK(unpacked_.size() > 0u, "unpacked_ does not have any elements!");
return unpacked_;
}
inline api::ShaderInfo& compute_shader() {
return compute_shader_;
}
};
c10::intrusive_ptr<Conv2dPackedContext> create_conv2d_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& dilation,
const int64_t groups,
const c10::optional<Scalar>& output_min = c10::nullopt,
const c10::optional<Scalar>& output_max = c10::nullopt);
Tensor run_conv2d_context(
const Tensor& input,
const c10::intrusive_ptr<Conv2dPackedContext>& context);
c10::intrusive_ptr<Conv2dPackedContext> create_tconv2d_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& output_padding,
std::vector<int64_t>&& dilation,
const int64_t groups,
const c10::optional<Scalar>& output_min = c10::nullopt,
const c10::optional<Scalar>& output_max = c10::nullopt);
Tensor run_tconv2d_context(
const Tensor& input,
const c10::intrusive_ptr<Conv2dPackedContext>& context);
c10::intrusive_ptr<Conv2dPackedContext> create_qconv2d_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& dilation,
const int64_t groups,
const c10::optional<Scalar>& output_min = c10::nullopt,
const c10::optional<Scalar>& output_max = c10::nullopt);
Tensor run_qconv2d_context(
const Tensor& input_arg,
double scale,
int64_t zero_point,
const c10::intrusive_ptr<Conv2dPackedContext>& conv_context);
// Backwards compatibility
class Conv2dOpContext final : public torch::jit::CustomClassHolder {
public:
static Conv2dOpContext create(
const Tensor& weight,
const c10::optional<Tensor>& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool transposed,
IntArrayRef output_padding,
int64_t groups,
const c10::optional<Scalar>& output_min = c10::nullopt,
const c10::optional<Scalar>& output_max = c10::nullopt);
using State = std::tuple<
Tensor,
c10::optional<Tensor>,
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
int64_t,
c10::optional<Scalar>,
c10::optional<Scalar>>;
Tensor run(const Tensor& input) const;
State unpack() const;
private:
explicit Conv2dOpContext(Conv2dPackedContext conv_context);
Conv2dPackedContext conv_context_;
};
Tensor conv2d_clamp_run(
const Tensor& input,
const c10::intrusive_ptr<Conv2dOpContext>& context);
c10::intrusive_ptr<Conv2dOpContext> conv2d_clamp_prepack(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& dilation,
const int64_t groups,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
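// Illustrative sketch (not part of the original header): one plausible
// prepack-then-run flow using the factory functions above. The weight, bias,
// input, and hyper-parameters are assumptions for the example.
inline at::Tensor conv2d_prepack_and_run_sketch(
    at::Tensor weight,
    c10::optional<at::Tensor> bias,
    const at::Tensor& input) {
  const c10::intrusive_ptr<at::native::vulkan::ops::Conv2dPackedContext>
      context = at::native::vulkan::ops::create_conv2d_context(
          std::move(weight),
          std::move(bias),
          /*stride=*/{1, 1},
          /*padding=*/{0, 0},
          /*dilation=*/{1, 1},
          /*groups=*/1);
  return at::native::vulkan::ops::run_conv2d_context(input, context);
}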
| 6,555
| 28.00885
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Copy.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/ops/Common.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
void transfer_cpu_to_vulkan(const Tensor&, vTensor&);
void transfer_vulkan_to_cpu(vTensor&, Tensor&);
void pack_cpu_to_vulkan(const Tensor& src, vTensor& dst);
void pack_vulkan_to_cpu(vTensor& src, Tensor& dst);
Tensor& copy_(Tensor& dst, const Tensor& src);
vTensor to_vulkan(
at::Tensor& src,
const api::StorageType storage_type = api::StorageType::TEXTURE_3D);
at::Tensor from_vulkan(vTensor& v_src);
//
// Utility functions for memcpy
//
template <typename T>
void memcpy_to_mapping_impl(const Tensor& src, api::MemoryMap& dst_mapping) {
T* data_ptr = dst_mapping.template data<T>();
memcpy(
data_ptr,
src.const_data_ptr<T>(),
std::min(src.nbytes(), dst_mapping.nbytes()));
}
template <typename T>
void memcpy_from_mapping_impl(api::MemoryMap& src_mapping, Tensor& dst) {
T* data_ptr = src_mapping.template data<T>();
memcpy(
dst.mutable_data_ptr<T>(),
data_ptr,
std::min(src_mapping.nbytes(), dst.nbytes()));
}
inline void memcpy_from_mapping_bool(api::MemoryMap& src_mapping, Tensor& dst) {
uint8_t* src_ptr = src_mapping.template data<uint8_t>();
bool* dst_ptr = dst.mutable_data_ptr<bool>();
for (int i = 0; (unsigned)i < std::min(src_mapping.nbytes(), dst.nbytes());
++i) {
dst_ptr[i] = static_cast<bool>(src_ptr[i]);
}
}
inline void memcpy_to_mapping_uint8(
const Tensor& src,
api::MemoryMap& dst_mapping) {
bool* src_ptr = src.mutable_data_ptr<bool>();
uint8_t* dst_ptr = dst_mapping.template data<uint8_t>();
for (int i = 0; (unsigned)i < std::min(dst_mapping.nbytes(), src.nbytes());
++i) {
dst_ptr[i] = static_cast<uint8_t>(src_ptr[i]);
}
}
void memcpy_to_mapping(const Tensor& src, api::MemoryMap& dst_mapping);
void memcpy_from_mapping(api::MemoryMap& src_mapping, Tensor& dst);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
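// Illustrative sketch (not part of the original header): a CPU -> Vulkan ->
// CPU round trip using the helpers above. Relying on the default TEXTURE_3D
// storage type is an assumption about typical usage.
inline at::Tensor vulkan_roundtrip_sketch(at::Tensor& cpu_src) {
  at::native::vulkan::vTensor v_tensor =
      at::native::vulkan::ops::to_vulkan(cpu_src);
  return at::native::vulkan::ops::from_vulkan(v_tensor);
}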
| 2,068
| 24.8625
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Factory.h
|
#include <ATen/native/vulkan/ops/Common.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
Tensor _empty_affine_quantized(
const IntArrayRef sizes,
const c10::optional<ScalarType> dtype,
const c10::optional<c10::Layout> layout,
const c10::optional<Device> device,
const c10::optional<bool> pin_memory,
const double scale,
const int64_t zero_point,
const optional<MemoryFormat> memory_format);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
| 530
| 23.136364
| 48
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Gru.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/ops/Common.h>
#include <ATen/native/vulkan/ops/VulkanPackedContext.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
class GruPackedContext final : virtual public VulkanPackedContext,
public torch::jit::CustomClassHolder {
public:
GruPackedContext(
const std::vector<Tensor>& params_cpu, // weights/biases (cpu)
bool has_biases,
int64_t num_layers,
double dropout,
bool train,
bool bidirectional,
bool batch_first);
/*
* Assigns a name to each index in the unpacked list.
*/
struct Unpacked final {
static constexpr uint32_t Params = 0u;
static constexpr uint32_t hasBiases = 1u;
static constexpr uint32_t NumLayers = 2u;
static constexpr uint32_t Dropout = 3u;
static constexpr uint32_t Train = 4u;
static constexpr uint32_t Bidirectional = 5u;
static constexpr uint32_t BatchFirst = 6u;
static constexpr uint32_t NumArgs = 7u;
};
/*
* Assigns a name to each index in the packed list.
*/
struct Packed final {
static constexpr uint32_t LinearContexts = 0u;
static constexpr uint32_t hasBiases = 1u;
static constexpr uint32_t NumLayers = 2u;
static constexpr uint32_t Dropout = 3u;
static constexpr uint32_t Train = 4u;
static constexpr uint32_t Bidirectional = 5u;
static constexpr uint32_t BatchFirst = 6u;
static constexpr uint32_t NumArgs = 7u;
};
static GruPackedContext pack(c10::impl::GenericList);
const c10::impl::GenericList unpack() const override;
};
c10::intrusive_ptr<GruPackedContext> create_gru_context(
std::vector<Tensor>&& params_cpu, // weights/biases (cpu)
bool has_biases,
int64_t num_layers,
double dropout,
bool train,
bool bidirectional,
bool batch_first);
std::tuple<Tensor, Tensor> run_gru_context(
const Tensor& input_vk,
const Tensor& hx_vk,
const c10::intrusive_ptr<GruPackedContext>& vulkan_context);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
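// Illustrative sketch (not part of the original header): prepacking the GRU
// weights/biases on CPU and then running one forward call. The parameter
// values are assumptions for the example.
#include <tuple>
#include <vector>
inline std::tuple<at::Tensor, at::Tensor> gru_sketch(
    std::vector<at::Tensor>&& params_cpu,
    const at::Tensor& input_vk,
    const at::Tensor& hx_vk) {
  const c10::intrusive_ptr<at::native::vulkan::ops::GruPackedContext> context =
      at::native::vulkan::ops::create_gru_context(
          std::move(params_cpu),
          /*has_biases=*/true,
          /*num_layers=*/1,
          /*dropout=*/0.0,
          /*train=*/false,
          /*bidirectional=*/false,
          /*batch_first=*/true);
  return at::native::vulkan::ops::run_gru_context(input_vk, hx_vk, context);
}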
| 2,176
| 25.876543
| 69
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Lstm.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/ops/Common.h>
#include <ATen/native/vulkan/ops/VulkanPackedContext.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
class LstmPackedContext final : virtual public VulkanPackedContext,
public torch::jit::CustomClassHolder {
public:
LstmPackedContext(
const std::vector<Tensor>& params_cpu, // weights/biases (cpu)
bool has_biases,
int64_t num_layers,
double dropout,
bool train,
bool bidirectional,
bool batch_first);
/*
* Assigns a name to each index in the unpacked list.
*/
struct Unpacked final {
static constexpr uint32_t Params = 0u;
static constexpr uint32_t hasBiases = 1u;
static constexpr uint32_t NumLayers = 2u;
static constexpr uint32_t Dropout = 3u;
static constexpr uint32_t Train = 4u;
static constexpr uint32_t Bidirectional = 5u;
static constexpr uint32_t BatchFirst = 6u;
static constexpr uint32_t NumArgs = 7u;
};
/*
* Assigns a name to each index in the packed list.
*/
struct Packed final {
static constexpr uint32_t LinearContexts = 0u;
static constexpr uint32_t hasBiases = 1u;
static constexpr uint32_t NumLayers = 2u;
static constexpr uint32_t Dropout = 3u;
static constexpr uint32_t Train = 4u;
static constexpr uint32_t Bidirectional = 5u;
static constexpr uint32_t BatchFirst = 6u;
static constexpr uint32_t NumArgs = 7u;
};
static LstmPackedContext pack(c10::impl::GenericList);
const c10::impl::GenericList unpack() const override;
};
c10::intrusive_ptr<LstmPackedContext> create_lstm_context(
std::vector<Tensor>&& params_cpu, // weights/biases (cpu)
bool has_biases,
int64_t num_layers,
double dropout,
bool train,
bool bidirectional,
bool batch_first);
std::tuple<Tensor, Tensor, Tensor> run_lstm_context(
const Tensor& input_vk, // input sequence (vulkan)
const Tensor& hx_vk, // initial hidden state (vulkan)
const Tensor& cx_vk, // initial cell state (vulkan)
const c10::intrusive_ptr<LstmPackedContext>& vulkan_context);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
| 2,308
| 27.158537
| 70
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/Mm.h
|
#pragma once
#ifdef USE_VULKAN_API
#include <ATen/native/vulkan/ops/Common.h>
#include <ATen/native/vulkan/ops/VulkanPackedContext.h>
#include <torch/library.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
class LinearPackedContext final : virtual public VulkanPackedContext,
public torch::jit::CustomClassHolder {
private:
c10::impl::GenericList unpacked_;
public:
LinearPackedContext(const Tensor& weight, const c10::optional<Tensor>& bias);
/*
* Assigns a name to each index in the unpacked list.
*/
struct Unpacked final {
static constexpr uint32_t Weight = 0u;
static constexpr uint32_t Bias = 1u;
static constexpr uint32_t NumArgs = 2u;
};
/*
* Assigns a name to each index in the packed list.
*/
struct Packed final {
static constexpr uint32_t Weight = 0u;
static constexpr uint32_t Bias = 1u;
static constexpr uint32_t WeightSizes = 2u;
static constexpr uint32_t BiasDefined = 3u;
static constexpr uint32_t NumArgs = 4u;
};
static LinearPackedContext pack(c10::impl::GenericList);
const c10::impl::GenericList unpack() const override {
TORCH_CHECK(unpacked_.size() > 0u, "unpacked_ does not have any elements!");
return unpacked_;
}
};
c10::intrusive_ptr<LinearPackedContext> create_linear_context(
Tensor&& weight,
c10::optional<Tensor>&& bias);
Tensor run_linear_context(
const Tensor& input,
const c10::intrusive_ptr<LinearPackedContext>& context);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
#endif /* USE_VULKAN_API */
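// Illustrative sketch (not part of the original header): prepacking a linear
// layer and running it on a Vulkan input. The weight/bias/input tensors are
// assumptions for the example.
inline at::Tensor linear_sketch(
    at::Tensor weight,
    c10::optional<at::Tensor> bias,
    const at::Tensor& input) {
  const c10::intrusive_ptr<at::native::vulkan::ops::LinearPackedContext>
      context = at::native::vulkan::ops::create_linear_context(
          std::move(weight), std::move(bias));
  return at::native::vulkan::ops::run_linear_context(input, context);
}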
| 1,635
| 23.41791
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vulkan/ops/QuantizedFunctions.h
|
#include <ATen/native/vulkan/ops/Common.h>
namespace at {
namespace native {
namespace vulkan {
namespace ops {
Tensor quantize_per_tensor(
const at::Tensor& input_arg,
const double scale,
const int64_t zero_point,
const c10::ScalarType dtype);
Tensor dequantize_helper(
const at::Tensor& input_arg,
const double scale,
const int64_t zero_point,
const c10::ScalarType dtype);
Tensor dequantize(const Tensor& self);
Tensor quantized_add(
const Tensor& self_arg,
const Tensor& other_arg,
const double scale,
const int64_t zero_point);
Tensor quantized_sub(
const Tensor& self_arg,
const Tensor& other_arg,
const double scale,
const int64_t zero_point);
Tensor quantized_mul(
const Tensor& self_arg,
const Tensor& other_arg,
const double scale,
const int64_t zero_point);
Tensor quantized_div(
const Tensor& self_arg,
const Tensor& other_arg,
const double scale,
const int64_t zero_point);
Tensor quantized_conv2d(
const Tensor& input_,
const Tensor& weight,
const c10::optional<Tensor>& bias_opt,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups,
double out_scale,
int64_t out_zero_point);
Tensor quantized_upsample_nearest2d(
const Tensor& input_arg,
const IntArrayRef output_sizes,
const c10::optional<double> scales_h,
const c10::optional<double> scales_w);
} // namespace ops
} // namespace vulkan
} // namespace native
} // namespace at
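// Illustrative sketch (not part of the original header): quantize two Vulkan
// tensors, add them in the quantized domain, and dequantize the result. The
// scale and zero point are assumptions for the example.
inline at::Tensor quantized_add_roundtrip_sketch(
    const at::Tensor& a_vk,
    const at::Tensor& b_vk) {
  const double scale = 0.1;
  const int64_t zero_point = 10;
  const at::Tensor qa = at::native::vulkan::ops::quantize_per_tensor(
      a_vk, scale, zero_point, c10::ScalarType::QUInt8);
  const at::Tensor qb = at::native::vulkan::ops::quantize_per_tensor(
      b_vk, scale, zero_point, c10::ScalarType::QUInt8);
  const at::Tensor q_sum =
      at::native::vulkan::ops::quantized_add(qa, qb, scale, zero_point);
  return at::native::vulkan::ops::dequantize(q_sum);
}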
| 1,528
| 21.820896
| 42
|
h
|
null |
pytorch-main/aten/src/ATen/native/xnnpack/Common.h
|
#pragma once
#ifdef USE_XNNPACK
#include <xnnpack.h>
#include <caffe2/utils/threadpool/pthreadpool-cpp.h>
#include <c10/util/ArrayRef.h>
#include <limits>
#include <memory>
namespace at::native::xnnpack {
struct Deleter final {
void operator()(const xnn_operator_t op) const {
xnn_delete_operator(op);
}
};
using Operator = std::unique_ptr<xnn_operator, Deleter>;
struct ContextLinear final {
Operator op;
int64_t output_channels;
ContextLinear() = delete;
ContextLinear(Operator&& o, int64_t o_channels) : op(std::move(o)), output_channels(o_channels) {}
static constexpr float kMin = -std::numeric_limits<float>::infinity();
static constexpr float kMax = std::numeric_limits<float>::infinity();
};
// This contains information for both the transpose and non-transpose cases.
struct ContextConv2D final {
Operator op;
std::array<int64_t, 4> weight_size_;
std::array<int64_t, 2> padding_;
std::array<int64_t, 2> output_padding_;
std::array<int64_t, 2> stride_;
std::array<int64_t, 2> dilation_;
bool transposed_;
int64_t groups_;
ContextConv2D() = delete;
ContextConv2D(
Operator&& o,
std::array<int64_t, 4> weight_size,
std::array<int64_t, 2> padding,
std::array<int64_t, 2> output_padding,
std::array<int64_t, 2> stride,
std::array<int64_t, 2> dilation,
bool transposed,
int64_t groups)
: op(std::move(o)),
weight_size_(weight_size),
padding_(padding),
output_padding_(output_padding),
stride_(stride),
dilation_(dilation),
transposed_(transposed),
groups_(groups) {}
static constexpr float kMin = -std::numeric_limits<float>::infinity();
static constexpr float kMax = std::numeric_limits<float>::infinity();
};
namespace internal {
struct Layout final {
// 4D Activation Maps
struct Activation4D final {
static constexpr size_t batch = 0u;
static constexpr size_t channels = 1u;
static constexpr size_t height = 2u;
static constexpr size_t width = 3u;
};
// ND Activation Maps
struct ActivationND final {
    // Some operators may not be limited to 4-dimensional tensors. In that
    // scenario, XNNPACK denotes that operator with an _nc suffix and expects
    // all dimensions, except channels, to be flattened into one argument:
    // batch_size.
static int64_t batch(const IntArrayRef tensor) {
if (C10_UNLIKELY(tensor.empty())) {
return -1;
}
// Handle the case where batch size is zero.
int64_t batch = tensor[0];
for (size_t index = 1u; index < (tensor.size() - 1u); ++index) {
batch *= tensor[index];
}
return batch;
};
static int64_t channel(const IntArrayRef tensor) {
if (C10_UNLIKELY(tensor.empty())) {
return -1;
}
return tensor.back();
};
};
// Convolution Filters
struct Filter final {
static constexpr size_t output = 0u;
static constexpr size_t input = 1u;
static constexpr size_t height = 2u;
static constexpr size_t width = 3u;
};
// Parameters (Pooling Kernels, Dilation, Padding, Stride, etc.)
struct Parameter final {
static constexpr size_t height = 0u;
static constexpr size_t width = 1u;
};
};
} // namespace internal
} // namespace at::native::xnnpack
#endif /* USE_XNNPACK */
namespace at::native::xnnpack {
bool available();
} // namespace at::native::xnnpack
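// Illustrative sketch (not part of the original header, and assuming an
// XNNPACK-enabled build): what the ActivationND helpers above compute for a
// concrete shape. The shape is an assumption for the example.
#include <cstdint>
#include <vector>
inline void activation_nd_example() {
  const std::vector<int64_t> sizes{2, 3, 4, 8};
  // Every dimension except the last is flattened into batch_size: 2 * 3 * 4 = 24.
  const int64_t batch =
      at::native::xnnpack::internal::Layout::ActivationND::batch(sizes);
  // The last dimension is treated as channels: 8.
  const int64_t channels =
      at::native::xnnpack::internal::Layout::ActivationND::channel(sizes);
  (void)batch;
  (void)channels;
}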
| 3,436
| 25.643411
| 100
|
h
|
null |
pytorch-main/aten/src/ATen/native/xnnpack/Convolution.h
|
#pragma once
#ifdef USE_XNNPACK
#include <ATen/Tensor.h>
#include <ATen/native/xnnpack/Common.h>
#include <ATen/native/xnnpack/OpContext.h>
namespace at::native::xnnpack {
namespace internal::convolution2d {
c10::intrusive_ptr<xnnpack::Conv2dOpContext>
createConv2dClampPrePackOpContext(
Tensor weight,
c10::optional<Tensor> bias,
std::vector<int64_t> stride,
std::vector<int64_t> padding,
std::vector<int64_t> dilation,
int64_t groups,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
c10::intrusive_ptr<xnnpack::TransposeConv2dOpContext>
createConv2dTransposeClampPrePackOpContext(
Tensor weight,
c10::optional<Tensor> bias,
std::vector<int64_t> stride,
std::vector<int64_t> padding,
std::vector<int64_t> output_padding,
std::vector<int64_t> dilation,
int64_t groups,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
Tensor conv2d_clamp_run(
const Tensor& input,
const c10::intrusive_ptr<xnnpack::Conv2dOpContext>& op_context);
IValue
unpack_prepacked_sizes_conv2d(const IValue& ivalue);
Tensor conv2d_transpose_clamp_run(
const Tensor& input,
const c10::intrusive_ptr<xnnpack::TransposeConv2dOpContext>& op_context);
ContextConv2D create(
const Tensor& weight,
const c10::optional<Tensor>& bias,
const IntArrayRef padding,
const IntArrayRef output_padding,
const IntArrayRef stride,
const IntArrayRef dilation,
const int64_t groups,
const bool transposed,
const float output_min,
const float output_max);
Tensor run(ContextConv2D& context, const Tensor& input);
} // namespace internal::convolution2d
Tensor convolution2d(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const IntArrayRef padding,
const IntArrayRef stride,
const IntArrayRef dilation,
const int64_t groups);
} // namespace at::native::xnnpack
#endif /* USE_XNNPACK */
| 2,062
| 27.260274
| 77
|
h
|
null |
pytorch-main/aten/src/ATen/native/xnnpack/Linear.h
|
#pragma once
#ifdef USE_XNNPACK
#include <ATen/Tensor.h>
#include <ATen/native/xnnpack/Common.h>
#include <ATen/native/xnnpack/OpContext.h>
namespace at::native::xnnpack {
namespace internal::linear {
c10::intrusive_ptr<xnnpack::LinearOpContext> createLinearClampPrePackOpContext(
Tensor weight,
c10::optional<Tensor> bias,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
Tensor linear_clamp_run(const Tensor& input, const c10::intrusive_ptr<xnnpack::LinearOpContext>& op_context);
IValue
unpack_prepacked_sizes_linear(const IValue& ivalue);
ContextLinear create(
const Tensor& weight,
const c10::optional<Tensor>& bias,
const float output_min,
const float output_max);
Tensor run(const ContextLinear& context, const Tensor& input);
} // namespace internal::linear
bool use_linear(
const Tensor& input,
const Tensor& weight,
const Tensor& bias);
Tensor linear(
const Tensor& input,
const Tensor& weight,
const Tensor& bias);
} // namespace at::native::xnnpack
#endif /* USE_XNNPACK */
| 1,088
| 23.2
| 109
|
h
|
null |
pytorch-main/aten/src/ATen/native/xnnpack/OpContext.h
|
#pragma once
#ifdef USE_XNNPACK
#include <ATen/core/ivalue.h>
#include <ATen/native/xnnpack/Common.h>
#include <ATen/Tensor.h>
namespace at::native::xnnpack {
using SerializationTypeLinearPrePack = std::tuple<
Tensor,
c10::optional<Tensor>,
c10::optional<Scalar>,
c10::optional<Scalar>>;
using SerializationTypeConv2dPrePack = std::tuple<
Tensor,
c10::optional<Tensor>,
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
int64_t,
c10::optional<Scalar>,
c10::optional<Scalar>>;
using SerializationTypeTransposeConv2dPrePack = std::tuple<
Tensor,
c10::optional<Tensor>,
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
std::vector<int64_t>,
int64_t,
c10::optional<Scalar>,
c10::optional<Scalar>>;
class LinearOpContext : public torch::jit::CustomClassHolder {
protected:
Tensor orig_weight_;
c10::optional<Tensor> orig_bias_;
c10::optional<Scalar> output_min_;
c10::optional<Scalar> output_max_;
bool orig_weight_and_bias_freed_;
public:
SerializationTypeLinearPrePack unpack() {
TORCH_CHECK(!orig_weight_and_bias_freed_, "Original weight and bias have been freed");
return std::make_tuple(orig_weight_, orig_bias_, output_min_, output_max_);
}
virtual Tensor run(const Tensor& input) = 0;
virtual void free_orig_weight_and_bias() = 0;
};
class XNNPackLinearOpContext final : public LinearOpContext {
private:
ContextLinear op_context_;
public:
XNNPackLinearOpContext(
Tensor&& weight,
c10::optional<Tensor>&& bias,
const c10::optional<Scalar>& min,
const c10::optional<Scalar>& max,
ContextLinear&& op_context)
: op_context_(std::move(op_context)) {
orig_weight_ = std::move(weight);
orig_bias_ = std::move(bias);
output_min_ = min;
output_max_ = max;
orig_weight_and_bias_freed_ = false;
}
Tensor run(const Tensor& input) override;
void free_orig_weight_and_bias() override;
static c10::intrusive_ptr<LinearOpContext> create_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
};
class Conv2dOpContext : public torch::jit::CustomClassHolder {
protected:
Tensor orig_weight_;
c10::optional<Tensor> orig_bias_;
std::vector<int64_t> stride_;
std::vector<int64_t> padding_;
std::vector<int64_t> dilation_;
int64_t groups_;
c10::optional<Scalar> output_min_;
c10::optional<Scalar> output_max_;
bool orig_weight_and_bias_freed_;
public:
SerializationTypeConv2dPrePack unpack() {
TORCH_CHECK(!orig_weight_and_bias_freed_, "Original weight and bias have been freed");
return std::make_tuple(
orig_weight_,
orig_bias_,
stride_,
padding_,
dilation_,
groups_,
output_min_,
output_max_);
}
virtual Tensor run(const Tensor& input) = 0;
virtual void free_orig_weight_and_bias() = 0;
};
class TransposeConv2dOpContext : public torch::jit::CustomClassHolder {
protected:
Tensor orig_weight_;
c10::optional<Tensor> orig_bias_;
std::vector<int64_t> stride_;
std::vector<int64_t> padding_;
std::vector<int64_t> output_padding_;
std::vector<int64_t> dilation_;
int64_t groups_;
c10::optional<Scalar> output_min_;
c10::optional<Scalar> output_max_;
bool orig_weight_and_bias_freed_;
public:
SerializationTypeTransposeConv2dPrePack unpack() {
TORCH_CHECK(!orig_weight_and_bias_freed_, "Original weight and bias have been freed");
return std::make_tuple(
orig_weight_,
orig_bias_,
stride_,
padding_,
output_padding_,
dilation_,
groups_,
output_min_,
output_max_);
}
virtual Tensor run(const Tensor& input) = 0;
virtual void free_orig_weight_and_bias() = 0;
};
class XNNPackConv2dOpContext final : public Conv2dOpContext {
private:
ContextConv2D op_context_;
  // XNNPACK convs use an indirection buffer.
  // This buffer needs to be set up at runtime and/or whenever the input
  // dims change. If we are running the same model on multiple
  // threads, this can lead to contention, where the indirection buffer
  // is being accessed and updated at the same time from two different
  // threads.
std::mutex xnnp_mutex_;
public:
XNNPackConv2dOpContext(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& dilation,
uint64_t groups,
const c10::optional<Scalar>& min,
const c10::optional<Scalar>& max,
ContextConv2D&& op_context)
: op_context_(std::move(op_context)) {
orig_weight_ = std::move(weight);
orig_bias_ = std::move(bias);
padding_ = std::move(padding);
stride_ = std::move(stride);
dilation_ = std::move(dilation);
groups_ = groups;
output_min_ = min;
output_max_ = max;
orig_weight_and_bias_freed_ = false;
}
Tensor run(const Tensor& input) override;
void free_orig_weight_and_bias() override;
static c10::intrusive_ptr<Conv2dOpContext> create_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& dilation,
int64_t groups,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
};
class XNNPackTransposeConv2dOpContext final : public TransposeConv2dOpContext {
private:
ContextConv2D op_context_;
  // XNNPACK convs use an indirection buffer.
  // This buffer needs to be set up at runtime and/or whenever the input
  // dims change. If we are running the same model on multiple
  // threads, this can lead to contention, where the indirection buffer
  // is being accessed and updated at the same time from two different
  // threads.
std::mutex xnnp_mutex_;
public:
XNNPackTransposeConv2dOpContext(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& output_padding,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& dilation,
uint64_t groups,
const c10::optional<Scalar>& min,
const c10::optional<Scalar>& max,
ContextConv2D&& op_context)
: op_context_(std::move(op_context)) {
orig_weight_ = std::move(weight);
orig_bias_ = std::move(bias);
padding_ = std::move(padding);
output_padding_ = std::move(output_padding);
stride_ = std::move(stride);
dilation_ = std::move(dilation);
groups_ = groups;
output_min_ = min;
output_max_ = max;
orig_weight_and_bias_freed_ = false;
}
Tensor run(const Tensor& input) override;
void free_orig_weight_and_bias() override;
static c10::intrusive_ptr<TransposeConv2dOpContext> create_context(
Tensor&& weight,
c10::optional<Tensor>&& bias,
std::vector<int64_t>&& padding,
std::vector<int64_t>&& output_padding,
std::vector<int64_t>&& stride,
std::vector<int64_t>&& dilation,
int64_t groups,
const c10::optional<Scalar>& output_min,
const c10::optional<Scalar>& output_max);
};
} // namespace at::native::xnnpack
#endif /* USE_XNNPACK */
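// Illustrative sketch (not part of the original header): the kind of locking
// the indirection-buffer comments above describe. The GuardedRunSketch class
// and its do_run helper are hypothetical; the real synchronization lives in
// the run() implementations in the corresponding .cpp file.
#include <mutex>
class GuardedRunSketch {
 public:
  at::Tensor run(const at::Tensor& input) {
    // The XNNPACK indirection buffer may be rebuilt when input dims change, so
    // concurrent callers must not touch it at the same time.
    std::lock_guard<std::mutex> guard(mutex_);
    return do_run(input); // hypothetical helper standing in for the real work
  }
 private:
  at::Tensor do_run(const at::Tensor& input); // hypothetical, not implemented here
  std::mutex mutex_;
};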
| 7,270
| 28.200803
| 90
|
h
|
null |
pytorch-main/aten/src/ATen/native/xnnpack/Pooling.h
|
#pragma once
#ifdef USE_XNNPACK
#include <ATen/Tensor.h>
namespace at::native::xnnpack::internal::pooling {
struct Parameters final {
std::array<int64_t, 2> kernel;
std::array<int64_t, 2> padding;
std::array<int64_t, 2> stride;
std::array<int64_t, 2> dilation;
explicit Parameters(
const IntArrayRef kernel_,
const IntArrayRef padding_,
const IntArrayRef stride_,
const IntArrayRef dilation_)
: kernel(normalize(kernel_)),
padding(normalize(padding_)),
stride(normalize(stride_)),
dilation(normalize(dilation_)) {
}
private:
static std::array<int64_t, 2> normalize(const IntArrayRef parameter) {
TORCH_INTERNAL_ASSERT(
!parameter.empty(),
"Invalid usage! Reason: normalize() was called on an empty parameter.");
return std::array<int64_t, 2>{
parameter[0],
(2 == parameter.size()) ? parameter[1] : parameter[0],
};
}
};
} // namespace at::native::xnnpack::internal::pooling
#endif /* USE_XNNPACK */
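// Illustrative sketch (not part of the original header, and assuming an
// XNNPACK-enabled build): how Parameters normalizes 1- and 2-element argument
// lists. The concrete values are assumptions for the example.
inline void pooling_parameters_example() {
  // Single-element lists are broadcast to both spatial dimensions;
  // two-element lists are taken as {height, width}.
  const at::native::xnnpack::internal::pooling::Parameters params(
      /*kernel_=*/{3}, // becomes {3, 3}
      /*padding_=*/{1, 0}, // stays {1, 0}
      /*stride_=*/{2}, // becomes {2, 2}
      /*dilation_=*/{1, 1}); // stays {1, 1}
  (void)params;
}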
| 1,004
| 22.372093
| 81
|
h
|
null |
pytorch-main/aten/src/ATen/nnapi/NeuralNetworks.h
|
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Most of NeuralNetworks.h has been stripped for simplicity.
We don't need any of the function declarations since
we call them all through dlopen/dlsym.
Operation codes are pulled directly from serialized models.
*/
#ifndef MINIMAL_NEURAL_NETWORKS_H
#define MINIMAL_NEURAL_NETWORKS_H
#include <stdint.h>
typedef enum {
ANEURALNETWORKS_NO_ERROR = 0,
ANEURALNETWORKS_OUT_OF_MEMORY = 1,
ANEURALNETWORKS_INCOMPLETE = 2,
ANEURALNETWORKS_UNEXPECTED_NULL = 3,
ANEURALNETWORKS_BAD_DATA = 4,
ANEURALNETWORKS_OP_FAILED = 5,
ANEURALNETWORKS_BAD_STATE = 6,
ANEURALNETWORKS_UNMAPPABLE = 7,
ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
} ResultCode;
typedef enum {
ANEURALNETWORKS_FLOAT32 = 0,
ANEURALNETWORKS_INT32 = 1,
ANEURALNETWORKS_UINT32 = 2,
ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
ANEURALNETWORKS_TENSOR_INT32 = 4,
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
ANEURALNETWORKS_BOOL = 6,
ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
ANEURALNETWORKS_TENSOR_BOOL8 = 9,
ANEURALNETWORKS_FLOAT16 = 10,
ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12,
ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
} OperandCode;
typedef enum {
ANEURALNETWORKS_PREFER_LOW_POWER = 0,
ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
} PreferenceCode;
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
typedef int32_t ANeuralNetworksOperationType;
typedef struct ANeuralNetworksOperandType {
int32_t type;
uint32_t dimensionCount;
const uint32_t* dimensions;
float scale;
int32_t zeroPoint;
} ANeuralNetworksOperandType;
#endif // MINIMAL_NEURAL_NETWORKS_H
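// Illustrative sketch (not part of the original header): the dlopen/dlsym
// pattern the comment at the top of this file refers to. The library name
// "libneuralnetworks.so" and the RTLD flags are assumptions for the example.
#include <dlfcn.h>
inline void* load_nnapi_symbol_sketch(const char* name) {
  static void* handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
  return handle ? dlsym(handle, name) : nullptr;
}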
| 2,757
| 31.447059
| 75
|
h
|