| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
#pragma once

#include "cutlass/gemm/thread/mma.h"

#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"

#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/gemm.h"

namespace test {
namespace gemm {
namespace thread {
|
|
| |
|
|
| |
| template <typename Mma> |
| __global__ void kernel( |
| typename Mma::ElementC *D, |
| typename Mma::ElementA const *A, |
| typename Mma::ElementB const *B, |
| typename Mma::ElementC const *C) { |
|
|
| auto ptr_D = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> *>(D); |
| auto ptr_A = reinterpret_cast<cutlass::Array<typename Mma::ElementA, Mma::Shape::kMK> const *>(A); |
| auto ptr_B = reinterpret_cast<cutlass::Array<typename Mma::ElementB, Mma::Shape::kKN> const *>(B); |
| auto ptr_C = reinterpret_cast<cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> const *>(C); |
|
|
| Mma mma; |
|
|
| auto a = *ptr_A; |
| auto b = *ptr_B; |
| auto c = *ptr_C; |
|
|
| cutlass::Array<typename Mma::ElementC, Mma::Shape::kMN> d; |
|
|
| mma(d, a, b, c); |
|
|
| *ptr_D = d; |
| } |
|
|
| |
|
|
| |
| template < |
| |
| typename Shape, |
| |
| typename ElementA, |
| |
| typename LayoutA, |
| |
| typename ElementB, |
| |
| typename LayoutB, |
| |
| typename ElementC, |
| |
| typename LayoutC |
| > |
| struct Testbed { |
|
|
| |
| using Mma = cutlass::gemm::thread::Mma< |
| Shape, |
| ElementA, |
| LayoutA, |
| ElementB, |
| LayoutB, |
| ElementC, |
| LayoutC |
| >; |
|
|
| |
| |
| |
|
|
| cutlass::HostTensor<ElementA, LayoutA> tensor_A; |
| cutlass::HostTensor<ElementB, LayoutB> tensor_B; |
| cutlass::HostTensor<ElementC, LayoutC> tensor_C; |
| cutlass::HostTensor<ElementC, LayoutC> tensor_D_computed; |
| cutlass::HostTensor<ElementC, LayoutC> tensor_D_reference; |
|
|
| |
| |
| |
|
|
| |
| Testbed() { |
|
|
| tensor_A.reset(cutlass::make_Coord(Shape::kM, Shape::kK)); |
| tensor_B.reset(cutlass::make_Coord(Shape::kK, Shape::kN)); |
| tensor_C.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); |
| tensor_D_computed.reset(cutlass::make_Coord(Shape::kM, Shape::kN)); |
| tensor_D_reference.reset(cutlass::make_Coord(Shape::kM, Shape::kN), false); |
| } |
|
|
| |
| bool run() { |
|
|
| |
| |
| |
|
|
| cutlass::reference::host::BlockFillSequential( |
| tensor_A.host_data(), |
| tensor_A.capacity() |
| ); |
|
|
| cutlass::reference::host::BlockFillSequential( |
| tensor_B.host_data(), |
| tensor_B.capacity(), |
| ElementB(1), |
| ElementB(2) |
| ); |
|
|
| cutlass::reference::host::TensorFill( |
| tensor_C.host_view(), |
| ElementC(0) |
| ); |
|
|
| cutlass::reference::host::TensorFill( |
| tensor_D_computed.host_view(), |
| ElementC(0) |
| ); |
|
|
| cutlass::reference::host::TensorFill( |
| tensor_D_reference.host_view(), |
| ElementC(0) |
| ); |
|
|
| tensor_A.sync_device(); |
| tensor_B.sync_device(); |
| tensor_C.sync_device(); |
| tensor_D_computed.sync_device(); |
|
|
| |
| kernel<Mma><<< dim3(1, 1), dim3(1, 1, 1) >>>( |
| tensor_D_computed.device_data(), |
| tensor_A.device_data(), |
| tensor_B.device_data(), |
| tensor_C.device_data()); |
|
|
| |
| cudaError_t result = cudaDeviceSynchronize(); |
|
|
| EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); |
| if (result != cudaSuccess) { |
| return false; |
| } |
|
|
| tensor_D_computed.sync_host(); |
|
|
| |
| |
| |
|
|
| |
|
|
| cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB, |
| ElementC, LayoutC, ElementC, ElementC> |
| reference_gemm; |
|
|
| reference_gemm( |
| {Shape::kM, Shape::kN, Shape::kK}, |
| ElementC(1), |
| tensor_A.host_ref(), |
| tensor_B.host_ref(), |
| ElementC(0), |
| tensor_D_reference.host_ref() |
| ); |
|
|
| |
| |
| |
|
|
| |
| bool passed = cutlass::reference::host::TensorEquals( |
| tensor_D_computed.host_view(), |
| tensor_D_reference.host_view() |
| ); |
|
|
| EXPECT_TRUE(passed) |
| << "A:\n" << tensor_A.host_view() << "\n\n" |
| << "B:\n" << tensor_B.host_view() << "\n\n" |
| << "C:\n" << tensor_C.host_view() << "\n\n" |
| << "Reference:\n" << tensor_D_reference.host_view() << "\n\n" |
| << "Computed:\n" << tensor_D_computed.host_view() << std::endl; |
| |
| |
| return passed; |
| } |
| }; |
|
|
| |
|
|
} // namespace thread
} // namespace gemm
} // namespace test
|
|