| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| #pragma once |
|
|
| #include <cute/config.hpp> |
| #include <cute/arch/mma.hpp> |
| #include <cute/numeric/complex.hpp> |
|
|
| |
// Enable SM80 (Ampere, compute capability >= 8.0) MMA instructions.
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
#  define CUTE_ARCH_MMA_SM80_ENABLED

// 1-bit AND-popc MMA: the condition only defines this for arch <= 900.
#if (__CUDA_ARCH__ <= 900)
#define CUTE_ARCH_MMA_B1_AND_SM80_ENABLED
#endif

// 1-bit XOR-popc MMA: only defined for arch <= 890 (not on SM90+).
#if (__CUDA_ARCH__ <= 890)
#define CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED
#endif

#endif
|
|
|
|
|
|
| namespace cute { |
|
|
| |
|
|
| |
// MMA 16x8x8 TN: D(f16) = A(f16) * B(f16) + C(f16)
struct SM80_16x8x8_F16F16F16F16_TN
{
  // Per-thread fragments: f16 data packed two-per-32-bit register.
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 "
      "{%0, %1},"
      "{%2, %3},"
      "{%4},"
      "{%5, %6};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(f16) = A(f16) * B(f16) + C(f16)
struct SM80_16x8x16_F16F16F16F16_TN
{
  // Per-thread fragments: f16 data packed two-per-32-bit register.
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 "
      "{%0, %1},"
      "{%2, %3, %4, %5},"
      "{%6, %7},"
      "{%8, %9};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x8 TN: D(f32) = A(f16) * B(f16) + C(f32)
struct SM80_16x8x8_F32F16F16F32_TN
{
  // f16 operands packed in 32-bit registers; f32 accumulators.
  using DRegisters = float[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(float & d0, float & d1, float & d2, float & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      float const & c0, float const & c1, float const & c2, float const & c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(f32) = A(f16) * B(f16) + C(f32)
struct SM80_16x8x16_F32F16F16F32_TN
{
  // f16 operands packed in 32-bit registers; f32 accumulators.
  using DRegisters = float[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(float & d0, float & d1, float & d2, float & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      float const & c0, float const & c1, float const & c2, float const & c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x8 TN: D(f32) = A(bf16) * B(bf16) + C(f32)
struct SM80_16x8x8_F32BF16BF16F32_TN
{
  // bf16 operands packed in 32-bit registers; f32 accumulators.
  using DRegisters = float[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(float & d0, float & d1, float & d2, float & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      float const & c0, float const & c1, float const & c2, float const & c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k8.row.col.f32.bf16.bf16.f32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(f32) = A(bf16) * B(bf16) + C(f32)
struct SM80_16x8x16_F32BF16BF16F32_TN
{
  // bf16 operands packed in 32-bit registers; f32 accumulators.
  using DRegisters = float[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(float & d0, float & d1, float & d2, float & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      float const & c0, float const & c1, float const & c2, float const & c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x4 TN: D(f32) = A(tf32) * B(tf32) + C(f32)
struct SM80_16x8x4_F32TF32TF32F32_TN
{
  // tf32 operands carried in 32-bit registers; f32 accumulators.
  using DRegisters = float[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(float & d0, float & d1, float & d2, float & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      float const & c0, float const & c1, float const & c2, float const & c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k4.row.col.f32.tf32.tf32.f32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x4_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x8 TN: D(f32) = A(tf32) * B(tf32) + C(f32)
struct SM80_16x8x8_F32TF32TF32F32_TN
{
  // tf32 operands carried in 32-bit registers; f32 accumulators.
  using DRegisters = float[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = float[4];

  CUTE_HOST_DEVICE static void
  fma(float & d0, float & d1, float & d2, float & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      float const & c0, float const & c1, float const & c2, float const & c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "f"(c0), "f"(c1), "f"(c2), "f"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x4 TN: D(f64) = A(f64) * B(f64) + C(f64)
// Also used as the scalar building block for the complex<double> MMAs below.
struct SM80_8x8x4_F64F64F64F64_TN
{
  using DRegisters = double[2];
  using ARegisters = double[1];
  using BRegisters = double[1];
  using CRegisters = double[2];

  CUTE_HOST_DEVICE static void
  fma(double & d0, double & d1,
      double const& a0,
      double const& b0,
      double const& c0, double const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64 "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=d"(d0), "=d"(d1)
      : "d"(a0),
        "d"(b0),
        "d"(c0), "d"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x4_F64F64F64F64_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
// MMA 8x8x4 TN for complex<double>: D = A*B + C, composed from four
// real-valued f64 MMAs (the classic 4-multiply complex product).
struct SM80_8x8x4_C64C64C64C64_TN
{
  using DRegisters = complex<double>[2];
  using ARegisters = complex<double>[1];
  using BRegisters = complex<double>[1];
  using CRegisters = complex<double>[2];

  CUTE_HOST_DEVICE static void
  fma(complex<double> & d0, complex<double> & d1,
      complex<double> const& a0,
      complex<double> const& b0,
      complex<double> const& c0, complex<double> const& c1)
  {
    // Alias the real/imag halves of the outputs as plain doubles so they
    // can serve directly as accumulator registers for the f64 MMA.
    // NOTE(review): assumes complex<double> is layout-compatible with
    // double[2] ({real, imag}) -- confirm against cute::complex.
    double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0];
    double& id0 = reinterpret_cast<double(&)[2]>(d0)[1];
    double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0];
    double& id1 = reinterpret_cast<double(&)[2]>(d1)[1];

    // d.real = a.real * b.real + c.real
    SM80_8x8x4_F64F64F64F64_TN::fma(
      rd0, rd1,
      a0.real(),
      b0.real(),
      c0.real(), c1.real());

    // d.imag = a.imag * b.real + c.imag
    SM80_8x8x4_F64F64F64F64_TN::fma(
      id0, id1,
      a0.imag(),
      b0.real(),
      c0.imag(), c1.imag());

    // d.real = -a.imag * b.imag + d.real
    // (accumulates into the result of the first MMA -- ordering matters)
    SM80_8x8x4_F64F64F64F64_TN::fma(
      rd0, rd1,
      -a0.imag(),
      b0.imag(),
      d0.real(), d1.real());

    // d.imag = a.real * b.imag + d.imag
    // (accumulates into the result of the second MMA)
    SM80_8x8x4_F64F64F64F64_TN::fma(
      id0, id1,
      a0.real(),
      b0.imag(),
      d0.imag(), d1.imag());
  }
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
// MMA 8x8x4 TN for complex<double> using Gauss's 3-multiplication trick:
//   t0 = a.real * b.real
//   t1 = a.imag * b.imag
//   t2 = (a.real + a.imag) * (b.real + b.imag)
// so that real = t0 - t1 and imag = t2 - t0 - t1 (three f64 MMAs instead
// of four). The accumulator stays in the partial (t0,t1,t2) form.
struct SM80_8x8x4_GC64C64C64GC64_TN
{
  // Accumulator in Gauss partial form; converts lazily to complex<double>.
  struct GaussComplex {
    double t0, t1, t2;

    // Recover the ordinary complex value: (t0 - t1) + (t2 - t0 - t1)*i.
    CUTE_HOST_DEVICE
    operator complex<double>() const { return complex<double>(t0 - t1, t2 - t0 - t1); }

    // Mixed arithmetic with complex<double> converts first, then operates.
    CUTE_HOST_DEVICE friend
    complex<double> operator*(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) * b; }
    CUTE_HOST_DEVICE friend
    complex<double> operator*(complex<double> const& a, GaussComplex const& b) { return b * a; }

    CUTE_HOST_DEVICE friend
    complex<double> operator+(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) + b; }
    CUTE_HOST_DEVICE friend
    complex<double> operator+(complex<double> const& a, GaussComplex const& b) { return b + a; }
  };

  using DRegisters = GaussComplex[2];
  using ARegisters = complex<double>[1];
  using BRegisters = complex<double>[1];
  using CRegisters = GaussComplex[2];

  CUTE_HOST_DEVICE static void
  fma(GaussComplex & d0, GaussComplex & d1,
      complex<double> const& a0,
      complex<double> const& b0,
      GaussComplex const& c0, GaussComplex const& c1)
  {
    // t0 += a.real * b.real
    SM80_8x8x4_F64F64F64F64_TN::fma(d0.t0, d1.t0,
                                    a0.real(),
                                    b0.real(),
                                    c0.t0, c1.t0);
    // t1 += a.imag * b.imag
    SM80_8x8x4_F64F64F64F64_TN::fma(d0.t1, d1.t1,
                                    a0.imag(),
                                    b0.imag(),
                                    c0.t1, c1.t1);
    // t2 += (a.real + a.imag) * (b.real + b.imag)
    SM80_8x8x4_F64F64F64F64_TN::fma(d0.t2, d1.t2,
                                    a0.real() + a0.imag(),
                                    b0.real() + b0.imag(),
                                    c0.t2, c1.t2);
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(s8) * B(s8) + C(s32)
struct SM80_8x8x16_S32S8S8S32_TN
{
  // s8 operands packed four-per-32-bit register; s32 accumulators
  // carried in uint32_t registers.
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(s8) * B(s8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_8x8x16_S32S8S8S32_TN_SATURATE
{
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(s8) * B(s8) + C(s32)
struct SM80_16x8x16_S32S8S8S32_TN
{
  // s8 operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(s8) * B(s8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x16_S32S8S8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(s8) * B(s8) + C(s32)
struct SM80_16x8x32_S32S8S8S32_TN
{
  // s8 operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(s8) * B(s8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x32_S32S8S8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(s8) * B(u8) + C(s32)
struct SM80_8x8x16_S32S8U8S32_TN
{
  // 8-bit operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32 "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(s8) * B(u8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_8x8x16_S32S8U8S32_TN_SATURATE
{
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32.satfinite "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(s8) * B(u8) + C(s32)
struct SM80_16x8x16_S32S8U8S32_TN
{
  // 8-bit operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(s8) * B(u8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x16_S32S8U8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(s8) * B(u8) + C(s32)
struct SM80_16x8x32_S32S8U8S32_TN
{
  // 8-bit operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(s8) * B(u8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x32_S32S8U8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(u8) * B(s8) + C(s32)
struct SM80_8x8x16_S32U8S8S32_TN
{
  // 8-bit operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(u8) * B(s8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_8x8x16_S32U8S8S32_TN_SATURATE
{
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32.satfinite "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(u8) * B(s8) + C(s32)
struct SM80_16x8x16_S32U8S8S32_TN
{
  // 8-bit operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(u8) * B(s8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x16_S32U8S8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(u8) * B(s8) + C(s32)
struct SM80_16x8x32_S32U8S8S32_TN
{
  // 8-bit operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(u8) * B(s8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x32_S32U8S8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(u8) * B(u8) + C(s32)
struct SM80_8x8x16_S32U8U8S32_TN
{
  // u8 operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 8x8x16 TN: D(s32) = A(u8) * B(u8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_8x8x16_S32U8U8S32_TN_SATURATE
{
  using DRegisters = uint32_t[2];
  using ARegisters = uint32_t[1];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[2];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1,
      uint32_t const& a0,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32.satfinite "
      "{%0, %1},"
      "{%2},"
      "{%3},"
      "{%4, %5};\n"
      : "=r"(d0), "=r"(d1)
      : "r"(a0),
        "r"(b0),
        "r"(c0), "r"(c1));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(u8) * B(u8) + C(s32)
struct SM80_16x8x16_S32U8U8S32_TN
{
  // u8 operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x16 TN: D(s32) = A(u8) * B(u8) + C(s32), with .satfinite
// clamping the s32 accumulation result.
struct SM80_16x8x16_S32U8U8S32_TN_SATURATE
{
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[2];
  using BRegisters = uint32_t[1];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1,
      uint32_t const& b0,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32.satfinite "
      "{%0, %1, %2, %3},"
      "{%4, %5},"
      "{%6},"
      "{%7, %8, %9, %10};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1),
        "r"(b0),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
// MMA 16x8x32 TN: D(s32) = A(u8) * B(u8) + C(s32)
struct SM80_16x8x32_S32U8U8S32_TN
{
  // u8 operands packed in 32-bit registers; s32 accumulators in uint32_t.
  using DRegisters = uint32_t[4];
  using ARegisters = uint32_t[4];
  using BRegisters = uint32_t[2];
  using CRegisters = uint32_t[4];

  CUTE_HOST_DEVICE static void
  fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3,
      uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3,
      uint32_t const& b0, uint32_t const& b1,
      uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3)
  {
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    // Warp-synchronous PTX MMA: all threads of the warp must execute this.
    asm volatile(
      "mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32 "
      "{%0, %1, %2, %3},"
      "{%4, %5, %6, %7},"
      "{%8, %9},"
      "{%10, %11, %12, %13};\n"
      : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3)
      : "r"(a0), "r"(a1), "r"(a2), "r"(a3),
        "r"(b0), "r"(b1),
        "r"(c0), "r"(c1), "r"(c2), "r"(c3));
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
  }
};
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32U8U8S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32S4S4S32_TN |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32S4S4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32.satfinite " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32S4S4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32S4S4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32S4S4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32S4S4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32S4U4S32_TN |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32S4U4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32.satfinite " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32S4U4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32S4U4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32S4U4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32S4U4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32U4S4S32_TN |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32U4S4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32.satfinite " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32U4S4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32U4S4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32U4S4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32U4S4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32U4U4S32_TN |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x32_S32U4U4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32.satfinite " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32U4U4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x32_S32U4U4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32U4U4S32_TN |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32 " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x64_S32U4U4S32_TN_SATURATE |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32.satfinite " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x128_S32U1U1S32_TN_ANDPOPC |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k128.row.col.s32.b1.b1.s32.and.popc " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x128_S32U1U1S32_TN_ANDPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x128_S32U1U1S32_TN_ANDPOPC |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k128.row.col.s32.b1.b1.s32.and.popc " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x128_S32U1U1S32_TN_ANDPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x256_S32U1U1S32_TN_ANDPOPC |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_B1_AND_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.and.popc " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x256_S32U1U1S32_TN_ANDPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_8x8x128_S32U1U1S32_TN_XORPOPC |
| { |
| using DRegisters = uint32_t[2]; |
| using ARegisters = uint32_t[1]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[2]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, |
| uint32_t const& a0, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1) |
| { |
| #if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m8n8k128.row.col.s32.b1.b1.s32.xor.popc " |
| "{%0, %1}," |
| "{%2}," |
| "{%3}," |
| "{%4, %5};\n" |
| : "=r"(d0), "=r"(d1) |
| : "r"(a0), |
| "r"(b0), |
| "r"(c0), "r"(c1)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x128_S32U1U1S32_TN_XORPOPC |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[2]; |
| using BRegisters = uint32_t[1]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, |
| uint32_t const& b0, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k128.row.col.s32.b1.b1.s32.xor.popc " |
| "{%0, %1, %2, %3}," |
| "{%4, %5}," |
| "{%6}," |
| "{%7, %8, %9, %10};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), |
| "r"(b0), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
| struct SM80_16x8x256_S32U1U1S32_TN_XORPOPC |
| { |
| using DRegisters = uint32_t[4]; |
| using ARegisters = uint32_t[4]; |
| using BRegisters = uint32_t[2]; |
| using CRegisters = uint32_t[4]; |
|
|
| CUTE_HOST_DEVICE static void |
| fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, |
| uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, |
| uint32_t const& b0, uint32_t const& b1, |
| uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) |
| { |
| #if defined(CUTE_ARCH_MMA_B1_XOR_SM80_ENABLED) |
| asm volatile( |
| "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc " |
| "{%0, %1, %2, %3}," |
| "{%4, %5, %6, %7}," |
| "{%8, %9}," |
| "{%10, %11, %12, %13};\n" |
| : "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) |
| : "r"(a0), "r"(a1), "r"(a2), "r"(a3), |
| "r"(b0), "r"(b1), |
| "r"(c0), "r"(c1), "r"(c2), "r"(c3)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x256_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| } |
|
|