| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| #pragma once |
|
|
| #include <cute/config.hpp> |
|
|
| #include <cute/arch/copy.hpp> |
|
|
| |
| #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)) |
| # define CUTE_ARCH_CP_ASYNC_SM80_ENABLED |
| #endif |
|
|
| namespace cute |
| { |
|
|
| |
| template <class TS, class TD = TS> |
| struct SM80_CP_ASYNC_CACHEALWAYS |
| { |
| using SRegisters = TS[1]; |
| using DRegisters = TD[1]; |
|
|
| static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); |
| static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); |
|
|
| CUTE_HOST_DEVICE static void |
| copy(TS const& gmem_src, |
| TD & smem_dst) |
| { |
| #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) |
| TS const* gmem_ptr = &gmem_src; |
| uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); |
| asm volatile("cp.async.ca.shared.global.L2::128B [%0], [%1], %2;\n" |
| :: "r"(smem_int_ptr), |
| "l"(gmem_ptr), |
| "n"(sizeof(TS))); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); |
| #endif |
| } |
| }; |
|
|
| |
| template <class TS, class TD = TS> |
| struct SM80_CP_ASYNC_CACHEGLOBAL |
| { |
| using SRegisters = TS[1]; |
| using DRegisters = TD[1]; |
|
|
| static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); |
| static_assert(sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); |
|
|
| CUTE_HOST_DEVICE static void |
| copy(TS const& gmem_src, |
| TD & smem_dst) |
| { |
| #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) |
| TS const* gmem_ptr = &gmem_src; |
| uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); |
| asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], %2;\n" |
| :: "r"(smem_int_ptr), |
| "l"(gmem_ptr), |
| "n"(sizeof(TS))); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); |
| #endif |
| } |
| }; |
|
|
| |
| template <class TS, class TD = TS> |
| struct SM80_CP_ASYNC_CACHEALWAYS_ZFILL |
| { |
| using SRegisters = TS[1]; |
| using DRegisters = TD[1]; |
|
|
| static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); |
| static_assert(sizeof(TS) == 4 || sizeof(TS) == 8 || sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); |
|
|
| CUTE_HOST_DEVICE static void |
| copy(TS const& gmem_src, |
| TD & smem_dst, |
| bool pred) |
| { |
| #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) |
| TS const* gmem_ptr = &gmem_src; |
| uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); |
| int src_size = pred ? sizeof(TS) : 0; |
| asm volatile("cp.async.ca.shared.global.L2::128B [%0], [%1], %2, %3;\n" |
| :: "r"(smem_int_ptr), |
| "l"(gmem_ptr), |
| "n"(sizeof(TS)), |
| "r"(src_size)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); |
| #endif |
| } |
| }; |
|
|
| |
| template <class TS, class TD = TS> |
| struct SM80_CP_ASYNC_CACHEGLOBAL_ZFILL |
| { |
| using SRegisters = TS[1]; |
| using DRegisters = TD[1]; |
|
|
| static_assert(sizeof(TS) == sizeof(TD), "cp.async requires sizeof(src_value_type) == sizeof(dst_value_type)"); |
| static_assert(sizeof(TS) == 16, "cp.async sizeof(TS) is not supported"); |
|
|
| CUTE_HOST_DEVICE static void |
| copy(TS const& gmem_src, |
| TD & smem_dst, |
| bool pred) |
| { |
| #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) |
| TS const* gmem_ptr = &gmem_src; |
| uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_dst); |
| int src_size = pred ? sizeof(TS) : 0; |
| asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], %2, %3;\n" |
| :: "r"(smem_int_ptr), |
| "l"(gmem_ptr), |
| "n"(sizeof(TS)), |
| "r"(src_size)); |
| #else |
| CUTE_INVALID_CONTROL_PATH("Support for cp.async instructions has not been enabled"); |
| #endif |
| } |
| }; |
|
|
| |
|
|
| |
/// Commit all previously issued (uncommitted) cp.async operations into a
/// new cp.async group. A no-op when SM80 cp.async support is not enabled.
CUTE_HOST_DEVICE
void
cp_async_fence()
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
  // Closes the current group so a later cp.async.wait_group can wait on it.
  asm volatile("cp.async.commit_group;\n" ::);
#endif
}
|
|
| |
|
|
| |
/// Block the calling thread until at most N previously committed cp.async
/// groups remain in flight. A no-op when SM80 cp.async support is not enabled.
/// @tparam N  Maximum number of still-pending cp.async groups to allow.
template <int N>
CUTE_HOST_DEVICE
void
cp_async_wait()
{
#if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
  if constexpr (N == 0) {
    // N == 0: wait for every outstanding cp.async to complete.
    asm volatile("cp.async.wait_all;\n" ::);
  } else {
    // Wait until no more than N committed groups are still pending.
    asm volatile("cp.async.wait_group %0;\n" :: "n"(N));
  }
#endif
}
|
|
| template <int N> |
| CUTE_HOST_DEVICE |
| void |
| cp_async_wait(Int<N>) |
| { |
| return cp_async_wait<N>(); |
| } |
|
|
| |
|
|
| } |
|
|