| |
|
|
#include "common.cuh"
|
|
|
|
// Convert a generic (64-bit) pointer into a 32-bit shared-memory state-space
// address, as required by PTX instructions that take a shared address in a
// 32-bit register operand (e.g. the "r"(dst) operand of cp_async_cg_16 below).
// The narrowing from the intrinsic's return type to unsigned int is
// intentional: shared-state-space addresses fit in 32 bits.
// Only usable when CP_ASYNC_AVAILABLE is defined (guard comes from common.cuh
// — presumably gating on an architecture with cp.async support; confirm
// there); otherwise the function is a NO_DEVICE_CODE stub.
static __device__ __forceinline__ unsigned int ggml_cuda_cvta_generic_to_shared(void * generic_ptr) {
#ifdef CP_ASYNC_AVAILABLE
    return __cvta_generic_to_shared(generic_ptr);
#else
    GGML_UNUSED(generic_ptr);
    NO_DEVICE_CODE;
    return 0;
#endif
}
|
|
| |
| |
| |
| |
| |
// Issue a single 16-byte asynchronous copy (PTX cp.async.cg) from global
// memory at `src` into shared memory at the 32-bit shared address `dst`
// (obtain `dst` via ggml_cuda_cvta_generic_to_shared).
//
// `preload` selects an optional L2 prefetch hint of 0 (none), 64, 128, or
// 256 bytes; any other value is rejected at compile time. The hinted
// `L2::<N>B` PTX syntax is only emitted when building with CUDA >= 11.4
// (CUDART_VERSION >= 11040) — on older toolkits every preload value falls
// back to the plain cp.async.cg form.
//
// NOTE(review): the 16-byte cp.async form implies 16-byte alignment of both
// `dst` and `src` — confirm at call sites. The copy is asynchronous; callers
// must wait (e.g. cp_async_wait_all) before reading the destination.
// Requires CP_ASYNC_AVAILABLE; otherwise compiles to a NO_DEVICE_CODE stub.
template <int preload>
static __device__ __forceinline__ void cp_async_cg_16(const unsigned int dst, const void * src) {
    static_assert(preload == 0 || preload == 64 || preload == 128 || preload == 256, "bad preload");
#ifdef CP_ASYNC_AVAILABLE
#if CUDART_VERSION >= 11040
    // `preload` is a compile-time template constant, so dead branches are
    // eliminated and exactly one asm statement survives per instantiation.
    if (preload == 256) {
        asm volatile("cp.async.cg.shared.global.L2::256B [%0], [%1], 16;"
            : : "r"(dst), "l"(src));
    } else if (preload == 128) {
        asm volatile("cp.async.cg.shared.global.L2::128B [%0], [%1], 16;"
            : : "r"(dst), "l"(src));
    } else if (preload == 64) {
        asm volatile("cp.async.cg.shared.global.L2::64B [%0], [%1], 16;"
            : : "r"(dst), "l"(src));
    } else
#endif
    {
        asm volatile("cp.async.cg.shared.global [%0], [%1], 16;"
            : : "r"(dst), "l"(src));
    }
#else
    GGML_UNUSED(dst);
    GGML_UNUSED(src);
    NO_DEVICE_CODE;
#endif
}
|
|
| |
| |
| |
// Block the calling thread until all cp.async operations it previously issued
// have completed (PTX cp.async.wait_all).
//
// The "memory" clobber is required for correctness: `volatile` alone only
// orders this asm against other volatile asm, so without the clobber the
// compiler may hoist ordinary shared-memory loads above the wait and read
// data the async copies have not yet written. NVIDIA's own pipeline
// primitives (__pipeline_wait_prior) emit the same clobber.
// Requires CP_ASYNC_AVAILABLE; otherwise compiles to a NO_DEVICE_CODE stub.
static __device__ __forceinline__ void cp_async_wait_all() {
#ifdef CP_ASYNC_AVAILABLE
    asm volatile("cp.async.wait_all;" ::: "memory");
#else
    NO_DEVICE_CODE;
#endif
}
|
|