Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh +47 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh +149 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh +514 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh +537 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h +358 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h +19 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h +105 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h +211 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h +181 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h +89 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh +53 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h +75 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h +318 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h +288 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh +15 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h +20 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h +37 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh +121 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h +44 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh +121 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h +5 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh +4 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h +11 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh +78 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h +13 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h +23 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh +405 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh +53 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h +40 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h +275 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h +34 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h +307 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h +146 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h +41 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Handle.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Types.h +12 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h +18 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h +3 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h +13 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/native/FractionalMaxPooling.h +80 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h +26 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_ops.h +28 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_native.h +24 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cuda_dispatch.h +26 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_cpu_dispatch.h +25 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atan_ops.h +50 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h +27 -0
- vllm/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_native.h +22 -0
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cuda.h>
|
| 4 |
+
#include <cuda_runtime.h>
|
| 5 |
+
#include <cuda_fp16.h>
|
| 6 |
+
|
| 7 |
+
#include <c10/macros/Export.h>
|
| 8 |
+
|
| 9 |
+
// Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 2 |
+
|
| 3 |
+
#include <cuda_runtime.h>
|
| 4 |
+
|
| 5 |
+
namespace at::cuda {
|
| 6 |
+
|
| 7 |
+
/**
|
| 8 |
+
Computes ceil(a / b)
|
| 9 |
+
*/
|
| 10 |
+
template <typename T>
|
| 11 |
+
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
|
| 12 |
+
return (a + b - 1) / b;
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
namespace {
|
| 16 |
+
|
| 17 |
+
// Threads per block for our apply kernel
|
| 18 |
+
// FIXME: use occupancy calculator instead
|
| 19 |
+
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
|
| 20 |
+
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
|
| 21 |
+
|
| 22 |
+
template <int step = 1>
|
| 23 |
+
inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
|
| 24 |
+
if (curDevice == -1) return false;
|
| 25 |
+
uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
|
| 26 |
+
uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
|
| 27 |
+
uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
|
| 28 |
+
if (numBlocks > maxGridX)
|
| 29 |
+
numBlocks = maxGridX;
|
| 30 |
+
grid = dim3(numBlocks);
|
| 31 |
+
return true;
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
constexpr int getApplyBlocksPerSM() {
|
| 35 |
+
return AT_APPLY_BLOCKS_PER_SM;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
constexpr int getApplyBlockSize() {
|
| 39 |
+
return AT_APPLY_THREADS_PER_BLOCK;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
|
| 43 |
+
return dim3(max_threads_per_block);
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
} // anonymous namespace
|
| 47 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <cstdint>
|
| 3 |
+
|
| 4 |
+
// Collection of direct PTX functions
|
| 5 |
+
|
| 6 |
+
namespace at::cuda {
|
| 7 |
+
|
| 8 |
+
template <typename T>
|
| 9 |
+
struct Bitfield {};
|
| 10 |
+
|
| 11 |
+
template <>
|
| 12 |
+
struct Bitfield<unsigned int> {
|
| 13 |
+
static __device__ __host__ __forceinline__
|
| 14 |
+
unsigned int getBitfield(unsigned int val, int pos, int len) {
|
| 15 |
+
#if !defined(__CUDA_ARCH__)
|
| 16 |
+
pos &= 0xff;
|
| 17 |
+
len &= 0xff;
|
| 18 |
+
|
| 19 |
+
unsigned int m = (1u << len) - 1u;
|
| 20 |
+
return (val >> pos) & m;
|
| 21 |
+
#else
|
| 22 |
+
unsigned int ret;
|
| 23 |
+
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
|
| 24 |
+
return ret;
|
| 25 |
+
#endif
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
static __device__ __host__ __forceinline__
|
| 29 |
+
unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
|
| 30 |
+
#if !defined(__CUDA_ARCH__)
|
| 31 |
+
pos &= 0xff;
|
| 32 |
+
len &= 0xff;
|
| 33 |
+
|
| 34 |
+
unsigned int m = (1u << len) - 1u;
|
| 35 |
+
toInsert &= m;
|
| 36 |
+
toInsert <<= pos;
|
| 37 |
+
m <<= pos;
|
| 38 |
+
|
| 39 |
+
return (val & ~m) | toInsert;
|
| 40 |
+
#else
|
| 41 |
+
unsigned int ret;
|
| 42 |
+
asm("bfi.b32 %0, %1, %2, %3, %4;" :
|
| 43 |
+
"=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
|
| 44 |
+
return ret;
|
| 45 |
+
#endif
|
| 46 |
+
}
|
| 47 |
+
};
|
| 48 |
+
|
| 49 |
+
template <>
|
| 50 |
+
struct Bitfield<uint64_t> {
|
| 51 |
+
static __device__ __host__ __forceinline__
|
| 52 |
+
uint64_t getBitfield(uint64_t val, int pos, int len) {
|
| 53 |
+
#if !defined(__CUDA_ARCH__)
|
| 54 |
+
pos &= 0xff;
|
| 55 |
+
len &= 0xff;
|
| 56 |
+
|
| 57 |
+
uint64_t m = (1u << len) - 1u;
|
| 58 |
+
return (val >> pos) & m;
|
| 59 |
+
#else
|
| 60 |
+
uint64_t ret;
|
| 61 |
+
asm("bfe.u64 %0, %1, %2, %3;" : "=l"(ret) : "l"(val), "r"(pos), "r"(len));
|
| 62 |
+
return ret;
|
| 63 |
+
#endif
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
static __device__ __host__ __forceinline__
|
| 67 |
+
uint64_t setBitfield(uint64_t val, uint64_t toInsert, int pos, int len) {
|
| 68 |
+
#if !defined(__CUDA_ARCH__)
|
| 69 |
+
pos &= 0xff;
|
| 70 |
+
len &= 0xff;
|
| 71 |
+
|
| 72 |
+
uint64_t m = (1u << len) - 1u;
|
| 73 |
+
toInsert &= m;
|
| 74 |
+
toInsert <<= pos;
|
| 75 |
+
m <<= pos;
|
| 76 |
+
|
| 77 |
+
return (val & ~m) | toInsert;
|
| 78 |
+
#else
|
| 79 |
+
uint64_t ret;
|
| 80 |
+
asm("bfi.b64 %0, %1, %2, %3, %4;" :
|
| 81 |
+
"=l"(ret) : "l"(toInsert), "l"(val), "r"(pos), "r"(len));
|
| 82 |
+
return ret;
|
| 83 |
+
#endif
|
| 84 |
+
}
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
__device__ __forceinline__ int getLaneId() {
|
| 88 |
+
#if defined(USE_ROCM)
|
| 89 |
+
return __lane_id();
|
| 90 |
+
#else
|
| 91 |
+
int laneId;
|
| 92 |
+
asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
|
| 93 |
+
return laneId;
|
| 94 |
+
#endif
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
#if defined(USE_ROCM)
|
| 98 |
+
__device__ __forceinline__ unsigned long long int getLaneMaskLt() {
|
| 99 |
+
const std::uint64_t m = (1ull << getLaneId()) - 1ull;
|
| 100 |
+
return m;
|
| 101 |
+
}
|
| 102 |
+
#else
|
| 103 |
+
__device__ __forceinline__ unsigned getLaneMaskLt() {
|
| 104 |
+
unsigned mask;
|
| 105 |
+
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(mask));
|
| 106 |
+
return mask;
|
| 107 |
+
}
|
| 108 |
+
#endif
|
| 109 |
+
|
| 110 |
+
#if defined (USE_ROCM)
|
| 111 |
+
__device__ __forceinline__ unsigned long long int getLaneMaskLe() {
|
| 112 |
+
std::uint64_t m = UINT64_MAX >> (sizeof(std::uint64_t) * CHAR_BIT - (getLaneId() + 1));
|
| 113 |
+
return m;
|
| 114 |
+
}
|
| 115 |
+
#else
|
| 116 |
+
__device__ __forceinline__ unsigned getLaneMaskLe() {
|
| 117 |
+
unsigned mask;
|
| 118 |
+
asm("mov.u32 %0, %%lanemask_le;" : "=r"(mask));
|
| 119 |
+
return mask;
|
| 120 |
+
}
|
| 121 |
+
#endif
|
| 122 |
+
|
| 123 |
+
#if defined(USE_ROCM)
|
| 124 |
+
__device__ __forceinline__ unsigned long long int getLaneMaskGt() {
|
| 125 |
+
const std::uint64_t m = getLaneMaskLe();
|
| 126 |
+
return m ? ~m : m;
|
| 127 |
+
}
|
| 128 |
+
#else
|
| 129 |
+
__device__ __forceinline__ unsigned getLaneMaskGt() {
|
| 130 |
+
unsigned mask;
|
| 131 |
+
asm("mov.u32 %0, %%lanemask_gt;" : "=r"(mask));
|
| 132 |
+
return mask;
|
| 133 |
+
}
|
| 134 |
+
#endif
|
| 135 |
+
|
| 136 |
+
#if defined(USE_ROCM)
|
| 137 |
+
__device__ __forceinline__ unsigned long long int getLaneMaskGe() {
|
| 138 |
+
const std::uint64_t m = getLaneMaskLt();
|
| 139 |
+
return ~m;
|
| 140 |
+
}
|
| 141 |
+
#else
|
| 142 |
+
__device__ __forceinline__ unsigned getLaneMaskGe() {
|
| 143 |
+
unsigned mask;
|
| 144 |
+
asm("mov.u32 %0, %%lanemask_ge;" : "=r"(mask));
|
| 145 |
+
return mask;
|
| 146 |
+
}
|
| 147 |
+
#endif
|
| 148 |
+
|
| 149 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh
ADDED
|
@@ -0,0 +1,514 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cuda.h>
|
| 4 |
+
#include <c10/util/Half.h>
|
| 5 |
+
#include <c10/util/BFloat16.h>
|
| 6 |
+
|
| 7 |
+
#include <ATen/NumericUtils.h>
|
| 8 |
+
|
| 9 |
+
#if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))))
|
| 10 |
+
#include <cuda_bf16.h>
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
template <typename T>
|
| 14 |
+
struct AtomicFPOp;
|
| 15 |
+
|
| 16 |
+
template <>
|
| 17 |
+
struct AtomicFPOp<at::Half> {
|
| 18 |
+
template <typename func_t>
|
| 19 |
+
inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) {
|
| 20 |
+
unsigned int * address_as_ui =
|
| 21 |
+
(unsigned int *) ((char *)address - ((size_t)address & 2));
|
| 22 |
+
unsigned int old = *address_as_ui;
|
| 23 |
+
unsigned int assumed;
|
| 24 |
+
|
| 25 |
+
at::Half hsum;
|
| 26 |
+
do {
|
| 27 |
+
assumed = old;
|
| 28 |
+
hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
|
| 29 |
+
hsum = func(hsum, val);
|
| 30 |
+
old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
|
| 31 |
+
old = atomicCAS(address_as_ui, assumed, old);
|
| 32 |
+
} while (assumed != old);
|
| 33 |
+
hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
|
| 34 |
+
return hsum;
|
| 35 |
+
}
|
| 36 |
+
};
|
| 37 |
+
|
| 38 |
+
template <>
|
| 39 |
+
struct AtomicFPOp<at::BFloat16> {
|
| 40 |
+
template <typename func_t>
|
| 41 |
+
inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) {
|
| 42 |
+
unsigned int * address_as_ui =
|
| 43 |
+
(unsigned int *) ((char *)address - ((size_t)address & 2));
|
| 44 |
+
unsigned int old = *address_as_ui;
|
| 45 |
+
unsigned int assumed;
|
| 46 |
+
|
| 47 |
+
at::BFloat16 bsum;
|
| 48 |
+
do {
|
| 49 |
+
assumed = old;
|
| 50 |
+
bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
|
| 51 |
+
bsum = func(bsum, val);
|
| 52 |
+
old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x;
|
| 53 |
+
old = atomicCAS(address_as_ui, assumed, old);
|
| 54 |
+
} while (assumed != old);
|
| 55 |
+
bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
|
| 56 |
+
return bsum.x;
|
| 57 |
+
}
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
template <>
|
| 61 |
+
struct AtomicFPOp<double> {
|
| 62 |
+
template <typename func_t>
|
| 63 |
+
inline __device__ double operator() (double * address, double val, const func_t& func) {
|
| 64 |
+
unsigned long long int* address_as_ull = (unsigned long long int*)address;
|
| 65 |
+
unsigned long long int old = *address_as_ull;
|
| 66 |
+
unsigned long long int assumed;
|
| 67 |
+
|
| 68 |
+
do {
|
| 69 |
+
assumed = old;
|
| 70 |
+
old = atomicCAS(address_as_ull, assumed, func(val, assumed));
|
| 71 |
+
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
|
| 72 |
+
} while (assumed != old);
|
| 73 |
+
|
| 74 |
+
return __longlong_as_double(old);
|
| 75 |
+
}
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
#define ATOMIC_INTEGER_IMPL(NAME) \
|
| 79 |
+
template <typename T, size_t n> \
|
| 80 |
+
struct Atomic##NAME##IntegerImpl; \
|
| 81 |
+
\
|
| 82 |
+
template<typename T> \
|
| 83 |
+
struct Atomic##NAME##IntegerImpl<T, 1> { \
|
| 84 |
+
template <typename func_t> \
|
| 85 |
+
inline __device__ void operator()(T *address, T val, const func_t& func) { \
|
| 86 |
+
size_t offset = (size_t)address & 3; \
|
| 87 |
+
uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
|
| 88 |
+
uint32_t old = *address_as_ui; \
|
| 89 |
+
uint32_t shift = offset * 8; \
|
| 90 |
+
uint32_t old_byte; \
|
| 91 |
+
uint32_t newval; \
|
| 92 |
+
uint32_t assumed; \
|
| 93 |
+
\
|
| 94 |
+
do { \
|
| 95 |
+
assumed = old; \
|
| 96 |
+
old_byte = (old >> shift) & 0xff; \
|
| 97 |
+
newval = static_cast<uint8_t>(func(val, static_cast<T>(old_byte))); \
|
| 98 |
+
newval = (old & ~(0x000000ff << shift)) | (newval << shift); \
|
| 99 |
+
old = atomicCAS(address_as_ui, assumed, newval); \
|
| 100 |
+
} while (assumed != old); \
|
| 101 |
+
} \
|
| 102 |
+
}; \
|
| 103 |
+
\
|
| 104 |
+
template<typename T> \
|
| 105 |
+
struct Atomic##NAME##IntegerImpl<T, 2> { \
|
| 106 |
+
template <typename func_t> \
|
| 107 |
+
inline __device__ void operator()(T *address, T val, const func_t& func) { \
|
| 108 |
+
size_t offset = (size_t)address & 2; \
|
| 109 |
+
uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \
|
| 110 |
+
bool is_32_align = offset; \
|
| 111 |
+
uint32_t old = *address_as_ui; \
|
| 112 |
+
uint32_t old_bytes; \
|
| 113 |
+
uint32_t newval; \
|
| 114 |
+
uint32_t assumed; \
|
| 115 |
+
\
|
| 116 |
+
do { \
|
| 117 |
+
assumed = old; \
|
| 118 |
+
old_bytes = is_32_align ? old >> 16 : old & 0xffff; \
|
| 119 |
+
newval = static_cast<uint16_t>(func(val, static_cast<T>(old_bytes))); \
|
| 120 |
+
newval = is_32_align ? (old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \
|
| 121 |
+
old = atomicCAS(address_as_ui, assumed, newval); \
|
| 122 |
+
} while (assumed != old); \
|
| 123 |
+
} \
|
| 124 |
+
}; \
|
| 125 |
+
\
|
| 126 |
+
template<typename T> \
|
| 127 |
+
struct Atomic##NAME##IntegerImpl<T, 4> { \
|
| 128 |
+
template <typename func_t> \
|
| 129 |
+
inline __device__ void operator()(T *address, T val, const func_t& func) { \
|
| 130 |
+
uint32_t * address_as_ui = (uint32_t *) (address); \
|
| 131 |
+
uint32_t old = *address_as_ui; \
|
| 132 |
+
uint32_t newval; \
|
| 133 |
+
uint32_t assumed; \
|
| 134 |
+
\
|
| 135 |
+
do { \
|
| 136 |
+
assumed = old; \
|
| 137 |
+
newval = static_cast<uint32_t>(func(val, static_cast<T>(old))); \
|
| 138 |
+
old = atomicCAS(address_as_ui, assumed, newval); \
|
| 139 |
+
} while (assumed != old); \
|
| 140 |
+
} \
|
| 141 |
+
}; \
|
| 142 |
+
\
|
| 143 |
+
template<typename T> \
|
| 144 |
+
struct Atomic##NAME##IntegerImpl<T, 8> { \
|
| 145 |
+
template <typename func_t> \
|
| 146 |
+
inline __device__ void operator()(T *address, T val, const func_t& func) { \
|
| 147 |
+
unsigned long long * address_as_ui = (unsigned long long *) (address); \
|
| 148 |
+
unsigned long long old = *address_as_ui; \
|
| 149 |
+
unsigned long long newval; \
|
| 150 |
+
unsigned long long assumed; \
|
| 151 |
+
\
|
| 152 |
+
do { \
|
| 153 |
+
assumed = old; \
|
| 154 |
+
newval = static_cast<uint64_t>(func(val, static_cast<T>(old))); \
|
| 155 |
+
old = atomicCAS(address_as_ui, assumed, newval); \
|
| 156 |
+
} while (assumed != old); \
|
| 157 |
+
} \
|
| 158 |
+
};
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \
|
| 162 |
+
inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \
|
| 163 |
+
Atomic##NAME##IntegerImpl<DTYPE, sizeof(DTYPE)>()(address, \
|
| 164 |
+
val, \
|
| 165 |
+
[](DTYPE a, DTYPE b) { \
|
| 166 |
+
return OP; \
|
| 167 |
+
}); \
|
| 168 |
+
} \
|
| 169 |
+
|
| 170 |
+
ATOMIC_INTEGER_IMPL(Add)
|
| 171 |
+
GPU_ATOMIC_INTEGER(Add, a || b, bool)
|
| 172 |
+
|
| 173 |
+
// Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64)
|
| 174 |
+
inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) {
|
| 175 |
+
AtomicAddIntegerImpl<uint8_t, sizeof(uint8_t)>()(address,
|
| 176 |
+
val,
|
| 177 |
+
[](uint8_t a, uint8_t b) {
|
| 178 |
+
return a + b;
|
| 179 |
+
});
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) {
|
| 183 |
+
AtomicAddIntegerImpl<int8_t, sizeof(int8_t)>()(address,
|
| 184 |
+
val,
|
| 185 |
+
[](int8_t a, int8_t b) {
|
| 186 |
+
return a + b;
|
| 187 |
+
});
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) {
|
| 191 |
+
AtomicAddIntegerImpl<int16_t, sizeof(int16_t)>()(address,
|
| 192 |
+
val,
|
| 193 |
+
[](int16_t a, int16_t b) {
|
| 194 |
+
return a + b;
|
| 195 |
+
});
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) {
|
| 199 |
+
return atomicAdd(address, val);
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) {
|
| 203 |
+
#if defined(USE_ROCM)
|
| 204 |
+
__atomic_fetch_add(address, val, __ATOMIC_RELAXED);
|
| 205 |
+
#else
|
| 206 |
+
static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed");
|
| 207 |
+
atomicAdd(reinterpret_cast<unsigned long long int *>(address), static_cast<unsigned long long int>(val));
|
| 208 |
+
#endif
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) {
|
| 212 |
+
#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
|
| 213 |
+
return AtomicFPOp<at::Half>()(address, val,
|
| 214 |
+
[](at::Half hsum, at::Half val) {
|
| 215 |
+
return hsum + val;
|
| 216 |
+
});
|
| 217 |
+
#else
|
| 218 |
+
return atomicAdd(reinterpret_cast<__half*>(address), val);
|
| 219 |
+
#endif
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) {
|
| 223 |
+
#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))
|
| 224 |
+
return AtomicFPOp<at::BFloat16>()(address, val,
|
| 225 |
+
[](at::BFloat16 bsum, at::BFloat16 val) {
|
| 226 |
+
return bsum + val;
|
| 227 |
+
});
|
| 228 |
+
#else
|
| 229 |
+
__nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val));
|
| 230 |
+
return *reinterpret_cast<c10::BFloat16*>(&r);
|
| 231 |
+
#endif
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
|
| 235 |
+
// from CUDA C Programmic Guide
|
| 236 |
+
inline __device__ double atomicAdd(double* address, double val)
|
| 237 |
+
#if defined(__clang__) && defined(__CUDA__)
|
| 238 |
+
#pragma GCC diagnostic push
|
| 239 |
+
#pragma GCC diagnostic ignored "-Wgcc-compat"
|
| 240 |
+
__attribute__((enable_if(true, "")))
|
| 241 |
+
#pragma GCC diagnostic pop
|
| 242 |
+
#endif
|
| 243 |
+
{
|
| 244 |
+
|
| 245 |
+
return AtomicFPOp<double>()(address, val,
|
| 246 |
+
[](double val, unsigned long long int assumed) {
|
| 247 |
+
return __double_as_longlong(val + __longlong_as_double(assumed));
|
| 248 |
+
});
|
| 249 |
+
}
|
| 250 |
+
#elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__))
|
| 251 |
+
|
| 252 |
+
/* Note [hip-clang differences to hcc]
|
| 253 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 254 |
+
* The upcoming hip-clang compiler for ROCm differs from hcc in a few details.
|
| 255 |
+
* It exports the __HIP__ macro, we can hence differentiate between hcc and
|
| 256 |
+
* hip-clang. In the below, hcc only received support for atomicAdd with double
|
| 257 |
+
* typing after work week 18312. hip-clang had support from the first version.
|
| 258 |
+
* In general, the code-visible differences between hip-clang and hcc will be
|
| 259 |
+
* minimal.
|
| 260 |
+
*/
|
| 261 |
+
|
| 262 |
+
#if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__
|
| 263 |
+
// This needs to be defined for the host side pass
|
| 264 |
+
inline __device__ double atomicAdd(double *address, double val) { }
|
| 265 |
+
#endif
|
| 266 |
+
#endif
|
| 267 |
+
|
| 268 |
+
inline __device__ double gpuAtomicAdd(double *address, double val) {
|
| 269 |
+
return atomicAdd(address, val);
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
inline __device__ float gpuAtomicAdd(float *address, float val) {
|
| 273 |
+
return atomicAdd(address, val);
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
template<typename T>
|
| 277 |
+
inline __device__ void gpuAtomicAdd(c10::complex<T> *address, c10::complex<T> val) {
|
| 278 |
+
gpuAtomicAdd(&address->real_, val.real_);
|
| 279 |
+
gpuAtomicAdd(&address->imag_, val.imag_);
|
| 280 |
+
}
|
| 281 |
+
|
| 282 |
+
/* Note [gpuAtomicAdd vs atomicAdd]
|
| 283 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 284 |
+
* Some extensions such as torchvision call atomicAdd()
|
| 285 |
+
* directly and require non-library provided data type support. Only for these, we
|
| 286 |
+
* continue to provide atomicAdd overloads.
|
| 287 |
+
*/
|
| 288 |
+
inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
|
| 289 |
+
return gpuAtomicAdd(address, val);
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) {
|
| 293 |
+
return gpuAtomicAdd(address, val);
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
inline __device__ void atomicAdd(uint8_t *address, uint8_t val) {
|
| 297 |
+
gpuAtomicAdd(address, val);
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
inline __device__ void atomicAdd(int8_t *address, int8_t val) {
|
| 301 |
+
gpuAtomicAdd(address, val);
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
inline __device__ void atomicAdd(int16_t *address, int16_t val) {
|
| 305 |
+
gpuAtomicAdd(address, val);
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
inline __device__ void atomicAdd(int64_t *address, int64_t val) {
|
| 309 |
+
gpuAtomicAdd(address, val);
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
inline __device__ void atomicAdd(bool *address, bool val) {
|
| 313 |
+
gpuAtomicAdd(address, val);
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
/* Note [explicitly non-returning atomics]
|
| 317 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 318 |
+
* AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet().
|
| 319 |
+
* Due to compiler limitations, callers must opt-in to guarantee the optimized instruction.
|
| 320 |
+
* This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd,
|
| 321 |
+
* therefore we need a new API 'gpuAtomicAddNoReturn'.
|
| 322 |
+
*/
|
| 323 |
+
template<typename T>
|
| 324 |
+
inline __device__ void gpuAtomicAddNoReturn(c10::complex<T> *address, c10::complex<T> val) { gpuAtomicAdd(address, val); }
|
| 325 |
+
inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); }
|
| 326 |
+
inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); }
|
| 327 |
+
inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); }
|
| 328 |
+
inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); }
|
| 329 |
+
inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); }
|
| 330 |
+
inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); }
|
| 331 |
+
inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); }
|
| 332 |
+
inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); }
|
| 333 |
+
inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); }
|
| 334 |
+
|
| 335 |
+
/* Special case fp32 atomic. */
|
| 336 |
+
#if defined(USE_ROCM)
|
| 337 |
+
inline __device__ void gpuAtomicAddNoReturn(float *address, float val) {
|
| 338 |
+
#if defined(__gfx908__)
|
| 339 |
+
atomicAddNoRet(address, val);
|
| 340 |
+
#else
|
| 341 |
+
(void)unsafeAtomicAdd(address, val);
|
| 342 |
+
#endif
|
| 343 |
+
}
|
| 344 |
+
#else
|
| 345 |
+
inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); }
|
| 346 |
+
#endif
|
| 347 |
+
|
| 348 |
+
// Atomic multiplication implementation.

// Integer overloads of gpuAtomicMul, generated by the ATOMIC_INTEGER_IMPL /
// GPU_ATOMIC_INTEGER macro machinery defined earlier in this header, with
// `a * b` as the combining expression.
ATOMIC_INTEGER_IMPL(Mul)
GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int8_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
|
| 356 |
+
|
| 357 |
+
// Atomic multiply for at::Half, built on the generic CAS-based AtomicFPOp
// helper; returns the previously stored value.
inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
  const auto combine = [](at::Half current, at::Half operand) {
    return current * operand;
  };
  return AtomicFPOp<at::Half>()(address, val, combine);
}

// Atomic multiply for at::BFloat16, built on the generic CAS-based AtomicFPOp
// helper; returns the previously stored value.
inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) {
  const auto combine = [](at::BFloat16 current, at::BFloat16 operand) {
    return current * operand;
  };
  return AtomicFPOp<at::BFloat16>()(address, val, combine);
}
|
| 370 |
+
|
| 371 |
+
// Atomic multiply for double. AtomicFPOp's double path hands the functor the
// current value reinterpreted as 64-bit integer bits ("assumed"); the functor
// must return the updated value as bits as well.
inline __device__ double gpuAtomicMul(double * address, double val) {
  const auto combine_bits = [](double operand, unsigned long long int assumed_bits) {
    const double product = operand * __longlong_as_double(assumed_bits);
    return __double_as_longlong(product);
  };
  return AtomicFPOp<double>()(address, val, combine_bits);
}
|
| 377 |
+
|
| 378 |
+
// Don't use a templated function for this since the multiply functor would
// otherwise default to the CUDA built-in add path; fp32 is implemented
// directly on top of atomicCAS with bit-casts. Returns the previous value.
inline __device__ float gpuAtomicMul (float * address, float val) {
  unsigned int* address_as_ui = (unsigned int*)address;
  unsigned int observed = *address_as_ui;
  unsigned int expected;

  // Classic CAS retry loop: recompute the product against the freshly
  // observed bits until no other thread raced in between.
  do {
    expected = observed;
    const float product = val * __int_as_float(expected);
    observed = atomicCAS(address_as_ui, expected, __float_as_int(product));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
  } while (expected != observed);

  return __int_as_float(observed);
}
|
| 395 |
+
|
| 396 |
+
// Atomic maximum implementation.
|
| 397 |
+
|
| 398 |
+
// NaN-propagating max: unlike a bare std::max, a NaN operand is returned as
// the result instead of being silently dropped by the (always-false) NaN
// comparison.
template <typename T>
__host__ __device__ T safe_max(T a, T b) {
#if defined(__HIPCC__)
  // TODO: remove this special case for HIP when issue is fixed:
  //       https://github.com/ROCm-Developer-Tools/HIP/issues/2209
  T result = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max<T>(a, b));
#else
  T result = at::_isnan(b) ? b : std::max<T>(a, b);
#endif
  return result;
}
|
| 410 |
+
|
| 411 |
+
// Integer overloads of gpuAtomicMax, generated by the macro machinery defined
// earlier in this header; safe_max keeps NaN semantics consistent with the
// floating-point overloads below (a no-op concern for integers, but keeps the
// macro expression uniform).
ATOMIC_INTEGER_IMPL(Max)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t)
|
| 417 |
+
|
| 418 |
+
// Atomic NaN-propagating max for at::Half via the CAS-based AtomicFPOp
// helper; returns the previously stored value.
inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
  const auto combine = [](at::Half current, at::Half operand) {
    return safe_max(current, operand);
  };
  return AtomicFPOp<at::Half>()(address, val, combine);
}

// Atomic NaN-propagating max for at::BFloat16 via the CAS-based AtomicFPOp
// helper; returns the previously stored value.
inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
  const auto combine = [](at::BFloat16 current, at::BFloat16 operand) {
    return safe_max(current, operand);
  };
  return AtomicFPOp<at::BFloat16>()(address, val, combine);
}
|
| 431 |
+
|
| 432 |
+
// Atomic NaN-propagating max for double. The AtomicFPOp double path passes
// the current value as raw 64-bit integer bits ("assumed"); the functor
// returns the updated value as bits.
inline __device__ double gpuAtomicMax(double * address, double val) {
  const auto combine_bits = [](double operand, unsigned long long int assumed_bits) {
    const double result = safe_max(operand, __longlong_as_double(assumed_bits));
    return __double_as_longlong(result);
  };
  return AtomicFPOp<double>()(address, val, combine_bits);
}
|
| 438 |
+
|
| 439 |
+
// Don't use a templated function for this; fp32 is special-cased directly on
// top of atomicCAS with bit-casts. Returns the previous value.
inline __device__ float gpuAtomicMax(float * address, float val) {
  unsigned int* address_as_ui = (unsigned int*)address;
  unsigned int observed = *address_as_ui;
  unsigned int expected;

  // CAS retry loop: recompute the max against the freshly observed bits
  // until no other thread raced in between.
  do {
    expected = observed;
    const float candidate = safe_max(val, __int_as_float(expected));
    observed = atomicCAS(address_as_ui, expected, __float_as_int(candidate));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
  } while (expected != observed);

  return __int_as_float(observed);
}
|
| 455 |
+
|
| 456 |
+
// Atomic minimum implementation.
|
| 457 |
+
|
| 458 |
+
// NaN-propagating min: unlike a bare std::min, a NaN operand is returned as
// the result instead of being silently dropped by the (always-false) NaN
// comparison.
template <typename T>
__host__ __device__ T safe_min(T a, T b) {
#if defined(__HIPCC__)
  // TODO: remove this special case for HIP when issue is fixed:
  //       https://github.com/ROCm-Developer-Tools/HIP/issues/2209
  T result = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min<T>(a, b));
#else
  T result = at::_isnan(b) ? b : std::min<T>(a, b);
#endif
  return result;
}
|
| 470 |
+
|
| 471 |
+
// Integer overloads of gpuAtomicMin, generated by the macro machinery defined
// earlier in this header, mirroring the Max family above.
ATOMIC_INTEGER_IMPL(Min)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t)
|
| 477 |
+
|
| 478 |
+
// Atomic NaN-propagating min for at::Half via the CAS-based AtomicFPOp
// helper; returns the previously stored value.
inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
  const auto combine = [](at::Half current, at::Half operand) {
    return safe_min(current, operand);
  };
  return AtomicFPOp<at::Half>()(address, val, combine);
}

// Atomic NaN-propagating min for at::BFloat16 via the CAS-based AtomicFPOp
// helper; returns the previously stored value.
inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
  const auto combine = [](at::BFloat16 current, at::BFloat16 operand) {
    return safe_min(current, operand);
  };
  return AtomicFPOp<at::BFloat16>()(address, val, combine);
}
|
| 491 |
+
|
| 492 |
+
// Atomic NaN-propagating min for double. The AtomicFPOp double path passes
// the current value as raw 64-bit integer bits ("assumed"); the functor
// returns the updated value as bits.
inline __device__ double gpuAtomicMin(double * address, double val) {
  const auto combine_bits = [](double operand, unsigned long long int assumed_bits) {
    const double result = safe_min(operand, __longlong_as_double(assumed_bits));
    return __double_as_longlong(result);
  };
  return AtomicFPOp<double>()(address, val, combine_bits);
}
|
| 498 |
+
|
| 499 |
+
// Don't use a templated function for this; fp32 is special-cased directly on
// top of atomicCAS with bit-casts. Returns the previous value.
inline __device__ float gpuAtomicMin(float * address, float val) {
  unsigned int* address_as_ui = (unsigned int*)address;
  unsigned int observed = *address_as_ui;
  unsigned int expected;

  // CAS retry loop: recompute the min against the freshly observed bits
  // until no other thread raced in between.
  do {
    expected = observed;
    const float candidate = safe_min(val, __int_as_float(expected));
    observed = atomicCAS(address_as_ui, expected, __float_as_int(candidate));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
  } while (expected != observed);

  return __int_as_float(observed);
}
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh
ADDED
|
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/ApplyGridUtils.cuh>
|
| 4 |
+
#include <ATen/cuda/detail/IndexUtils.cuh>
|
| 5 |
+
#include <ATen/core/TensorBase.h>
|
| 6 |
+
#include <ATen/ceil_div.h>
|
| 7 |
+
#include <ATen/cuda/Atomic.cuh>
|
| 8 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 9 |
+
#include <c10/macros/Macros.h>
|
| 10 |
+
#include <ATen/native/Copy.h>
|
| 11 |
+
|
| 12 |
+
#include <math.h>
|
| 13 |
+
|
| 14 |
+
//
|
| 15 |
+
// This file contains pointwise operation functions and kernels that
|
| 16 |
+
// work on both contiguous and non-contiguous tensor arguments of
|
| 17 |
+
// arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without
|
| 18 |
+
// copying or temporary storage.
|
| 19 |
+
//
|
| 20 |
+
|
| 21 |
+
/*
|
| 22 |
+
NOTE [ CUDA_tensor_applyN helpers ]
|
| 23 |
+
|
| 24 |
+
The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4)
|
| 25 |
+
functions apply a pointwise operator to N tensor(s).
|
| 26 |
+
|
| 27 |
+
The calling convention is
|
| 28 |
+
|
| 29 |
+
1. The template arguments should be, sequentially,
|
| 30 |
+
- First N typename args specify the scalar types of each of the N tensors.
|
| 31 |
+
- (Optional) `int step` arg specifies the number of elements processed
|
| 32 |
+
together at the same time.
|
| 33 |
+
Default is 1.
|
| 34 |
+
- A usually omitted (i.e., inferred) typename arg specifies the type of the
|
| 35 |
+
function/functor applied on `N * step` values in each iteration of each
|
| 36 |
+
CUDA thread.
|
| 37 |
+
2. The arguments should be, sequentially,
|
| 38 |
+
- N tensors
|
| 39 |
+
- op: a function/functor that processes `N * step` values at the same time.
|
| 40 |
+
- If `step == 1`, it must have signature
|
| 41 |
+
`void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where
|
| 42 |
+
`scalar*_t`s are the first N typename template args, and the inputs
|
| 43 |
+
are the `N` values from the `N` tensors retrieved at a common index.
|
| 44 |
+
      - Otherwise, it must have signature
|
| 45 |
+
void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&, // repeat `step` times
|
| 46 |
+
scalar2_t&, scalar2_t&, ..., scalar2_t&, // repeat `step` times
|
| 47 |
+
...,
|
| 48 |
+
scalarN_t&, scalarN_t&, ..., scalarN_t&) // repeat `step` times
|
| 49 |
+
Different from `step == 1` case, it processes `N * step` values taken
|
| 50 |
+
from `step` common indices. Moreover, the first input `n` represents the
|
| 51 |
+
number of valid indices (it will always have `0 < n <= step`). It will
|
| 52 |
+
almost always be `step`, but at the boundary we may not have full `step`
|
| 53 |
+
elements and `n` can be a lesser value.
|
| 54 |
+
|
| 55 |
+
E.g., if `step == 4` and `N == 2`, `op` could be
|
| 56 |
+
|
| 57 |
+
[](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4,
|
| 58 |
+
scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) {
|
| 59 |
+
// Only process u1, ..., un and v1, ..., vn.
|
| 60 |
+
// So if `n == 3`, `u4` and `v4` need not to be considered.
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
In both cases, the references can actually be const, but at least one of
|
| 64 |
+
them should be non-const in order to write the output.
|
| 65 |
+
- (Optional, but recommended) N TensorArgType args that specify for each
|
| 66 |
+
      tensor whether `op` reads AND writes (i.e., TensorArgType::ReadWrite),
|
| 67 |
+
or only reads (i.e., TensorArgType::ReadOnly).
|
| 68 |
+
Default is TensorArgType::ReadWrite for first Tensor, and
|
| 69 |
+
TensorArgType::ReadOnly for the rest.
|
| 70 |
+
|
| 71 |
+
E.g.,
|
| 72 |
+
|
| 73 |
+
to compute a = b^2 for a and b of same dtype, we can call
|
| 74 |
+
|
| 75 |
+
CUDA_tensor_apply2<scalar, scalar>(
|
| 76 |
+
a, b,
|
| 77 |
+
[] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; }
|
| 78 |
+
);
|
| 79 |
+
|
| 80 |
+
to work on 2 values at the same time, we can call
|
| 81 |
+
|
| 82 |
+
CUDA_tensor_apply2<scalar1, scalar2, 2>(
|
| 83 |
+
a, b,
|
| 84 |
+
[] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2,
|
| 85 |
+
const scalar2 &b_val1, const scalar2 &b_val2) {
|
| 86 |
+
// call special vectorized op here, or just do elementwise and enjoy unrolling...
|
| 87 |
+
// if n == 1, only process a_val1 and b_val1
|
| 88 |
+
}
|
| 89 |
+
);
|
| 90 |
+
*/
|
| 91 |
+
|
| 92 |
+
namespace at::cuda {
|
| 93 |
+
|
| 94 |
+
// TODO: combine with TensorArg? So far that's been for debugging, and this is functional...
|
| 95 |
+
enum class TensorArgType { ReadWrite, ReadOnly };
|
| 96 |
+
|
| 97 |
+
namespace {
|
| 98 |
+
|
| 99 |
+
// Rearrange dimensions for pointwise operations so that strides are in
|
| 100 |
+
// decreasing order as much as possible, so that kernels have better memory
|
| 101 |
+
// access patterns.
|
| 102 |
+
//
|
| 103 |
+
// For example, consider a binary operation on two "transposed" 2-dim tensors:
|
| 104 |
+
// sizes: 256 512
|
| 105 |
+
// aInfo->strides: 1 256
|
| 106 |
+
// bInfo->strides: 1 256
|
| 107 |
+
//
|
| 108 |
+
// Given this, each concurrent memory access inside kernelPointwiseApply2() is
|
| 109 |
+
// exactly 256 elements apart, resulting in poor performance.
|
| 110 |
+
//
|
| 111 |
+
// This function exchanges dimensions so that memory access is contiguous:
|
| 112 |
+
// sizes: 512 256
|
| 113 |
+
// aInfo->strides: 256 1
|
| 114 |
+
// bInfo->strides: 256 1
|
| 115 |
+
//
|
| 116 |
+
// (Actually, it becomes even better because now collapseDims() can turn each
|
| 117 |
+
// input into one contiguous array.)
|
| 118 |
+
//
|
| 119 |
+
// In general, given M (<=4) TensorInfo's with N dimensions, we can view each
|
| 120 |
+
// strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange
|
| 121 |
+
// strides[i] and [j] if
|
| 122 |
+
// (1) strides[i][k] < strides[j][k] for some k (0 <= k < M)
|
| 123 |
+
// (exchanging them will benefit input #k), and
|
| 124 |
+
// (2) strides[i][k] <= strides[j][k] for all k
|
| 125 |
+
// (exchanging them will not make any input worse).
|
| 126 |
+
// Permutes the dimensions of up to four TensorInfo structs IN PLACE so that
// strides tend toward decreasing order (see the long comment above for the
// rationale and the exchange criterion). Bails out without modifying anything
// if the infos disagree on dimensionality or sizes.
template <typename T1, typename IndexType,
          typename T2 = void, typename T3 = void, typename T4 = void>
inline void rearrangeDims(detail::TensorInfo<T1, IndexType>* aInfo,
                          detail::TensorInfo<T2, IndexType>* bInfo = nullptr,
                          detail::TensorInfo<T3, IndexType>* cInfo = nullptr,
                          detail::TensorInfo<T4, IndexType>* dInfo = nullptr) {
  int numInfos = 1;
  int dims = aInfo->dims;
  // Collect raw pointers into each info's size/stride arrays so swaps below
  // write through to the original structs.
  IndexType *sizes[4] = { aInfo->sizes, };
  IndexType *strides[4] = { aInfo->strides, };

  if (bInfo != nullptr) {
    ++numInfos;
    if (bInfo->dims != dims) return;
    sizes[1] = bInfo->sizes;
    strides[1] = bInfo->strides;
  }

  if (cInfo != nullptr) {
    ++numInfos;
    if (cInfo->dims != dims) return;
    sizes[2] = cInfo->sizes;
    strides[2] = cInfo->strides;
  }

  if (dInfo != nullptr) {
    ++numInfos;
    if (dInfo->dims != dims) return;
    sizes[3] = dInfo->sizes;
    strides[3] = dInfo->strides;
  }

  // Bail out if sizes do not match: we are using "deprecated pointwise
  // behavior" among tensors of different shapes but same number of elements.
  for (int i = 1; i < numInfos; ++i) {
    for (int j = 0; j < dims; ++j) {
      if (sizes[i][j] != sizes[0][j]) return;
    }
  }

  for (int i = 0; i < dims - 1; ++i) {
    // No need to consider dimensions of size 1.
    if (sizes[0][i] == 1) continue;

    for (int j = i + 1; j < dims; ++j) {
      if (sizes[0][j] == 1) continue;

      // Compare the relative sizes of strides between dim #i and dim #j.
      bool hasIncreasingStrides = false;
      bool hasDecreasingStrides = false;

      for (int k = 0; k < numInfos; k++) {
        IndexType stride_i = strides[k][i];
        IndexType stride_j = strides[k][j];
        if (stride_i < stride_j) {
          hasIncreasingStrides = true;
        } else if (stride_i > stride_j) {
          hasDecreasingStrides = true;
        }
      }

      // Swap dims i and j only when it helps at least one info and hurts
      // none (criterion (1) and (2) from the comment above).
      if (hasIncreasingStrides && !hasDecreasingStrides) {
        for (int k = 0; k < numInfos; k++) {
          IndexType size = sizes[k][i];
          sizes[k][i] = sizes[k][j];
          sizes[k][j] = size;

          IndexType stride = strides[k][i];
          strides[k][i] = strides[k][j];
          strides[k][j] = stride;
        }
      }
    }
  }
}
|
| 201 |
+
|
| 202 |
+
// The `remaining_steps` argument is used to support Op that operates on
|
| 203 |
+
// multiple elements at the same time. Generally, the strategy of ApplyOpN is to
|
| 204 |
+
// 1. Initialize `remaining_steps = step`, where `step` is the template arg of
|
| 205 |
+
// CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the
|
| 206 |
+
// number of elements in bound for this call. It will almost always equal to
|
| 207 |
+
// `step` except at boundaries.
|
| 208 |
+
// 2. If `remaining_steps > 0` convert the current linearIndex to offset (if in
|
| 209 |
+
// bound), and recursively call `ApplyOpN` with `remaining_steps - 1`.
|
| 210 |
+
// 3. At `remaining_steps = 0`,
|
| 211 |
+
// if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`;
|
| 212 |
+
// if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tensor1_valstep,
|
| 213 |
+
//                           tensor2_val1, tensor2_val2, ..., tensor2_valstep,
|
| 214 |
+
// ...
|
| 215 |
+
//                           tensorN_val1, tensorN_val2, ..., tensorN_valstep);`
|
| 216 |
+
//
|
| 217 |
+
// See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like.
|
| 218 |
+
|
| 219 |
+
// Recursive case of the step-unrolling machinery (see NOTE above): peels off
// one step, computes this step's offset into `a` (or 0 if past the valid
// count `n`), and recurses with `remaining_steps - 1` and the offset appended
// to the parameter pack.
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          int remaining_steps,
          typename... Offsets>
struct ApplyOp1 {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
                  IndexType linearIndex, Offsets... aOffsets) {
  // Convert `linearIndex` into an offset of `a`
  // (sizeof...(Offsets) counts how many steps have already been taken; steps
  // beyond `n` get a dummy offset of 0 and are never read by `op`).
  const IndexType aOffset = sizeof...(Offsets) < n ?
    detail::IndexToOffset<scalar, IndexType, ADims>::get(linearIndex, a) : 0;

  ApplyOp1<Op, scalar, IndexType, ADims, remaining_steps - 1, const IndexType, Offsets...>::apply(
    a, op, n, linearIndex + 1, aOffsets..., aOffset
  );
}
};
|
| 238 |
+
|
| 239 |
+
// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
|
| 240 |
+
// We don't need to pass in how many elements need to processed in this case.
|
| 241 |
+
template <typename Op,
|
| 242 |
+
typename scalar,
|
| 243 |
+
typename IndexType,
|
| 244 |
+
int ADims,
|
| 245 |
+
typename Offset>
|
| 246 |
+
struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offset> {
|
| 247 |
+
__device__ __forceinline__
|
| 248 |
+
static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op,
|
| 249 |
+
int n, IndexType linearIndex, Offset offset) {
|
| 250 |
+
op(a.data[offset]);
|
| 251 |
+
}
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
// Base case for `step > 1`: all offsets have been accumulated, so invoke `op`
// with the valid count `n` followed by one element reference per step.
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          typename... Offsets>
struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offsets...> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
                  IndexType linearIndex, Offsets... offsets) {
  op(n, a.data[offsets]...);
}
};
|
| 266 |
+
|
| 267 |
+
// Pointwise kernel over a single tensor. Grid-stride loop over `step`-sized
// groups of elements; the tail group may be partial, hence the min() for the
// valid-count argument to ApplyOp1.
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          int step>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
                                      IndexType totalElements, const Op op) {
  for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x * step) {
    ApplyOp1<Op, scalar, IndexType, ADims, step>::apply(
      a, op, ::min(step, static_cast<int>(totalElements - linearIndex)), linearIndex);
  }
}
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
// Two-tensor analogue of ApplyOp1's recursive case: per step, computes one
// offset into `a` and one into `b` (or 0 past the valid count `n`) and
// recurses, appending to both offset packs. Note `n` is int64_t here, so
// sizeof...(Offsets) is cast before the signed comparison.
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims,
          int BDims,
          int remaining_steps,
          typename... Offsets>
struct ApplyOp2 {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar1, IndexType> &a,
                  detail::TensorInfo<scalar2, IndexType> &b,
                  const Op &op, int64_t n, IndexType linearIndex,
                  Offsets... aOffsets, Offsets... bOffsets) {
  // Convert `linearIndex` into an offset of `a`
  const IndexType aOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
    detail::IndexToOffset<scalar1, IndexType, ADims>::get(linearIndex, a) : 0;

  // Convert `linearIndex` into an offset of `b`
  const IndexType bOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
    detail::IndexToOffset<scalar2, IndexType, BDims>::get(linearIndex, b) : 0;

  ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, remaining_steps - 1, const IndexType, Offsets...>::apply(
    a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset
  );
}
};
|
| 313 |
+
|
| 314 |
+
// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
|
| 315 |
+
// We don't need to pass in how many elements need to processed in this case.
|
| 316 |
+
// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
// We don't need to pass in how many elements need to processed in this case:
// `op` receives exactly one element reference from each tensor.
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims,
          int BDims,
          typename Offset>
struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offset> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar1, IndexType> &a,
                  detail::TensorInfo<scalar2, IndexType> &b,
                  const Op &op, int /*n*/, IndexType /*linearIndex*/,
                  Offset aOffset, Offset bOffset) {
  op(a.data[aOffset], b.data[bOffset]);
}
};
|
| 332 |
+
|
| 333 |
+
// Base case for `step > 1`: all offsets accumulated; invoke `op` with the
// valid count `n`, then `step` references from `a` followed by `step`
// references from `b`.
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims,
          int BDims,
          typename... Offsets>
struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offsets...> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar1, IndexType> &a,
                  detail::TensorInfo<scalar2, IndexType> &b,
                  const Op &op, int n, IndexType linearIndex,
                  Offsets... aOffsets, Offsets... bOffsets) {
  op(n, a.data[aOffsets]..., b.data[bOffsets]...);
}
};
|
| 349 |
+
|
| 350 |
+
// Pointwise kernel over two tensors of equal element count. Grid-stride loop
// over `step`-sized groups; the tail group may be partial, hence the min()
// for the valid-count argument to ApplyOp2.
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims, int BDims,
          int step,
          int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
          int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm)
#endif
__global__ void
kernelPointwiseApply2(detail::TensorInfo<scalar1, IndexType> a,
                      detail::TensorInfo<scalar2, IndexType> b,
                      IndexType totalElements,
                      const Op op) {
  for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x * step) {
    ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, step>::apply(
      a, b, op, ::min(step, static_cast<int>(totalElements - linearIndex)),
      linearIndex);
  }
}
|
| 374 |
+
|
| 375 |
+
} // anonymous namespace
|
| 376 |
+
|
| 377 |
+
// Host-side driver: applies `op` pointwise over tensors `a` and `b` (see
// NOTE [ CUDA_tensor_applyN helpers ] at the top of this file for the full
// calling convention). Returns false when the inputs cannot be handled
// (mismatched element counts, too many dims, no current device or grid),
// true otherwise. Tensors are taken by value so overlapping ReadWrite args
// can be swapped for contiguous copies and written back at the end.
template <typename scalar1, typename scalar2, int step, typename Op,
          int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
          int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
inline bool CUDA_tensor_apply2(at::TensorBase a,
                               at::TensorBase b,
                               const Op op,
                               TensorArgType aType = TensorArgType::ReadWrite,
                               TensorArgType bType = TensorArgType::ReadOnly) {
  TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(),
              "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got "
              "tensors with type ", a.device().type(), " and ", b.device().type());
  int64_t totalElements = a.numel();

  if (totalElements != b.numel()) {
    return false;
  }

  if (a.dim() > MAX_TENSORINFO_DIMS ||
      b.dim() > MAX_TENSORINFO_DIMS) {
    return false;
  }

  if (a.numel() == 0) {
    // Empty tensor; do nothing
    return true;
  }
  const dim3 block = getApplyBlock(max_threads_per_block);

  dim3 grid;
  auto curDevice = current_device();
  if (curDevice == -1) return false;
  if (!getApplyGrid<step>(totalElements, grid, curDevice, max_threads_per_block)) {
    return false;
  }

  /*
  Expands readable/writable tensors whose indices may be "overlapped."
  This ensures that each element of the tensor is operated on once and only
  once.
  */
  TensorBase oldA;
  TensorBase oldB;

  if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) {
    // Must perform in contiguous space
    oldA = std::exchange(a, a.contiguous());
  }
  if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) {
    // Must perform in contiguous space
    oldB = std::exchange(b, b.contiguous());
  }

  // It is possible that the tensor dimensions are able to be collapsed,
  // and thus we can reduce the actual code complexity of the copy by
  // exploiting this knowledge statically, since the div/mod is the
  // most expensive part of the operation, more so than memory accesses.
  // For instance, when copying a non-contiguous to a contiguous tensor
  // (or vice versa), the contiguous tensor can be collapsed to one
  // dimension, and the loop to translate the linear index to the array
  // index can be similarly collapsed. That is what this unrolling is for.

// Launches the kernel specialized for index TYPE and static dim counts A/B,
// then checks the launch result.
#define HANDLE_CASE(TYPE, A, B)                                        \
  kernelPointwiseApply2<Op,                                            \
                        scalar1,                                       \
                        scalar2,                                       \
                        TYPE, A, B, step,                              \
                        max_threads_per_block,                         \
                        min_blocks_per_sm>                             \
   <<<grid, block, 0, at::cuda::getCurrentCUDAStream(curDevice)>>>(    \
       aInfo, bInfo, static_cast<TYPE>(totalElements), op);            \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

// Static dispatch on b's (collapsed) dim count: 1, 2, or dynamic (-1).
#define HANDLE_B_CASE(TYPE, A, B) {         \
  switch (B) {                              \
    case 1:                                 \
      HANDLE_CASE(TYPE, A, 1);              \
      break;                                \
    case 2:                                 \
      HANDLE_CASE(TYPE, A, 2);              \
      break;                                \
    default:                                \
      HANDLE_CASE(TYPE, A, -1);             \
      break;                                \
  }                                         \
}

// Static dispatch on a's (collapsed) dim count: 1, 2, or dynamic (-1).
#define HANDLE_A_CASE(TYPE, A, B) {         \
  switch (A) {                              \
    case 1:                                 \
      HANDLE_B_CASE(TYPE, 1, B);            \
      break;                                \
    case 2:                                 \
      HANDLE_B_CASE(TYPE, 2, B);            \
      break;                                \
    default:                                \
      HANDLE_B_CASE(TYPE, -1, B);           \
      break;                                \
  }                                         \
}

  if (detail::canUse32BitIndexMath(a) &&
      detail::canUse32BitIndexMath(b)) {
    detail::TensorInfo<scalar1, unsigned int> aInfo =
      detail::getTensorInfo<scalar1, unsigned int>(a);

    detail::TensorInfo<scalar2, unsigned int> bInfo =
      detail::getTensorInfo<scalar2, unsigned int>(b);
    rearrangeDims(&aInfo, &bInfo);
    aInfo.collapseDims();
    bInfo.collapseDims();

    HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims);
  } else {
    detail::TensorInfo<scalar1, uint64_t> aInfo =
      detail::getTensorInfo<scalar1, uint64_t>(a);

    detail::TensorInfo<scalar2, uint64_t> bInfo =
      detail::getTensorInfo<scalar2, uint64_t>(b);
    rearrangeDims(&aInfo, &bInfo);
    aInfo.collapseDims();
    bInfo.collapseDims();

    /*
    Only instantiates the all 1D special case and the fallback all nD case for
    large (64-bit indexed) tensors to reduce compilation time.
    */
    if (aInfo.dims == 1 && bInfo.dims == 1) {
      HANDLE_CASE(uint64_t, 1, 1);
    } else {
      HANDLE_CASE(uint64_t, -1, -1);
    }
  }
#undef HANDLE_CASE
#undef HANDLE_B_CASE
#undef HANDLE_A_CASE

  // If an overlapping tensor was replaced by a contiguous copy above, copy
  // the results back into the original (overlapped) storage.
  if (oldA.defined()) {
    at::native::copy_ignoring_overlaps(oldA, a);
  }

  if (oldB.defined()) {
    at::native::copy_ignoring_overlaps(oldB, b);
  }

  return true;
}
|
| 523 |
+
|
| 524 |
+
/* Provides default step = 1 to CUDA_tensor_apply2. */
// Convenience overload that forwards to the step-templated
// CUDA_tensor_apply2 with step == 1, i.e. `op` is applied to one element of
// `a` and `b` at a time. By default `a` is passed as ReadWrite and `b` as
// ReadOnly; these TensorArgType flags are forwarded unchanged to the
// step-templated overload.
template <typename scalar1, typename scalar2, typename Op,
          int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
          int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
inline bool CUDA_tensor_apply2(const at::TensorBase &a,
                               const at::TensorBase &b,
                               const Op op,
                               TensorArgType aType = TensorArgType::ReadWrite,
                               TensorArgType bType = TensorArgType::ReadOnly) {
  // Forward with step fixed to 1; all other template parameters and
  // arguments pass through untouched.
  return CUDA_tensor_apply2<scalar1, scalar2, 1, Op,
                            max_threads_per_block, min_blocks_per_sm>(a, b, op, aType, bType);
}
|
| 536 |
+
|
| 537 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
/*
|
| 3 |
+
Provides a subset of CUDA BLAS functions as templates:
|
| 4 |
+
|
| 5 |
+
gemm<Dtype>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c,
|
| 6 |
+
ldc)
|
| 7 |
+
|
| 8 |
+
gemv<Dtype>(transa, m, n, alpha, a, lda, x, incx, beta, y, incy)
|
| 9 |
+
|
| 10 |
+
dot<Dtype>(n, x, incx, y, incy, result)
|
| 11 |
+
|
| 12 |
+
where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot).
|
| 13 |
+
The functions are available in at::cuda::blas namespace.
|
| 14 |
+
*/
|
| 15 |
+
|
| 16 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 17 |
+
#include <ATen/OpMathType.h>
|
| 18 |
+
|
| 19 |
+
namespace at::cuda::blas {
|
| 20 |
+
|
| 21 |
+
// RAII guard that sets the CuBLAS pointer mode and restores it to
|
| 22 |
+
// its previous value when the guard is destroyed
|
| 23 |
+
class PointerModeGuard {
|
| 24 |
+
public:
|
| 25 |
+
PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) :
|
| 26 |
+
handle(handle) {
|
| 27 |
+
TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode));
|
| 28 |
+
TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode));
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
~PointerModeGuard() {
|
| 32 |
+
cublasSetPointerMode(handle, previous_mode);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
private:
|
| 36 |
+
cublasHandle_t handle;
|
| 37 |
+
cublasPointerMode_t previous_mode;
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
/* LEVEL 3 BLAS FUNCTIONS */
|
| 41 |
+
|
| 42 |
+
// Formal-parameter list shared by every gemm<Dtype> overload: transpose
// flags, problem sizes (m, n, k), alpha/beta in the op-math type for Dtype,
// and the A/B/C matrix pointers with their leading dimensions.
#define CUDABLAS_GEMM_ARGTYPES(Dtype)                                                      \
  char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
      const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type<Dtype> beta,\
      Dtype *c, int64_t ldc

// Matching argument list for forwarding CUDABLAS_GEMM_ARGTYPES parameters on
// to another gemm-style function.
#define CUDABLAS_GEMM_ARGS(Dtype) transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc

// Primary template is deliberately uninstantiable: `false && sizeof(Dtype)`
// keeps the condition dependent on Dtype, so the static_assert fires only if
// gemm is instantiated for a type with no explicit specialization below.
template <typename Dtype>
inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::gemm: not implemented");
}

// Supported dtypes; these specializations are declared here and defined in a
// separate translation unit.
template <>
void gemm<double>(CUDABLAS_GEMM_ARGTYPES(double));
template <>
void gemm<float>(CUDABLAS_GEMM_ARGTYPES(float));
template <>
void gemm<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
template <>
void gemm<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
template <>
void gemm<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
template <>
void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));

// gemm_internal: same signature family as gemm. The exact division of labor
// between gemm and gemm_internal is not visible in this header -- see the
// implementation for details. Same dependent-false stub pattern as above.
template <typename Dtype>
inline void gemm_internal(CUDABLAS_GEMM_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::gemm_internal: not implemented");
}

template <>
void gemm_internal<double>(CUDABLAS_GEMM_ARGTYPES(double));
template <>
void gemm_internal<float>(CUDABLAS_GEMM_ARGTYPES(float));
template <>
void gemm_internal<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>));
template <>
void gemm_internal<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>));
template <>
void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
template <>
void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
|
| 84 |
+
|
| 85 |
+
// Epilogue applied after the GEMM-plus-bias computation in gemm_and_bias.
enum GEMMAndBiasActivationEpilogue {
  None,
  RELU,
  GELU,
};

// NOTE: GELU activation is not supported prior to CUDA 11.4 and will
// do nothing if passed in that case.
// Fused matmul + bias (+ optional activation). Declaration only; defined in
// a separate translation unit.
template <typename Dtype>
void gemm_and_bias(
    bool transpose_mat1,
    bool transpose_mat2,
    int64_t m,
    int64_t n,
    int64_t k,
    at::opmath_type<Dtype> alpha_val,
    const Dtype* mat1_ptr,
    int64_t mat1_ld,
    const Dtype* mat2_ptr,
    int64_t mat2_ld,
    const Dtype* bias,
    Dtype* result_ptr,
    int64_t result_ld,
    GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None);

// int8 x int8 -> int32 matrix multiply. Declaration only.
void int8_gemm(
    bool transpose_mat1,
    bool transpose_mat2,
    int64_t m,
    int64_t n,
    int64_t k,
    const int8_t* mat1_ptr,
    int64_t mat1_ld,
    const int8_t* mat2_ptr,
    int64_t mat2_ld,
    int32_t* result_ptr,
    int64_t result_ld);

// GEMM with per-operand scale factors; operand/result dtypes are passed at
// runtime as ScalarType and the buffers as type-erased void pointers.
// Declaration only.
void scaled_gemm(
    char transa,
    char transb,
    int64_t m,
    int64_t n,
    int64_t k,
    const void* mat1_ptr,
    const void* mat1_scale_ptr,
    int64_t mat1_ld,
    ScalarType mat1_dtype,
    const void* mat2_ptr,
    const void* mat2_scale_ptr,
    int64_t mat2_ld,
    ScalarType mat2_dtype,
    const void* bias_ptr,
    ScalarType bias_dtype,
    void* result_ptr,
    const void* result_scale_ptr,
    int64_t result_ld,
    ScalarType result_dtype,
    void* amax_ptr,
    bool use_fast_accum);
|
| 145 |
+
|
| 146 |
+
// Formal-parameter list for the strided-batched GEMM family: like
// CUDABLAS_GEMM_ARGTYPES plus a per-batch stride for each of A, B, C and the
// batch count.
#define CUDABLAS_BGEMM_ARGTYPES(Dtype)                                                     \
  char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type<Dtype> alpha, \
      const Dtype *a, int64_t lda, int64_t stridea, \
      const Dtype *b, int64_t ldb, int64_t strideb, \
      at::opmath_type<Dtype> beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches

// Matching argument list for forwarding CUDABLAS_BGEMM_ARGTYPES parameters.
#define CUDABLAS_BGEMM_ARGS(Dtype) \
  transa, transb, m, n, k, alpha, a, lda, stridea, b, ldb, strideb, beta, c, ldc, stridec, num_batches

// Dependent-false stub: the static_assert fires only when bgemm is
// instantiated for a dtype without an explicit specialization below.
template <typename Dtype>
inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::bgemm: not implemented");
}

template <>
void bgemm<double>(CUDABLAS_BGEMM_ARGTYPES(double));
template <>
void bgemm<float>(CUDABLAS_BGEMM_ARGTYPES(float));
template <>
void bgemm<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
template <>
void bgemm<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
template <>
void bgemm<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
template <>
void bgemm<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));

// bgemm_internal: parallel to gemm_internal; see the implementation for how
// it differs from bgemm. Same stub-plus-specializations pattern.
template <typename Dtype>
inline void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::bgemm_internal: not implemented");
}

template <>
void bgemm_internal<double>(CUDABLAS_BGEMM_ARGTYPES(double));
template <>
void bgemm_internal<float>(CUDABLAS_BGEMM_ARGTYPES(float));
template <>
void bgemm_internal<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>));
template <>
void bgemm_internal<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>));
template <>
void bgemm_internal<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half));
template <>
void bgemm_internal<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16));
|
| 190 |
+
|
| 191 |
+
// Triangular solve (trsm) parameter list. Unlike the GEMM families above,
// these take an explicit cublasHandle_t and use cuBLAS enums and `int`
// dimensions directly.
#define CUDABLAS_TRSM_ARGTYPES(Dtype)                                  \
  cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
      cublasOperation_t trans, cublasDiagType_t diag, int m, int n,    \
      const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb

// Dependent-false stub; only the specializations below are usable.
template <typename Dtype>
inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype), "at::cuda::blas::trsm: not implemented");
}

template <>
TORCH_CUDA_CU_API void trsm<float>(CUDABLAS_TRSM_ARGTYPES(float));
template <>
TORCH_CUDA_CU_API void trsm<double>(CUDABLAS_TRSM_ARGTYPES(double));
template <>
TORCH_CUDA_CU_API void trsm<c10::complex<float>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<float>));
template <>
TORCH_CUDA_CU_API void trsm<c10::complex<double>>(CUDABLAS_TRSM_ARGTYPES(c10::complex<double>));

// Batched triangular solve: arrays of matrix pointers plus a batch count.
#define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)                          \
  cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \
      cublasOperation_t trans, cublasDiagType_t diag, int m, int n,    \
      const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb,    \
      int batchCount

template <typename Dtype>
inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype), "at::cuda::blas::trsmBatched: not implemented");
}

template <>
TORCH_CUDA_CU_API void trsmBatched<float>(CUDABLAS_TRSM_BATCHED_ARGTYPES(float));
template <>
TORCH_CUDA_CU_API void trsmBatched<double>(CUDABLAS_TRSM_BATCHED_ARGTYPES(double));
template <>
TORCH_CUDA_CU_API void trsmBatched<c10::complex<float>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<float>));
template <>
TORCH_CUDA_CU_API void trsmBatched<c10::complex<double>>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex<double>));
|
| 229 |
+
|
| 230 |
+
/* LEVEL 2 BLAS FUNCTIONS */
|
| 231 |
+
|
| 232 |
+
// Matrix-vector multiply (gemv) parameter list: transpose flag, matrix
// dimensions and pointer/leading dimension, input vector x with stride incx,
// and output vector y with stride incy.
#define CUDABLAS_GEMV_ARGTYPES(Dtype)                                        \
  char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \
      const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy

// Dependent-false stub; only the specializations below are usable.
template <typename Dtype>
inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype), "at::cuda::blas::gemv: not implemented");
}

template <>
void gemv<double>(CUDABLAS_GEMV_ARGTYPES(double));
template <>
void gemv<float>(CUDABLAS_GEMV_ARGTYPES(float));
template <>
void gemv<c10::complex<double>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<double>));
template <>
void gemv<c10::complex<float>>(CUDABLAS_GEMV_ARGTYPES(c10::complex<float>));
template <>
void gemv<at::Half>(CUDABLAS_GEMV_ARGTYPES(at::Half));
template <>
void gemv<at::BFloat16>(CUDABLAS_GEMV_ARGTYPES(at::BFloat16));
|
| 253 |
+
|
| 254 |
+
/* LEVEL 1 BLAS FUNCTIONS */
|
| 255 |
+
|
| 256 |
+
// Dot-product parameter list shared by dot and vdot: handle, element count,
// both input vectors with their strides, and an output pointer for the
// scalar result.
#define CUDABLAS_DOT_ARGTYPES(Dtype)                                      \
  cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \
      int incy, Dtype *result

// Dependent-false stub; only the specializations below are usable.
template <typename Dtype>
inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::dot: not implemented");
}

template <>
void dot<double>(CUDABLAS_DOT_ARGTYPES(double));
template <>
void dot<float>(CUDABLAS_DOT_ARGTYPES(float));
template <>
void dot<at::Half>(CUDABLAS_DOT_ARGTYPES(at::Half));
template <>
void dot<at::BFloat16>(CUDABLAS_DOT_ARGTYPES(at::BFloat16));
template <>
void dot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
template <>
void dot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));

// vdot: specialized only for complex types. NOTE(review): presumably the
// conjugated dot product (cublas*dotc) as opposed to the unconjugated dot
// above -- confirm against the implementation.
template <typename Dtype>
inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::vdot: not implemented");
}

template <>
void vdot<c10::complex<float>>(CUDABLAS_DOT_ARGTYPES(c10::complex<float>));
template <>
void vdot<c10::complex<double>>(CUDABLAS_DOT_ARGTYPES(c10::complex<double>));
|
| 287 |
+
|
| 288 |
+
// Batched LU solve (getrs): arrays of factored-matrix and right-hand-side
// pointers, pivot indices, and a per-problem info array.
#define CUDABLAS_GETRS_ARGTYPES(Dtype)                                  \
  cublasHandle_t handle, cublasOperation_t trans, \
      int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \
      Dtype** dB_array, int ldb, int* info_array, int batchsize

// Dependent-false stub; only the specializations below are usable.
template<class Dtype>
void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::getrsBatched: not implemented");
}
template<>
TORCH_CUDA_CU_API void getrsBatched<float>(CUDABLAS_GETRS_ARGTYPES(float));
template<>
TORCH_CUDA_CU_API void getrsBatched<double>(CUDABLAS_GETRS_ARGTYPES(double));
template<>
TORCH_CUDA_CU_API void getrsBatched<c10::complex<float>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<float>));
template<>
TORCH_CUDA_CU_API void getrsBatched<c10::complex<double>>(CUDABLAS_GETRS_ARGTYPES(c10::complex<double>));

// Batched QR factorization (geqrf).
#define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)                   \
  cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \
      Dtype **tau_array, int *info, int batchsize

template <class Dtype>
void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype), "at::cuda::blas::geqrfBatched: not implemented");
}
template <>
TORCH_CUDA_CU_API void geqrfBatched<float>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float));
template <>
TORCH_CUDA_CU_API void geqrfBatched<double>(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double));
template <>
TORCH_CUDA_CU_API void geqrfBatched<c10::complex<double>>(
    CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<double>));
template <>
TORCH_CUDA_CU_API void geqrfBatched<c10::complex<float>>(
    CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex<float>));

// Batched LU factorization (getrf). Note: no cublasHandle_t parameter here,
// unlike the other batched solver macros in this header.
#define CUDABLAS_GETRF_ARGTYPES(Dtype)  \
  int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize

// NOTE(review): unlike every other stub in this header, this primary
// template uses a runtime TORCH_CHECK rather than a dependent-false
// static_assert, so it can be instantiated for unsupported dtypes and only
// fails when called. Possibly intentional -- confirm before "fixing".
template<class Dtype>
void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) {
  TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented");
}
template<>
TORCH_CUDA_CU_API void getrfBatched<float>(CUDABLAS_GETRF_ARGTYPES(float));
template<>
TORCH_CUDA_CU_API void getrfBatched<double>(CUDABLAS_GETRF_ARGTYPES(double));
template<>
TORCH_CUDA_CU_API void getrfBatched<c10::complex<double>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<double>));
template<>
TORCH_CUDA_CU_API void getrfBatched<c10::complex<float>>(CUDABLAS_GETRF_ARGTYPES(c10::complex<float>));

// Batched least-squares solve (gels).
#define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)  \
  cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize

template <class Dtype>
void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) {
  static_assert(false&&sizeof(Dtype),"at::cuda::blas::gelsBatched: not implemented");
}

template<>
TORCH_CUDA_CU_API void gelsBatched<double>(CUDABLAS_GELS_BATCHED_ARGTYPES(double));
template<>
TORCH_CUDA_CU_API void gelsBatched<float>(CUDABLAS_GELS_BATCHED_ARGTYPES(float));
template<>
TORCH_CUDA_CU_API void gelsBatched<c10::complex<double>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<double>));
template<>
TORCH_CUDA_CU_API void gelsBatched<c10::complex<float>>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex<float>));
|
| 357 |
+
|
| 358 |
+
} // namespace at::cuda::blas
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

// Test these using #if AT_CUDNN_ENABLED(), not #ifdef, so that it's
// obvious if you forgot to include Config.h
// c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
//
// NB: This header MUST NOT be included from other headers; it should
// only be included from C++ files.
// NOTE(review): these 0/1 values are baked in by the build that produced
// this particular torch wheel (cuDNN/cuSPARSELt/MAGMA on, ROCm off).
#define AT_CUDNN_ENABLED() 1
#define AT_CUSPARSELT_ENABLED() 1
#define AT_ROCM_ENABLED() 0
#define AT_MAGMA_ENABLED() 1

// Needed for hipMAGMA to correctly identify implementation
#if (AT_ROCM_ENABLED() && AT_MAGMA_ENABLED())
#define HAVE_HIP 1
#endif

// The -gencode flags this build passed to nvcc (SM 5.0 through 9.0).
#define NVCC_FLAGS_EXTRA "-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90"
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContext.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/CUDAContextLight.h>
|
| 4 |
+
|
| 5 |
+
// Preserved for BC, as many files depend on these includes
|
| 6 |
+
#include <ATen/Context.h>
|
| 7 |
+
#include <c10/cuda/CUDAStream.h>
|
| 8 |
+
#include <c10/util/Logging.h>
|
| 9 |
+
#include <ATen/cuda/Exceptions.h>
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ScalarType.h>
|
| 4 |
+
|
| 5 |
+
#include <cuda.h>
|
| 6 |
+
#include <library_types.h>
|
| 7 |
+
|
| 8 |
+
namespace at::cuda {
|
| 9 |
+
|
| 10 |
+
// Compile-time mapping from a C++ scalar type to the corresponding
// cudaDataType enumerator. The unspecialized template is uninstantiable:
// `false && sizeof(scalar_t)` keeps the condition type-dependent, so the
// static_assert fires only for types with no specialization below.
template <typename scalar_t>
cudaDataType getCudaDataType() {
  static_assert(false && sizeof(scalar_t), "Cannot convert type to cudaDataType.");
  return {};
}

// Real and complex floating-point types.
template<> inline cudaDataType getCudaDataType<at::Half>() {
  return CUDA_R_16F;
}
template<> inline cudaDataType getCudaDataType<float>() {
  return CUDA_R_32F;
}
template<> inline cudaDataType getCudaDataType<double>() {
  return CUDA_R_64F;
}
template<> inline cudaDataType getCudaDataType<c10::complex<c10::Half>>() {
  return CUDA_C_16F;
}
template<> inline cudaDataType getCudaDataType<c10::complex<float>>() {
  return CUDA_C_32F;
}
template<> inline cudaDataType getCudaDataType<c10::complex<double>>() {
  return CUDA_C_64F;
}

// Integer types.
template<> inline cudaDataType getCudaDataType<uint8_t>() {
  return CUDA_R_8U;
}
template<> inline cudaDataType getCudaDataType<int8_t>() {
  return CUDA_R_8I;
}
template<> inline cudaDataType getCudaDataType<int>() {
  return CUDA_R_32I;
}

template<> inline cudaDataType getCudaDataType<int16_t>() {
  return CUDA_R_16I;
}
template<> inline cudaDataType getCudaDataType<int64_t>() {
  return CUDA_R_64I;
}
template<> inline cudaDataType getCudaDataType<at::BFloat16>() {
  return CUDA_R_16BF;
}
|
| 54 |
+
|
| 55 |
+
// Runtime counterpart of getCudaDataType<T>(): maps a c10::ScalarType value
// to the cudaDataType enumerator used by the CUDA math libraries. Types with
// no CUDA equivalent hit the default branch and trip TORCH_INTERNAL_ASSERT.
inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) {
  switch (scalar_type) {
    case c10::ScalarType::Byte:
      return CUDA_R_8U;
    case c10::ScalarType::Char:
      return CUDA_R_8I;
    case c10::ScalarType::Int:
      return CUDA_R_32I;
    case c10::ScalarType::Half:
      return CUDA_R_16F;
    case c10::ScalarType::Float:
      return CUDA_R_32F;
    case c10::ScalarType::Double:
      return CUDA_R_64F;
    case c10::ScalarType::ComplexHalf:
      return CUDA_C_16F;
    case c10::ScalarType::ComplexFloat:
      return CUDA_C_32F;
    case c10::ScalarType::ComplexDouble:
      return CUDA_C_64F;
    case c10::ScalarType::Short:
      return CUDA_R_16I;
    case c10::ScalarType::Long:
      return CUDA_R_64I;
    case c10::ScalarType::BFloat16:
      return CUDA_R_16BF;
// The FP8 enumerators only exist in CUDA 11.8 and newer headers.
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11080
    case c10::ScalarType::Float8_e4m3fn:
      return CUDA_R_8F_E4M3;
    case c10::ScalarType::Float8_e5m2:
      return CUDA_R_8F_E5M2;
#endif
#if defined(USE_ROCM)
#if defined(HIP_NEW_TYPE_ENUMS)
    case c10::ScalarType::Float8_e4m3fnuz:
      return HIP_R_8F_E4M3_FNUZ;
    case c10::ScalarType::Float8_e5m2fnuz:
      return HIP_R_8F_E5M2_FNUZ;
#else
    // Older HIP headers lack the FNUZ FP8 enumerators; the hard-coded 1000
    // and 1001 presumably match the values later assigned by
    // HIP_NEW_TYPE_ENUMS -- TODO confirm against the HIP headers.
    case c10::ScalarType::Float8_e4m3fnuz:
      return static_cast<hipDataType>(1000);
    case c10::ScalarType::Float8_e5m2fnuz:
      return static_cast<hipDataType>(1001);
#endif
#endif
    default:
      TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.")
  }
}
|
| 104 |
+
|
| 105 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/ATenCUDAGeneral.h>
|
| 4 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 5 |
+
#include <c10/core/impl/GPUTrace.h>
|
| 6 |
+
#include <c10/cuda/CUDAStream.h>
|
| 7 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 8 |
+
#include <ATen/cuda/Exceptions.h>
|
| 9 |
+
#include <c10/util/Exception.h>
|
| 10 |
+
|
| 11 |
+
#include <cuda_runtime_api.h>
|
| 12 |
+
|
| 13 |
+
#include <cstdint>
|
| 14 |
+
#include <utility>
|
| 15 |
+
|
| 16 |
+
namespace at::cuda {
|
| 17 |
+
|
| 18 |
+
/*
|
| 19 |
+
* CUDAEvents are movable not copyable wrappers around CUDA's events.
|
| 20 |
+
*
|
| 21 |
+
* CUDAEvents are constructed lazily when first recorded unless it is
|
| 22 |
+
* reconstructed from a cudaIpcEventHandle_t. The event has a device, and this
|
| 23 |
+
* device is acquired from the first recording stream. However, if reconstructed
|
| 24 |
+
* from a handle, the device should be explicitly specified; or if ipc_handle() is
|
| 25 |
+
* called before the event is ever recorded, it will use the current device.
|
| 26 |
+
* Later streams that record the event must match this device.
|
| 27 |
+
*/
|
| 28 |
+
struct TORCH_CUDA_CPP_API CUDAEvent {
|
| 29 |
+
// Constructors
|
| 30 |
+
// Default value for `flags` is specified below - it's cudaEventDisableTiming
|
| 31 |
+
CUDAEvent() noexcept = default;
|
| 32 |
+
CUDAEvent(unsigned int flags) noexcept : flags_{flags} {}
|
| 33 |
+
|
| 34 |
+
CUDAEvent(
|
| 35 |
+
DeviceIndex device_index, const cudaIpcEventHandle_t* handle) : device_index_(device_index) {
|
| 36 |
+
CUDAGuard guard(device_index_);
|
| 37 |
+
|
| 38 |
+
AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle));
|
| 39 |
+
is_created_ = true;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// Note: event destruction done on creating device to avoid creating a
|
| 43 |
+
// CUDA context on other devices.
|
| 44 |
+
~CUDAEvent() {
|
| 45 |
+
try {
|
| 46 |
+
if (is_created_) {
|
| 47 |
+
CUDAGuard guard(device_index_);
|
| 48 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 49 |
+
if (C10_UNLIKELY(interp)) {
|
| 50 |
+
(*interp)->trace_gpu_event_deletion(at::kCUDA, reinterpret_cast<uintptr_t>(event_));
|
| 51 |
+
}
|
| 52 |
+
AT_CUDA_CHECK(cudaEventDestroy(event_));
|
| 53 |
+
}
|
| 54 |
+
} catch (...) { /* No throw */ }
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
CUDAEvent(const CUDAEvent&) = delete;
|
| 58 |
+
CUDAEvent& operator=(const CUDAEvent&) = delete;
|
| 59 |
+
|
| 60 |
+
CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); }
|
| 61 |
+
CUDAEvent& operator=(CUDAEvent&& other) noexcept {
|
| 62 |
+
if (this != &other) {
|
| 63 |
+
moveHelper(std::move(other));
|
| 64 |
+
}
|
| 65 |
+
return *this;
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
operator cudaEvent_t() const { return event(); }
|
| 69 |
+
|
| 70 |
+
// Less than operator (to allow use in sets)
|
| 71 |
+
friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) {
|
| 72 |
+
return left.event_ < right.event_;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
std::optional<at::Device> device() const {
|
| 76 |
+
if (is_created_) {
|
| 77 |
+
return at::Device(at::kCUDA, device_index_);
|
| 78 |
+
} else {
|
| 79 |
+
return {};
|
| 80 |
+
}
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
bool isCreated() const { return is_created_; }
|
| 84 |
+
DeviceIndex device_index() const {return device_index_;}
|
| 85 |
+
cudaEvent_t event() const { return event_; }
|
| 86 |
+
|
| 87 |
+
// Note: cudaEventQuery can be safely called from any device
|
| 88 |
+
bool query() const {
|
| 89 |
+
if (!is_created_) {
|
| 90 |
+
return true;
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
cudaError_t err = cudaEventQuery(event_);
|
| 94 |
+
if (err == cudaSuccess) {
|
| 95 |
+
return true;
|
| 96 |
+
} else if (err != cudaErrorNotReady) {
|
| 97 |
+
C10_CUDA_CHECK(err);
|
| 98 |
+
} else {
|
| 99 |
+
// ignore and clear the error if not ready
|
| 100 |
+
(void)cudaGetLastError();
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
return false;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
void record() { record(getCurrentCUDAStream()); }
|
| 107 |
+
|
| 108 |
+
void recordOnce(const CUDAStream& stream) {
|
| 109 |
+
if (!was_recorded_) record(stream);
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
// Note: cudaEventRecord must be called on the same device as the event.
|
| 113 |
+
void record(const CUDAStream& stream) {
|
| 114 |
+
if (!is_created_) {
|
| 115 |
+
createEvent(stream.device_index());
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_,
|
| 119 |
+
" does not match recording stream's device ", stream.device_index(), ".");
|
| 120 |
+
CUDAGuard guard(device_index_);
|
| 121 |
+
AT_CUDA_CHECK(cudaEventRecord(event_, stream));
|
| 122 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 123 |
+
if (C10_UNLIKELY(interp)) {
|
| 124 |
+
(*interp)->trace_gpu_event_record(at::kCUDA,
|
| 125 |
+
reinterpret_cast<uintptr_t>(event_),
|
| 126 |
+
reinterpret_cast<uintptr_t>(stream.stream())
|
| 127 |
+
);
|
| 128 |
+
}
|
| 129 |
+
was_recorded_ = true;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
// Note: cudaStreamWaitEvent must be called on the same device as the stream.
|
| 133 |
+
// The event has no actual GPU resources associated with it.
|
| 134 |
+
void block(const CUDAStream& stream) {
|
| 135 |
+
if (is_created_) {
|
| 136 |
+
CUDAGuard guard(stream.device_index());
|
| 137 |
+
AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0));
|
| 138 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 139 |
+
if (C10_UNLIKELY(interp)) {
|
| 140 |
+
(*interp)->trace_gpu_event_wait(at::kCUDA,
|
| 141 |
+
reinterpret_cast<uintptr_t>(event_),
|
| 142 |
+
reinterpret_cast<uintptr_t>(stream.stream())
|
| 143 |
+
);
|
| 144 |
+
}
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
// Note: cudaEventElapsedTime can be safely called from any device
|
| 149 |
+
float elapsed_time(const CUDAEvent& other) const {
|
| 150 |
+
TORCH_CHECK(is_created_ && other.isCreated(),
|
| 151 |
+
"Both events must be recorded before calculating elapsed time.");
|
| 152 |
+
float time_ms = 0;
|
| 153 |
+
// We do not strictly have to set the device index to the same as our event,
|
| 154 |
+
// but if we don't and the current device is not initialized, it will
|
| 155 |
+
// create a new cuda context, which will consume a lot of memory.
|
| 156 |
+
CUDAGuard guard(device_index_);
|
| 157 |
+
// raise cudaErrorNotReady if either event is recorded but not yet completed
|
| 158 |
+
AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_));
|
| 159 |
+
return time_ms;
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
// Note: cudaEventSynchronize can be safely called from any device
|
| 163 |
+
void synchronize() const {
|
| 164 |
+
if (is_created_) {
|
| 165 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 166 |
+
if (C10_UNLIKELY(interp)) {
|
| 167 |
+
(*interp)->trace_gpu_event_synchronization(at::kCUDA, reinterpret_cast<uintptr_t>(event_));
|
| 168 |
+
}
|
| 169 |
+
AT_CUDA_CHECK(cudaEventSynchronize(event_));
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
// Note: cudaIpcGetEventHandle must be called on the same device as the event
|
| 174 |
+
void ipc_handle(cudaIpcEventHandle_t * handle) {
|
| 175 |
+
if (!is_created_) {
|
| 176 |
+
// this CUDAEvent object was initially constructed from flags but event_
|
| 177 |
+
// is not created yet.
|
| 178 |
+
createEvent(getCurrentCUDAStream().device_index());
|
| 179 |
+
}
|
| 180 |
+
CUDAGuard guard(device_index_);
|
| 181 |
+
AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_));
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
private:
|
| 185 |
+
unsigned int flags_ = cudaEventDisableTiming;
|
| 186 |
+
bool is_created_ = false;
|
| 187 |
+
bool was_recorded_ = false;
|
| 188 |
+
DeviceIndex device_index_ = -1;
|
| 189 |
+
cudaEvent_t event_{};
|
| 190 |
+
|
| 191 |
+
void createEvent(DeviceIndex device_index) {
|
| 192 |
+
device_index_ = device_index;
|
| 193 |
+
CUDAGuard guard(device_index_);
|
| 194 |
+
AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_));
|
| 195 |
+
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
|
| 196 |
+
if (C10_UNLIKELY(interp)) {
|
| 197 |
+
(*interp)->trace_gpu_event_creation(at::kCUDA, reinterpret_cast<uintptr_t>(event_));
|
| 198 |
+
}
|
| 199 |
+
is_created_ = true;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
void moveHelper(CUDAEvent&& other) {
|
| 203 |
+
std::swap(flags_, other.flags_);
|
| 204 |
+
std::swap(is_created_, other.is_created_);
|
| 205 |
+
std::swap(was_recorded_, other.was_recorded_);
|
| 206 |
+
std::swap(device_index_, other.device_index_);
|
| 207 |
+
std::swap(event_, other.event_);
|
| 208 |
+
}
|
| 209 |
+
};
|
| 210 |
+
|
| 211 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Context.h>
|
| 4 |
+
#include <ATen/core/Generator.h>
|
| 5 |
+
#include <ATen/core/TensorBase.h>
|
| 6 |
+
#include <ATen/cuda/PhiloxCudaState.h>
|
| 7 |
+
#include <atomic>
|
| 8 |
+
#include <limits>
|
| 9 |
+
#include <memory>
|
| 10 |
+
#include <unordered_set>
|
| 11 |
+
namespace at {
|
| 12 |
+
|
| 13 |
+
namespace cuda {
|
| 14 |
+
struct CUDAGraph;
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
/**
|
| 18 |
+
* Note [CUDA Graph-safe RNG states]
|
| 19 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 20 |
+
*
|
| 21 |
+
* Strategy:
|
| 22 |
+
* ~~~~~~~~~
|
| 23 |
+
* (It helps to look at
|
| 24 |
+
* cuda/detail/PhiloxCudaStateRaw.cuh and
|
| 25 |
+
* cuda/detail/UnpackRaw.cuh
|
| 26 |
+
* while you read this.)
|
| 27 |
+
*
|
| 28 |
+
* A CUDA graph containing multiple RNG ops behaves like a
|
| 29 |
+
* single giant kernel from the perspective of ops external
|
| 30 |
+
* to the graph. During graph capture, logic in CUDAGeneratorImpl
|
| 31 |
+
* records the total of all offset increments that occur in the
|
| 32 |
+
* graphed region, and records the final total as the offset for
|
| 33 |
+
* the entire graph.
|
| 34 |
+
*
|
| 35 |
+
* When the graph reruns, the logic that reruns it
|
| 36 |
+
* increments this device's CUDA generator's offset
|
| 37 |
+
* by that total.
|
| 38 |
+
*
|
| 39 |
+
* Meanwhile, within the graph, at capture time, instead of
|
| 40 |
+
* populating PhiloxCudaStates with the uint64_t offset pulled
|
| 41 |
+
* directly from the global state, PhiloxCudaState uses a pointer
|
| 42 |
+
* to a one-element stream-local int64_t device tensor
|
| 43 |
+
* holding an initial offset value, and a uint64_t holding an
|
| 44 |
+
* intra-graph offset. (The intra-graph offset starts from zero
|
| 45 |
+
* when capture begins.) In each consumer kernel,
|
| 46 |
+
* at::cuda::philox::unpack computes the offset to use for this kernel
|
| 47 |
+
* as intra-graph offset + *initial offset.
|
| 48 |
+
*
|
| 49 |
+
* When the graph reruns, the logic that reruns it first
|
| 50 |
+
* fill_s the initial offset tensor with this device's
|
| 51 |
+
* CUDA generator's current offset.
|
| 52 |
+
*
|
| 53 |
+
* The control flow above ensures graphed execution is bitwise
|
| 54 |
+
* identical to eager execution as long as RNG ops are enqueued
|
| 55 |
+
* from a single thread, even if RNG ops and graphs containing
|
| 56 |
+
* RNG ops are enqueued and run simultaneously on multiple streams.
|
| 57 |
+
*
|
| 58 |
+
* Usage:
|
| 59 |
+
* ~~~~~~
|
| 60 |
+
* PhiloxCudaState in this file, and unpack() in
|
| 61 |
+
* cuda/CUDAGraphsUtils.cuh allow non-divergent use of
|
| 62 |
+
* CUDAGeneratorImpl whether graph capture is underway or not.
|
| 63 |
+
*
|
| 64 |
+
* Each PhiloxCudaState instance should be used for one and only one
|
| 65 |
+
* consumer kernel.
|
| 66 |
+
*
|
| 67 |
+
* Example (see e.g. native/cuda/Dropout.cu):
|
| 68 |
+
*
|
| 69 |
+
* #include <ATen/cuda/CUDAGeneratorImpl.h>
|
| 70 |
+
* #include <ATen/cuda/CUDAGraphsUtils.cuh>
|
| 71 |
+
*
|
| 72 |
+
* __global__ void kernel(..., PhiloxCudaState philox_args) {
|
| 73 |
+
* auto seeds = at::cuda::philox::unpack(philox_args);
|
| 74 |
+
* IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
|
| 75 |
+
* curandStatePhilox4_32_10_t state;
|
| 76 |
+
* curand_init(std::get<0>(seeds), // seed
|
| 77 |
+
* idx, // per-thread subsequence
|
| 78 |
+
* std::get<1>(seeds), // offset in subsequence
|
| 79 |
+
* &state);
|
| 80 |
+
* ...
|
| 81 |
+
* }
|
| 82 |
+
*
|
| 83 |
+
* host_caller(...) {
|
| 84 |
+
* PhiloxCudaState rng_engine_inputs;
|
| 85 |
+
* {
|
| 86 |
+
* // See Note [Acquire lock when using random generators]
|
| 87 |
+
* std::lock_guard<std::mutex> lock(gen->mutex_);
|
| 88 |
+
*
|
| 89 |
+
* // gen could be HostState or DevState here! No divergent code needed!
|
| 90 |
+
* rng_engine_inputs = gen->philox_cuda_state(offset_increment);
|
| 91 |
+
* }
|
| 92 |
+
* kernel<<<...>>>(..., rng_engine_inputs);
|
| 93 |
+
* }
|
| 94 |
+
*
|
| 95 |
+
*/
|
| 96 |
+
|
| 97 |
+
struct CUDAGeneratorState : public c10::intrusive_ptr_target {
|
| 98 |
+
uint64_t seed_;
|
| 99 |
+
uint64_t philox_offset_per_thread_;
|
| 100 |
+
uint32_t offset_intragraph_;
|
| 101 |
+
bool capturing_{};
|
| 102 |
+
std::unordered_set<cuda::CUDAGraph*> registered_graphs_;
|
| 103 |
+
at::TensorBase seed_extragraph_{};
|
| 104 |
+
at::TensorBase offset_extragraph_{};
|
| 105 |
+
|
| 106 |
+
CUDAGeneratorState(
|
| 107 |
+
uint64_t seed = default_rng_seed_val,
|
| 108 |
+
uint64_t philox_offset_per_thread = 0,
|
| 109 |
+
uint32_t offset_intragraph = 0)
|
| 110 |
+
: seed_(seed),
|
| 111 |
+
philox_offset_per_thread_(philox_offset_per_thread),
|
| 112 |
+
offset_intragraph_(offset_intragraph) {}
|
| 113 |
+
|
| 114 |
+
void increase(uint64_t increment);
|
| 115 |
+
|
| 116 |
+
void register_graph(cuda::CUDAGraph* graph);
|
| 117 |
+
void unregister_graph(cuda::CUDAGraph* graph);
|
| 118 |
+
|
| 119 |
+
void capture_prologue();
|
| 120 |
+
// capture_epilogue returns the wholegraph_increment
|
| 121 |
+
uint64_t capture_epilogue();
|
| 122 |
+
void replay_prologue(uint64_t wholegraph_increment);
|
| 123 |
+
c10::intrusive_ptr<CUDAGeneratorState> clone();
|
| 124 |
+
};
|
| 125 |
+
|
| 126 |
+
struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl {
|
| 127 |
+
// Constructors
|
| 128 |
+
CUDAGeneratorImpl(DeviceIndex device_index = -1);
|
| 129 |
+
CUDAGeneratorImpl(
|
| 130 |
+
DeviceIndex device_index,
|
| 131 |
+
c10::intrusive_ptr<CUDAGeneratorState> state_);
|
| 132 |
+
~CUDAGeneratorImpl() override = default;
|
| 133 |
+
|
| 134 |
+
// CUDAGeneratorImpl methods
|
| 135 |
+
std::shared_ptr<CUDAGeneratorImpl> clone() const;
|
| 136 |
+
void set_current_seed(uint64_t seed) override;
|
| 137 |
+
void set_offset(uint64_t offset) override;
|
| 138 |
+
uint64_t get_offset() const override;
|
| 139 |
+
uint64_t current_seed() const override;
|
| 140 |
+
uint64_t seed() override;
|
| 141 |
+
void set_state(const c10::TensorImpl& new_state) override;
|
| 142 |
+
c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
|
| 143 |
+
void graphsafe_set_state(
|
| 144 |
+
const c10::intrusive_ptr<GeneratorImpl>& state) override;
|
| 145 |
+
c10::intrusive_ptr<c10::GeneratorImpl> graphsafe_get_state() const override;
|
| 146 |
+
|
| 147 |
+
void set_philox_offset_per_thread(uint64_t offset);
|
| 148 |
+
uint64_t philox_offset_per_thread() const;
|
| 149 |
+
|
| 150 |
+
void register_graph(cuda::CUDAGraph* graph);
|
| 151 |
+
void unregister_graph(cuda::CUDAGraph* graph);
|
| 152 |
+
|
| 153 |
+
// Generates a PhiloxCudaState with a specified increment, and increment
|
| 154 |
+
// current state
|
| 155 |
+
PhiloxCudaState philox_cuda_state(uint64_t increment);
|
| 156 |
+
|
| 157 |
+
bool reset_rnn_state() {
|
| 158 |
+
return !no_reset_rnn_state_.test_and_set();
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
// Temporarily accommodates call sites that use philox_engine_inputs.
|
| 162 |
+
// Allows incremental refactor of call sites to use philox_cuda_state.
|
| 163 |
+
std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment);
|
| 164 |
+
|
| 165 |
+
static c10::DeviceType device_type();
|
| 166 |
+
|
| 167 |
+
private:
|
| 168 |
+
CUDAGeneratorImpl* clone_impl() const override;
|
| 169 |
+
|
| 170 |
+
c10::intrusive_ptr<CUDAGeneratorState> state_;
|
| 171 |
+
std::atomic_flag no_reset_rnn_state_;
|
| 172 |
+
};
|
| 173 |
+
|
| 174 |
+
namespace cuda::detail {
|
| 175 |
+
|
| 176 |
+
TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator(
|
| 177 |
+
DeviceIndex device_index = -1);
|
| 178 |
+
TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1);
|
| 179 |
+
|
| 180 |
+
} // namespace cuda::detail
|
| 181 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <c10/core/Device.h>
|
| 5 |
+
#include <c10/cuda/CUDAGraphsC10Utils.h>
|
| 6 |
+
#include <c10/cuda/CUDAStream.h>
|
| 7 |
+
#include <c10/util/flat_hash_map.h>
|
| 8 |
+
|
| 9 |
+
namespace at {
|
| 10 |
+
|
| 11 |
+
struct Generator;
|
| 12 |
+
struct CUDAGeneratorImpl;
|
| 13 |
+
struct CUDAGeneratorState;
|
| 14 |
+
|
| 15 |
+
namespace cuda {
|
| 16 |
+
|
| 17 |
+
// Standalone way to get a unique mempool id usable as a pool=... argument
|
| 18 |
+
// to CUDAGraph::capture_begin
|
| 19 |
+
TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle();
|
| 20 |
+
|
| 21 |
+
struct TORCH_CUDA_CPP_API CUDAGraph {
|
| 22 |
+
CUDAGraph();
|
| 23 |
+
~CUDAGraph();
|
| 24 |
+
|
| 25 |
+
static void inc_pending_event_queries();
|
| 26 |
+
static void dec_pending_event_queries();
|
| 27 |
+
static int num_pending_event_queries();
|
| 28 |
+
// See Note [Explicit Registration of Generators to the CUDA Graph]
|
| 29 |
+
void register_generator_state(c10::intrusive_ptr<at::CUDAGeneratorState> state);
|
| 30 |
+
void register_generator_state(const at::Generator& generator);
|
| 31 |
+
void capture_begin(
|
| 32 |
+
MempoolId_t pool = {0, 0},
|
| 33 |
+
cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal);
|
| 34 |
+
void capture_end();
|
| 35 |
+
void replay();
|
| 36 |
+
void reset();
|
| 37 |
+
MempoolId_t pool();
|
| 38 |
+
void enable_debug_mode();
|
| 39 |
+
void debug_dump(const std::string& debug_path);
|
| 40 |
+
|
| 41 |
+
protected:
|
| 42 |
+
cudaGraph_t graph_ = nullptr;
|
| 43 |
+
cudaGraphExec_t graph_exec_ = nullptr;
|
| 44 |
+
|
| 45 |
+
static std::atomic<int> pending_event_queries;
|
| 46 |
+
|
| 47 |
+
// internal states so reset() can do its best cleaning up
|
| 48 |
+
// Set to true in capture_end if cudaStreamEndCapture succeeded
|
| 49 |
+
// Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate
|
| 50 |
+
// to create graph_exec_, then graph_ is deleted
|
| 51 |
+
bool has_graph_ = false;
|
| 52 |
+
// Set to true in capture_end if cudaGraphInstantiate succeeded
|
| 53 |
+
bool has_graph_exec_ = false;
|
| 54 |
+
|
| 55 |
+
// the ID assigned by cuda during graph capture,
|
| 56 |
+
// used to identify when a stream is participating in capture
|
| 57 |
+
CaptureId_t capture_id_ = -1;
|
| 58 |
+
|
| 59 |
+
// uuid used to request a particular private mempool from CUDACachingAllocator.
|
| 60 |
+
// By default, this will be set to {id_, 0}.
|
| 61 |
+
//
|
| 62 |
+
// If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_
|
| 63 |
+
// will be set to the other graph's mempool_id_, and therefore share a mempool with the
|
| 64 |
+
// other graph.
|
| 65 |
+
//
|
| 66 |
+
// If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(),
|
| 67 |
+
// it will share a mempool with any other captures that used "pool=handle".
|
| 68 |
+
//
|
| 69 |
+
// Sharing a mempool across graphs saves memory, and it's safe if you
|
| 70 |
+
// know you'll replay those graphs in the same order you captured them.
|
| 71 |
+
MempoolId_t mempool_id_;
|
| 72 |
+
|
| 73 |
+
// Stream on which capture began
|
| 74 |
+
at::cuda::CUDAStream capture_stream_;
|
| 75 |
+
|
| 76 |
+
// multiple generator states and their wholegraph_increments in this graph
|
| 77 |
+
// that are managed by the CUDA Graph
|
| 78 |
+
ska::flat_hash_map<c10::intrusive_ptr<at::CUDAGeneratorState>, uint64_t>
|
| 79 |
+
captured_generator_states_;
|
| 80 |
+
|
| 81 |
+
// Device where capture occurred. Right now, for simplicity, we require all ops
|
| 82 |
+
// in a capture to run on the same device, but this is a limitation of CUDAGraph,
|
| 83 |
+
// not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device
|
| 84 |
+
// captures if needed.
|
| 85 |
+
int capture_dev_;
|
| 86 |
+
};
|
| 87 |
+
|
| 88 |
+
} // namespace cuda
|
| 89 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/CUDAGeneratorImpl.h>
|
| 4 |
+
#include <ATen/cuda/CUDAEvent.h>
|
| 5 |
+
#include <ATen/cuda/PhiloxUtils.cuh>
|
| 6 |
+
#include <ATen/cuda/detail/CUDAHooks.h>
|
| 7 |
+
#include <ATen/detail/CUDAHooksInterface.h>
|
| 8 |
+
#include <c10/core/StreamGuard.h>
|
| 9 |
+
#include <c10/cuda/CUDAGraphsC10Utils.h>
|
| 10 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 11 |
+
|
| 12 |
+
// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten.
|
| 13 |
+
// This file adds utils used by aten only.
|
| 14 |
+
|
| 15 |
+
namespace at::cuda {
|
| 16 |
+
|
| 17 |
+
using CaptureId_t = c10::cuda::CaptureId_t;
|
| 18 |
+
using CaptureStatus = c10::cuda::CaptureStatus;
|
| 19 |
+
|
| 20 |
+
// Use this version where you don't want to create a CUDA context if none exists.
|
| 21 |
+
inline CaptureStatus currentStreamCaptureStatus() {
|
| 22 |
+
// don't create a context if we don't have to
|
| 23 |
+
if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) {
|
| 24 |
+
return c10::cuda::currentStreamCaptureStatusMayInitCtx();
|
| 25 |
+
} else {
|
| 26 |
+
return CaptureStatus::None;
|
| 27 |
+
}
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
inline void assertNotCapturing(const std::string& attempt) {
|
| 31 |
+
auto status = currentStreamCaptureStatus();
|
| 32 |
+
TORCH_CHECK(status == CaptureStatus::None,
|
| 33 |
+
attempt,
|
| 34 |
+
" during CUDA graph capture. If you need this call to be captured, "
|
| 35 |
+
"please file an issue. "
|
| 36 |
+
"Current cudaStreamCaptureStatus: ",
|
| 37 |
+
status);
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
inline void errorIfCapturingCudnnBenchmark(const std::string& version_specific) {
|
| 41 |
+
auto status = currentStreamCaptureStatus();
|
| 42 |
+
TORCH_CHECK(status == CaptureStatus::None,
|
| 43 |
+
"Current cudaStreamCaptureStatus: ",
|
| 44 |
+
status,
|
| 45 |
+
"\nCapturing ",
|
| 46 |
+
version_specific,
|
| 47 |
+
"is prohibited. Possible causes of this error:\n"
|
| 48 |
+
"1. No warmup iterations occurred before capture.\n"
|
| 49 |
+
"2. The convolutions you're trying to capture use dynamic shapes, "
|
| 50 |
+
"in which case capturing them is generally prohibited.");
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 4 |
+
#if defined(USE_ROCM)
|
| 5 |
+
#include <hipsparse/hipsparse-version.h>
|
| 6 |
+
#define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch)
|
| 7 |
+
#endif
|
| 8 |
+
|
| 9 |
+
// cuSparse Generic API added in CUDA 10.1
|
| 10 |
+
// Windows support added in CUDA 11.0
|
| 11 |
+
#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32)))
|
| 12 |
+
#define AT_USE_CUSPARSE_GENERIC_API() 1
|
| 13 |
+
#else
|
| 14 |
+
#define AT_USE_CUSPARSE_GENERIC_API() 0
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
// cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0
|
| 18 |
+
#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
|
| 19 |
+
(CUSPARSE_VERSION < 12000)
|
| 20 |
+
#define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1
|
| 21 |
+
#else
|
| 22 |
+
#define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
|
| 26 |
+
(CUSPARSE_VERSION >= 12000)
|
| 27 |
+
#define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1
|
| 28 |
+
#else
|
| 29 |
+
#define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0
|
| 30 |
+
#endif
|
| 31 |
+
|
| 32 |
+
#if defined(USE_ROCM)
|
| 33 |
+
// hipSparse const API added in v2.4.0
|
| 34 |
+
#if HIPSPARSE_VERSION >= 200400
|
| 35 |
+
#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1
|
| 36 |
+
#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
|
| 37 |
+
#define AT_USE_HIPSPARSE_GENERIC_API() 1
|
| 38 |
+
#else
|
| 39 |
+
#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
|
| 40 |
+
#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 1
|
| 41 |
+
#define AT_USE_HIPSPARSE_GENERIC_API() 1
|
| 42 |
+
#endif
|
| 43 |
+
#else // USE_ROCM
|
| 44 |
+
#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
|
| 45 |
+
#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
|
| 46 |
+
#define AT_USE_HIPSPARSE_GENERIC_API() 0
|
| 47 |
+
#endif // USE_ROCM
|
| 48 |
+
|
| 49 |
+
// cuSparse Generic API spsv function was added in CUDA 11.3.0
|
| 50 |
+
#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500)
|
| 51 |
+
#define AT_USE_CUSPARSE_GENERIC_SPSV() 1
|
| 52 |
+
#else
|
| 53 |
+
#define AT_USE_CUSPARSE_GENERIC_SPSV() 0
|
| 54 |
+
#endif
|
| 55 |
+
|
| 56 |
+
// cuSparse Generic API spsm function was added in CUDA 11.3.1
|
| 57 |
+
#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600)
|
| 58 |
+
#define AT_USE_CUSPARSE_GENERIC_SPSM() 1
|
| 59 |
+
#else
|
| 60 |
+
#define AT_USE_CUSPARSE_GENERIC_SPSM() 0
|
| 61 |
+
#endif
|
| 62 |
+
|
| 63 |
+
// cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400)
|
| 64 |
+
#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400)
|
| 65 |
+
#define AT_USE_CUSPARSE_GENERIC_SDDMM() 1
|
| 66 |
+
#else
|
| 67 |
+
#define AT_USE_CUSPARSE_GENERIC_SDDMM() 0
|
| 68 |
+
#endif
|
| 69 |
+
|
| 70 |
+
// BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0)
|
| 71 |
+
#if defined(CUDART_VERSION) || defined(USE_ROCM)
|
| 72 |
+
#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1
|
| 73 |
+
#else
|
| 74 |
+
#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0
|
| 75 |
+
#endif
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h
ADDED
|
@@ -0,0 +1,318 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
Provides a subset of cuSPARSE functions as templates:
|
| 5 |
+
|
| 6 |
+
csrgeam2<scalar_t>(...)
|
| 7 |
+
|
| 8 |
+
where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
|
| 9 |
+
The functions are available in at::cuda::sparse namespace.
|
| 10 |
+
*/
|
| 11 |
+
|
| 12 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 13 |
+
#include <ATen/cuda/CUDASparse.h>
|
| 14 |
+
|
| 15 |
+
namespace at::cuda::sparse {
|
| 16 |
+
|
| 17 |
+
#define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
|
| 18 |
+
cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
|
| 19 |
+
const cusparseMatDescr_t descrA, int nnzA, \
|
| 20 |
+
const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
|
| 21 |
+
const int *csrSortedColIndA, const scalar_t *beta, \
|
| 22 |
+
const cusparseMatDescr_t descrB, int nnzB, \
|
| 23 |
+
const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
|
| 24 |
+
const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
|
| 25 |
+
const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \
|
| 26 |
+
const int *csrSortedColIndC, size_t *pBufferSizeInBytes
|
| 27 |
+
|
| 28 |
+
template <typename scalar_t>
|
| 29 |
+
inline void csrgeam2_bufferSizeExt(
|
| 30 |
+
CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) {
|
| 31 |
+
TORCH_INTERNAL_ASSERT(
|
| 32 |
+
false,
|
| 33 |
+
"at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ",
|
| 34 |
+
typeid(scalar_t).name());
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
template <>
|
| 38 |
+
void csrgeam2_bufferSizeExt<float>(
|
| 39 |
+
CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float));
|
| 40 |
+
template <>
|
| 41 |
+
void csrgeam2_bufferSizeExt<double>(
|
| 42 |
+
CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double));
|
| 43 |
+
template <>
|
| 44 |
+
void csrgeam2_bufferSizeExt<c10::complex<float>>(
|
| 45 |
+
CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<float>));
|
| 46 |
+
template <>
|
| 47 |
+
void csrgeam2_bufferSizeExt<c10::complex<double>>(
|
| 48 |
+
CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<double>));
|
| 49 |
+
|
| 50 |
+
#define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \
|
| 51 |
+
cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \
|
| 52 |
+
int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \
|
| 53 |
+
const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \
|
| 54 |
+
const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
|
| 55 |
+
int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace
|
| 56 |
+
|
| 57 |
+
template <typename scalar_t>
|
| 58 |
+
inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) {
|
| 59 |
+
TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz(
|
| 60 |
+
handle,
|
| 61 |
+
m,
|
| 62 |
+
n,
|
| 63 |
+
descrA,
|
| 64 |
+
nnzA,
|
| 65 |
+
csrSortedRowPtrA,
|
| 66 |
+
csrSortedColIndA,
|
| 67 |
+
descrB,
|
| 68 |
+
nnzB,
|
| 69 |
+
csrSortedRowPtrB,
|
| 70 |
+
csrSortedColIndB,
|
| 71 |
+
descrC,
|
| 72 |
+
csrSortedRowPtrC,
|
| 73 |
+
nnzTotalDevHostPtr,
|
| 74 |
+
workspace));
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
#define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \
|
| 78 |
+
cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
|
| 79 |
+
const cusparseMatDescr_t descrA, int nnzA, \
|
| 80 |
+
const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
|
| 81 |
+
const int *csrSortedColIndA, const scalar_t *beta, \
|
| 82 |
+
const cusparseMatDescr_t descrB, int nnzB, \
|
| 83 |
+
const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
|
| 84 |
+
const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
|
| 85 |
+
scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \
|
| 86 |
+
void *pBuffer
|
| 87 |
+
|
| 88 |
+
template <typename scalar_t>
|
| 89 |
+
inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) {
|
| 90 |
+
TORCH_INTERNAL_ASSERT(
|
| 91 |
+
false,
|
| 92 |
+
"at::cuda::sparse::csrgeam2: not implemented for ",
|
| 93 |
+
typeid(scalar_t).name());
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
template <>
|
| 97 |
+
void csrgeam2<float>(CUSPARSE_CSRGEAM2_ARGTYPES(float));
|
| 98 |
+
template <>
|
| 99 |
+
void csrgeam2<double>(CUSPARSE_CSRGEAM2_ARGTYPES(double));
|
| 100 |
+
template <>
|
| 101 |
+
void csrgeam2<c10::complex<float>>(
|
| 102 |
+
CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<float>));
|
| 103 |
+
template <>
|
| 104 |
+
void csrgeam2<c10::complex<double>>(
|
| 105 |
+
CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<double>));
|
| 106 |
+
|
| 107 |
+
#define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \
|
| 108 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 109 |
+
cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \
|
| 110 |
+
int kb, int nnzb, const scalar_t *alpha, \
|
| 111 |
+
const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
|
| 112 |
+
const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
|
| 113 |
+
const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc
|
| 114 |
+
|
| 115 |
+
template <typename scalar_t>
|
| 116 |
+
inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) {
|
| 117 |
+
TORCH_INTERNAL_ASSERT(
|
| 118 |
+
false,
|
| 119 |
+
"at::cuda::sparse::bsrmm: not implemented for ",
|
| 120 |
+
typeid(scalar_t).name());
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
template <>
|
| 124 |
+
void bsrmm<float>(CUSPARSE_BSRMM_ARGTYPES(float));
|
| 125 |
+
template <>
|
| 126 |
+
void bsrmm<double>(CUSPARSE_BSRMM_ARGTYPES(double));
|
| 127 |
+
template <>
|
| 128 |
+
void bsrmm<c10::complex<float>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<float>));
|
| 129 |
+
template <>
|
| 130 |
+
void bsrmm<c10::complex<double>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<double>));
|
| 131 |
+
|
| 132 |
+
#define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \
|
| 133 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 134 |
+
cusparseOperation_t transA, int mb, int nb, int nnzb, \
|
| 135 |
+
const scalar_t *alpha, const cusparseMatDescr_t descrA, \
|
| 136 |
+
const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
|
| 137 |
+
int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y
|
| 138 |
+
|
| 139 |
+
template <typename scalar_t>
|
| 140 |
+
inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) {
|
| 141 |
+
TORCH_INTERNAL_ASSERT(
|
| 142 |
+
false,
|
| 143 |
+
"at::cuda::sparse::bsrmv: not implemented for ",
|
| 144 |
+
typeid(scalar_t).name());
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
template <>
|
| 148 |
+
void bsrmv<float>(CUSPARSE_BSRMV_ARGTYPES(float));
|
| 149 |
+
template <>
|
| 150 |
+
void bsrmv<double>(CUSPARSE_BSRMV_ARGTYPES(double));
|
| 151 |
+
template <>
|
| 152 |
+
void bsrmv<c10::complex<float>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<float>));
|
| 153 |
+
template <>
|
| 154 |
+
void bsrmv<c10::complex<double>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<double>));
|
| 155 |
+
|
| 156 |
+
#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
|
| 157 |
+
|
| 158 |
+
#define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \
|
| 159 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 160 |
+
cusparseOperation_t transA, int mb, int nnzb, \
|
| 161 |
+
const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
|
| 162 |
+
const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
|
| 163 |
+
bsrsv2Info_t info, int *pBufferSizeInBytes
|
| 164 |
+
|
| 165 |
+
template <typename scalar_t>
|
| 166 |
+
inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) {
|
| 167 |
+
TORCH_INTERNAL_ASSERT(
|
| 168 |
+
false,
|
| 169 |
+
"at::cuda::sparse::bsrsv2_bufferSize: not implemented for ",
|
| 170 |
+
typeid(scalar_t).name());
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
template <>
|
| 174 |
+
void bsrsv2_bufferSize<float>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float));
|
| 175 |
+
template <>
|
| 176 |
+
void bsrsv2_bufferSize<double>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double));
|
| 177 |
+
template <>
|
| 178 |
+
void bsrsv2_bufferSize<c10::complex<float>>(
|
| 179 |
+
CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<float>));
|
| 180 |
+
template <>
|
| 181 |
+
void bsrsv2_bufferSize<c10::complex<double>>(
|
| 182 |
+
CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<double>));
|
| 183 |
+
|
| 184 |
+
#define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \
|
| 185 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 186 |
+
cusparseOperation_t transA, int mb, int nnzb, \
|
| 187 |
+
const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
|
| 188 |
+
const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
|
| 189 |
+
bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
|
| 190 |
+
|
| 191 |
+
template <typename scalar_t>
|
| 192 |
+
inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) {
|
| 193 |
+
TORCH_INTERNAL_ASSERT(
|
| 194 |
+
false,
|
| 195 |
+
"at::cuda::sparse::bsrsv2_analysis: not implemented for ",
|
| 196 |
+
typeid(scalar_t).name());
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
template <>
|
| 200 |
+
void bsrsv2_analysis<float>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float));
|
| 201 |
+
template <>
|
| 202 |
+
void bsrsv2_analysis<double>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double));
|
| 203 |
+
template <>
|
| 204 |
+
void bsrsv2_analysis<c10::complex<float>>(
|
| 205 |
+
CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<float>));
|
| 206 |
+
template <>
|
| 207 |
+
void bsrsv2_analysis<c10::complex<double>>(
|
| 208 |
+
CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<double>));
|
| 209 |
+
|
| 210 |
+
#define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \
|
| 211 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 212 |
+
cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \
|
| 213 |
+
const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
|
| 214 |
+
const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
|
| 215 |
+
bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \
|
| 216 |
+
cusparseSolvePolicy_t policy, void *pBuffer
|
| 217 |
+
|
| 218 |
+
template <typename scalar_t>
|
| 219 |
+
inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) {
|
| 220 |
+
TORCH_INTERNAL_ASSERT(
|
| 221 |
+
false,
|
| 222 |
+
"at::cuda::sparse::bsrsv2_solve: not implemented for ",
|
| 223 |
+
typeid(scalar_t).name());
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
template <>
|
| 227 |
+
void bsrsv2_solve<float>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float));
|
| 228 |
+
template <>
|
| 229 |
+
void bsrsv2_solve<double>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double));
|
| 230 |
+
template <>
|
| 231 |
+
void bsrsv2_solve<c10::complex<float>>(
|
| 232 |
+
CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<float>));
|
| 233 |
+
template <>
|
| 234 |
+
void bsrsv2_solve<c10::complex<double>>(
|
| 235 |
+
CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<double>));
|
| 236 |
+
|
| 237 |
+
#define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \
|
| 238 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 239 |
+
cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
|
| 240 |
+
int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
|
| 241 |
+
const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
|
| 242 |
+
bsrsm2Info_t info, int *pBufferSizeInBytes
|
| 243 |
+
|
| 244 |
+
template <typename scalar_t>
|
| 245 |
+
inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) {
|
| 246 |
+
TORCH_INTERNAL_ASSERT(
|
| 247 |
+
false,
|
| 248 |
+
"at::cuda::sparse::bsrsm2_bufferSize: not implemented for ",
|
| 249 |
+
typeid(scalar_t).name());
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
template <>
|
| 253 |
+
void bsrsm2_bufferSize<float>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float));
|
| 254 |
+
template <>
|
| 255 |
+
void bsrsm2_bufferSize<double>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double));
|
| 256 |
+
template <>
|
| 257 |
+
void bsrsm2_bufferSize<c10::complex<float>>(
|
| 258 |
+
CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<float>));
|
| 259 |
+
template <>
|
| 260 |
+
void bsrsm2_bufferSize<c10::complex<double>>(
|
| 261 |
+
CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<double>));
|
| 262 |
+
|
| 263 |
+
#define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \
|
| 264 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 265 |
+
cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
|
| 266 |
+
int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
|
| 267 |
+
const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
|
| 268 |
+
bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
|
| 269 |
+
|
| 270 |
+
template <typename scalar_t>
|
| 271 |
+
inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) {
|
| 272 |
+
TORCH_INTERNAL_ASSERT(
|
| 273 |
+
false,
|
| 274 |
+
"at::cuda::sparse::bsrsm2_analysis: not implemented for ",
|
| 275 |
+
typeid(scalar_t).name());
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
template <>
|
| 279 |
+
void bsrsm2_analysis<float>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float));
|
| 280 |
+
template <>
|
| 281 |
+
void bsrsm2_analysis<double>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double));
|
| 282 |
+
template <>
|
| 283 |
+
void bsrsm2_analysis<c10::complex<float>>(
|
| 284 |
+
CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<float>));
|
| 285 |
+
template <>
|
| 286 |
+
void bsrsm2_analysis<c10::complex<double>>(
|
| 287 |
+
CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<double>));
|
| 288 |
+
|
| 289 |
+
#define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \
|
| 290 |
+
cusparseHandle_t handle, cusparseDirection_t dirA, \
|
| 291 |
+
cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
|
| 292 |
+
int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \
|
| 293 |
+
const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
|
| 294 |
+
int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \
|
| 295 |
+
scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer
|
| 296 |
+
|
| 297 |
+
template <typename scalar_t>
|
| 298 |
+
inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) {
|
| 299 |
+
TORCH_INTERNAL_ASSERT(
|
| 300 |
+
false,
|
| 301 |
+
"at::cuda::sparse::bsrsm2_solve: not implemented for ",
|
| 302 |
+
typeid(scalar_t).name());
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
template <>
|
| 306 |
+
void bsrsm2_solve<float>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float));
|
| 307 |
+
template <>
|
| 308 |
+
void bsrsm2_solve<double>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double));
|
| 309 |
+
template <>
|
| 310 |
+
void bsrsm2_solve<c10::complex<float>>(
|
| 311 |
+
CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<float>));
|
| 312 |
+
template <>
|
| 313 |
+
void bsrsm2_solve<c10::complex<double>>(
|
| 314 |
+
CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<double>));
|
| 315 |
+
|
| 316 |
+
#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
|
| 317 |
+
|
| 318 |
+
} // namespace at::cuda::sparse
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 5 |
+
#include <ATen/cuda/CUDASparse.h>
|
| 6 |
+
|
| 7 |
+
#include <c10/core/ScalarType.h>
|
| 8 |
+
|
| 9 |
+
#if defined(USE_ROCM)
|
| 10 |
+
#include <type_traits>
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
namespace at::cuda::sparse {
|
| 14 |
+
|
| 15 |
+
template <typename T, cusparseStatus_t (*destructor)(T*)>
|
| 16 |
+
struct CuSparseDescriptorDeleter {
|
| 17 |
+
void operator()(T* x) {
|
| 18 |
+
if (x != nullptr) {
|
| 19 |
+
TORCH_CUDASPARSE_CHECK(destructor(x));
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
template <typename T, cusparseStatus_t (*destructor)(T*)>
|
| 25 |
+
class CuSparseDescriptor {
|
| 26 |
+
public:
|
| 27 |
+
T* descriptor() const {
|
| 28 |
+
return descriptor_.get();
|
| 29 |
+
}
|
| 30 |
+
T* descriptor() {
|
| 31 |
+
return descriptor_.get();
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
protected:
|
| 35 |
+
std::unique_ptr<T, CuSparseDescriptorDeleter<T, destructor>> descriptor_;
|
| 36 |
+
};
|
| 37 |
+
|
| 38 |
+
#if AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
|
| 39 |
+
template <typename T, cusparseStatus_t (*destructor)(const T*)>
|
| 40 |
+
struct ConstCuSparseDescriptorDeleter {
|
| 41 |
+
void operator()(T* x) {
|
| 42 |
+
if (x != nullptr) {
|
| 43 |
+
TORCH_CUDASPARSE_CHECK(destructor(x));
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
template <typename T, cusparseStatus_t (*destructor)(const T*)>
|
| 49 |
+
class ConstCuSparseDescriptor {
|
| 50 |
+
public:
|
| 51 |
+
T* descriptor() const {
|
| 52 |
+
return descriptor_.get();
|
| 53 |
+
}
|
| 54 |
+
T* descriptor() {
|
| 55 |
+
return descriptor_.get();
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
protected:
|
| 59 |
+
std::unique_ptr<T, ConstCuSparseDescriptorDeleter<T, destructor>> descriptor_;
|
| 60 |
+
};
|
| 61 |
+
#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS
|
| 62 |
+
|
| 63 |
+
#if defined(USE_ROCM)
|
| 64 |
+
using cusparseMatDescr = std::remove_pointer<hipsparseMatDescr_t>::type;
|
| 65 |
+
using cusparseDnMatDescr = std::remove_pointer<hipsparseDnMatDescr_t>::type;
|
| 66 |
+
using cusparseDnVecDescr = std::remove_pointer<hipsparseDnVecDescr_t>::type;
|
| 67 |
+
using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
|
| 68 |
+
using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
|
| 69 |
+
using cusparseSpGEMMDescr = std::remove_pointer<hipsparseSpGEMMDescr_t>::type;
|
| 70 |
+
#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
|
| 71 |
+
using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type;
|
| 72 |
+
using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type;
|
| 73 |
+
#endif
|
| 74 |
+
#endif
|
| 75 |
+
|
| 76 |
+
// NOTE: This is only needed for CUDA 11 and earlier, since CUDA 12 introduced
|
| 77 |
+
// API for const descriptors
|
| 78 |
+
cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr);
|
| 79 |
+
|
| 80 |
+
class TORCH_CUDA_CPP_API CuSparseMatDescriptor
|
| 81 |
+
: public CuSparseDescriptor<cusparseMatDescr, &cusparseDestroyMatDescr> {
|
| 82 |
+
public:
|
| 83 |
+
CuSparseMatDescriptor() {
|
| 84 |
+
cusparseMatDescr_t raw_descriptor = nullptr;
|
| 85 |
+
TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
|
| 86 |
+
descriptor_.reset(raw_descriptor);
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
CuSparseMatDescriptor(bool upper, bool unit) {
|
| 90 |
+
cusparseFillMode_t fill_mode =
|
| 91 |
+
upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
|
| 92 |
+
cusparseDiagType_t diag_type =
|
| 93 |
+
unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
|
| 94 |
+
cusparseMatDescr_t raw_descriptor = nullptr;
|
| 95 |
+
TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
|
| 96 |
+
TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode));
|
| 97 |
+
TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type));
|
| 98 |
+
descriptor_.reset(raw_descriptor);
|
| 99 |
+
}
|
| 100 |
+
};
|
| 101 |
+
|
| 102 |
+
#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
|
| 103 |
+
|
| 104 |
+
class TORCH_CUDA_CPP_API CuSparseBsrsv2Info
|
| 105 |
+
: public CuSparseDescriptor<bsrsv2Info, &cusparseDestroyBsrsv2Info> {
|
| 106 |
+
public:
|
| 107 |
+
CuSparseBsrsv2Info() {
|
| 108 |
+
bsrsv2Info_t raw_descriptor = nullptr;
|
| 109 |
+
TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor));
|
| 110 |
+
descriptor_.reset(raw_descriptor);
|
| 111 |
+
}
|
| 112 |
+
};
|
| 113 |
+
|
| 114 |
+
class TORCH_CUDA_CPP_API CuSparseBsrsm2Info
|
| 115 |
+
: public CuSparseDescriptor<bsrsm2Info, &cusparseDestroyBsrsm2Info> {
|
| 116 |
+
public:
|
| 117 |
+
CuSparseBsrsm2Info() {
|
| 118 |
+
bsrsm2Info_t raw_descriptor = nullptr;
|
| 119 |
+
TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor));
|
| 120 |
+
descriptor_.reset(raw_descriptor);
|
| 121 |
+
}
|
| 122 |
+
};
|
| 123 |
+
|
| 124 |
+
#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
|
| 125 |
+
|
| 126 |
+
#if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
|
| 127 |
+
|
| 128 |
+
cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type);
|
| 129 |
+
|
| 130 |
+
#if AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS()
|
| 131 |
+
class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
|
| 132 |
+
: public CuSparseDescriptor<cusparseDnMatDescr, &cusparseDestroyDnMat> {
|
| 133 |
+
public:
|
| 134 |
+
explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
|
| 135 |
+
};
|
| 136 |
+
|
| 137 |
+
class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor
|
| 138 |
+
: public CuSparseDescriptor<const cusparseDnMatDescr, &destroyConstDnMat> {
|
| 139 |
+
public:
|
| 140 |
+
explicit CuSparseConstDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
|
| 141 |
+
cusparseDnMatDescr* unsafe_mutable_descriptor() const {
|
| 142 |
+
return const_cast<cusparseDnMatDescr*>(descriptor());
|
| 143 |
+
}
|
| 144 |
+
cusparseDnMatDescr* unsafe_mutable_descriptor() {
|
| 145 |
+
return const_cast<cusparseDnMatDescr*>(descriptor());
|
| 146 |
+
}
|
| 147 |
+
};
|
| 148 |
+
|
| 149 |
+
class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
|
| 150 |
+
: public CuSparseDescriptor<cusparseDnVecDescr, &cusparseDestroyDnVec> {
|
| 151 |
+
public:
|
| 152 |
+
explicit CuSparseDnVecDescriptor(const Tensor& input);
|
| 153 |
+
};
|
| 154 |
+
|
| 155 |
+
class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
|
| 156 |
+
: public CuSparseDescriptor<cusparseSpMatDescr, &cusparseDestroySpMat> {};
|
| 157 |
+
|
| 158 |
+
#elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
|
| 159 |
+
class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
|
| 160 |
+
: public ConstCuSparseDescriptor<
|
| 161 |
+
cusparseDnMatDescr,
|
| 162 |
+
&cusparseDestroyDnMat> {
|
| 163 |
+
public:
|
| 164 |
+
explicit CuSparseDnMatDescriptor(
|
| 165 |
+
const Tensor& input,
|
| 166 |
+
int64_t batch_offset = -1);
|
| 167 |
+
};
|
| 168 |
+
|
| 169 |
+
class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor
|
| 170 |
+
: public ConstCuSparseDescriptor<
|
| 171 |
+
const cusparseDnMatDescr,
|
| 172 |
+
&destroyConstDnMat> {
|
| 173 |
+
public:
|
| 174 |
+
explicit CuSparseConstDnMatDescriptor(
|
| 175 |
+
const Tensor& input,
|
| 176 |
+
int64_t batch_offset = -1);
|
| 177 |
+
cusparseDnMatDescr* unsafe_mutable_descriptor() const {
|
| 178 |
+
return const_cast<cusparseDnMatDescr*>(descriptor());
|
| 179 |
+
}
|
| 180 |
+
cusparseDnMatDescr* unsafe_mutable_descriptor() {
|
| 181 |
+
return const_cast<cusparseDnMatDescr*>(descriptor());
|
| 182 |
+
}
|
| 183 |
+
};
|
| 184 |
+
|
| 185 |
+
class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
|
| 186 |
+
: public ConstCuSparseDescriptor<
|
| 187 |
+
cusparseDnVecDescr,
|
| 188 |
+
&cusparseDestroyDnVec> {
|
| 189 |
+
public:
|
| 190 |
+
explicit CuSparseDnVecDescriptor(const Tensor& input);
|
| 191 |
+
};
|
| 192 |
+
|
| 193 |
+
class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
|
| 194 |
+
: public ConstCuSparseDescriptor<
|
| 195 |
+
cusparseSpMatDescr,
|
| 196 |
+
&cusparseDestroySpMat> {};
|
| 197 |
+
#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS()
|
| 198 |
+
|
| 199 |
+
class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor
|
| 200 |
+
: public CuSparseSpMatDescriptor {
|
| 201 |
+
public:
|
| 202 |
+
explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1);
|
| 203 |
+
|
| 204 |
+
std::tuple<int64_t, int64_t, int64_t> get_size() {
|
| 205 |
+
int64_t rows = 0, cols = 0, nnz = 0;
|
| 206 |
+
TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize(
|
| 207 |
+
this->descriptor(),
|
| 208 |
+
&rows,
|
| 209 |
+
&cols,
|
| 210 |
+
&nnz));
|
| 211 |
+
return std::make_tuple(rows, cols, nnz);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
void set_tensor(const Tensor& input) {
|
| 215 |
+
auto crow_indices = input.crow_indices();
|
| 216 |
+
auto col_indices = input.col_indices();
|
| 217 |
+
auto values = input.values();
|
| 218 |
+
|
| 219 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous());
|
| 220 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous());
|
| 221 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous());
|
| 222 |
+
TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers(
|
| 223 |
+
this->descriptor(),
|
| 224 |
+
crow_indices.data_ptr(),
|
| 225 |
+
col_indices.data_ptr(),
|
| 226 |
+
values.data_ptr()));
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
#if AT_USE_CUSPARSE_GENERIC_SPSV()
|
| 230 |
+
void set_mat_fill_mode(bool upper) {
|
| 231 |
+
cusparseFillMode_t fill_mode =
|
| 232 |
+
upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
|
| 233 |
+
TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
|
| 234 |
+
this->descriptor(),
|
| 235 |
+
CUSPARSE_SPMAT_FILL_MODE,
|
| 236 |
+
&fill_mode,
|
| 237 |
+
sizeof(fill_mode)));
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
void set_mat_diag_type(bool unit) {
|
| 241 |
+
cusparseDiagType_t diag_type =
|
| 242 |
+
unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
|
| 243 |
+
TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute(
|
| 244 |
+
this->descriptor(),
|
| 245 |
+
CUSPARSE_SPMAT_DIAG_TYPE,
|
| 246 |
+
&diag_type,
|
| 247 |
+
sizeof(diag_type)));
|
| 248 |
+
}
|
| 249 |
+
#endif
|
| 250 |
+
};
|
| 251 |
+
|
| 252 |
+
#if AT_USE_CUSPARSE_GENERIC_SPSV()
|
| 253 |
+
class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor
|
| 254 |
+
: public CuSparseDescriptor<cusparseSpSVDescr, &cusparseSpSV_destroyDescr> {
|
| 255 |
+
public:
|
| 256 |
+
CuSparseSpSVDescriptor() {
|
| 257 |
+
cusparseSpSVDescr_t raw_descriptor = nullptr;
|
| 258 |
+
TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor));
|
| 259 |
+
descriptor_.reset(raw_descriptor);
|
| 260 |
+
}
|
| 261 |
+
};
|
| 262 |
+
#endif
|
| 263 |
+
|
| 264 |
+
#if AT_USE_CUSPARSE_GENERIC_SPSM()
|
| 265 |
+
class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor
|
| 266 |
+
: public CuSparseDescriptor<cusparseSpSMDescr, &cusparseSpSM_destroyDescr> {
|
| 267 |
+
public:
|
| 268 |
+
CuSparseSpSMDescriptor() {
|
| 269 |
+
cusparseSpSMDescr_t raw_descriptor = nullptr;
|
| 270 |
+
TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor));
|
| 271 |
+
descriptor_.reset(raw_descriptor);
|
| 272 |
+
}
|
| 273 |
+
};
|
| 274 |
+
#endif
|
| 275 |
+
|
| 276 |
+
class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor
|
| 277 |
+
: public CuSparseDescriptor<cusparseSpGEMMDescr, &cusparseSpGEMM_destroyDescr> {
|
| 278 |
+
public:
|
| 279 |
+
CuSparseSpGEMMDescriptor() {
|
| 280 |
+
cusparseSpGEMMDescr_t raw_descriptor = nullptr;
|
| 281 |
+
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor));
|
| 282 |
+
descriptor_.reset(raw_descriptor);
|
| 283 |
+
}
|
| 284 |
+
};
|
| 285 |
+
|
| 286 |
+
#endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
|
| 287 |
+
|
| 288 |
+
} // namespace at::cuda::sparse
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <c10/util/Half.h>
|
| 5 |
+
|
| 6 |
+
#include <cuda.h>
|
| 7 |
+
#include <cuda_runtime.h>
|
| 8 |
+
#include <cuda_fp16.h>
|
| 9 |
+
|
| 10 |
+
namespace at {
|
| 11 |
+
template <>
|
| 12 |
+
inline __half* Tensor::data() const {
|
| 13 |
+
return reinterpret_cast<__half*>(data<Half>());
|
| 14 |
+
}
|
| 15 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 4 |
+
|
| 5 |
+
namespace at::cuda {
|
| 6 |
+
|
| 7 |
+
// Check if every tensor in a list of tensors matches the current
|
| 8 |
+
// device.
|
| 9 |
+
inline bool check_device(ArrayRef<Tensor> ts) {
|
| 10 |
+
if (ts.empty()) {
|
| 11 |
+
return true;
|
| 12 |
+
}
|
| 13 |
+
Device curDevice = Device(kCUDA, current_device());
|
| 14 |
+
for (const Tensor& t : ts) {
|
| 15 |
+
if (t.device() != curDevice) return false;
|
| 16 |
+
}
|
| 17 |
+
return true;
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/CachingHostAllocator.h>
|
| 4 |
+
#include <c10/core/Allocator.h>
|
| 5 |
+
#include <c10/cuda/CUDAStream.h>
|
| 6 |
+
|
| 7 |
+
namespace at::cuda {
|
| 8 |
+
|
| 9 |
+
//
|
| 10 |
+
// A caching allocator for CUDA host allocations (pinned memory).
|
| 11 |
+
//
|
| 12 |
+
// This provides a drop-in replacement for THCudaHostAllocator, which re-uses
|
| 13 |
+
// freed pinned (page-locked) memory allocations. This avoids device
|
| 14 |
+
// synchronizations due to cudaFreeHost calls.
|
| 15 |
+
//
|
| 16 |
+
// To ensure correct behavior, THCCachingHostAllocator_recordEvent must be
|
| 17 |
+
// called anytime a pointer from this allocator is used in a cudaMemcpyAsync
|
| 18 |
+
// call between host and device, and passed the corresponding context from the
|
| 19 |
+
// allocation. This is currently invoked by at::native::copy_kernel_cuda.
|
| 20 |
+
//
|
| 21 |
+
TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator();
|
| 22 |
+
|
| 23 |
+
// Records an event in the specified stream. The allocation corresponding to the
|
| 24 |
+
// input `ptr`/`ctx` will not be re-used until the event has occurred.
|
| 25 |
+
TORCH_CUDA_CPP_API bool CachingHostAllocator_recordEvent(
|
| 26 |
+
void* ptr,
|
| 27 |
+
void* ctx,
|
| 28 |
+
c10::cuda::CUDAStream stream);
|
| 29 |
+
|
| 30 |
+
// Releases cached pinned memory allocations via cudaHostFree
|
| 31 |
+
TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache();
|
| 32 |
+
|
| 33 |
+
inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) {
|
| 34 |
+
return getCachingHostAllocator()->allocate(size);
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cuda.h>
|
| 4 |
+
#include <c10/util/complex.h>
|
| 5 |
+
#include <c10/util/Half.h>
|
| 6 |
+
|
| 7 |
+
__device__ __forceinline__ unsigned int ACTIVE_MASK()
|
| 8 |
+
{
|
| 9 |
+
#if !defined(USE_ROCM)
|
| 10 |
+
return __activemask();
|
| 11 |
+
#else
|
| 12 |
+
// will be ignored anyway
|
| 13 |
+
return 0xffffffff;
|
| 14 |
+
#endif
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
__device__ __forceinline__ void WARP_SYNC(unsigned mask = 0xffffffff) {
|
| 18 |
+
#if !defined(USE_ROCM)
|
| 19 |
+
return __syncwarp(mask);
|
| 20 |
+
#endif
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
#if defined(USE_ROCM)
|
| 24 |
+
__device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate)
|
| 25 |
+
{
|
| 26 |
+
return __ballot(predicate);
|
| 27 |
+
}
|
| 28 |
+
#else
|
| 29 |
+
__device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff)
|
| 30 |
+
{
|
| 31 |
+
#if !defined(USE_ROCM)
|
| 32 |
+
return __ballot_sync(mask, predicate);
|
| 33 |
+
#else
|
| 34 |
+
return __ballot(predicate);
|
| 35 |
+
#endif
|
| 36 |
+
}
|
| 37 |
+
#endif
|
| 38 |
+
|
| 39 |
+
template <typename T>
|
| 40 |
+
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
|
| 41 |
+
{
|
| 42 |
+
#if !defined(USE_ROCM)
|
| 43 |
+
return __shfl_xor_sync(mask, value, laneMask, width);
|
| 44 |
+
#else
|
| 45 |
+
return __shfl_xor(value, laneMask, width);
|
| 46 |
+
#endif
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
template <typename T>
|
| 50 |
+
__device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff)
|
| 51 |
+
{
|
| 52 |
+
#if !defined(USE_ROCM)
|
| 53 |
+
return __shfl_sync(mask, value, srcLane, width);
|
| 54 |
+
#else
|
| 55 |
+
return __shfl(value, srcLane, width);
|
| 56 |
+
#endif
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
template <typename T>
|
| 60 |
+
__device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
|
| 61 |
+
{
|
| 62 |
+
#if !defined(USE_ROCM)
|
| 63 |
+
return __shfl_up_sync(mask, value, delta, width);
|
| 64 |
+
#else
|
| 65 |
+
return __shfl_up(value, delta, width);
|
| 66 |
+
#endif
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
template <typename T>
|
| 70 |
+
__device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
|
| 71 |
+
{
|
| 72 |
+
#if !defined(USE_ROCM)
|
| 73 |
+
return __shfl_down_sync(mask, value, delta, width);
|
| 74 |
+
#else
|
| 75 |
+
return __shfl_down(value, delta, width);
|
| 76 |
+
#endif
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
#if defined(USE_ROCM)
|
| 80 |
+
template<>
|
| 81 |
+
__device__ __forceinline__ int64_t WARP_SHFL_DOWN<int64_t>(int64_t value, unsigned int delta, int width , unsigned int mask)
|
| 82 |
+
{
|
| 83 |
+
//(HIP doesn't support int64_t). Trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
|
| 84 |
+
int2 a = *reinterpret_cast<int2*>(&value);
|
| 85 |
+
a.x = __shfl_down(a.x, delta);
|
| 86 |
+
a.y = __shfl_down(a.y, delta);
|
| 87 |
+
return *reinterpret_cast<int64_t*>(&a);
|
| 88 |
+
}
|
| 89 |
+
#endif
|
| 90 |
+
|
| 91 |
+
template<>
|
| 92 |
+
__device__ __forceinline__ c10::Half WARP_SHFL_DOWN<c10::Half>(c10::Half value, unsigned int delta, int width, unsigned int mask)
|
| 93 |
+
{
|
| 94 |
+
return c10::Half(WARP_SHFL_DOWN<unsigned short>(value.x, delta, width, mask), c10::Half::from_bits_t{});
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
template <typename T>
|
| 98 |
+
__device__ __forceinline__ c10::complex<T> WARP_SHFL_DOWN(c10::complex<T> value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff)
|
| 99 |
+
{
|
| 100 |
+
#if !defined(USE_ROCM)
|
| 101 |
+
return c10::complex<T>(
|
| 102 |
+
__shfl_down_sync(mask, value.real_, delta, width),
|
| 103 |
+
__shfl_down_sync(mask, value.imag_, delta, width));
|
| 104 |
+
#else
|
| 105 |
+
return c10::complex<T>(
|
| 106 |
+
__shfl_down(value.real_, delta, width),
|
| 107 |
+
__shfl_down(value.imag_, delta, width));
|
| 108 |
+
#endif
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/**
|
| 112 |
+
* For CC 3.5+, perform a load using __ldg
|
| 113 |
+
*/
|
| 114 |
+
template <typename T>
|
| 115 |
+
__device__ __forceinline__ T doLdg(const T* p) {
|
| 116 |
+
#if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM)
|
| 117 |
+
return __ldg(p);
|
| 118 |
+
#else
|
| 119 |
+
return *p;
|
| 120 |
+
#endif
|
| 121 |
+
}
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <ATen/core/TensorBase.h>

namespace at::detail {

// Factory declarations for CUDA tensor allocation. Implementations live in
// the corresponding .cpp; only the overload set is declared here.

// Contiguous allocation with an explicit dtype; layout defaults are supplied
// by the implementation.
TORCH_CUDA_CPP_API TensorBase empty_cuda(
    IntArrayRef size,
    ScalarType dtype,
    std::optional<Device> device_opt,
    std::optional<c10::MemoryFormat> memory_format_opt);

// Fully-optional variant: every tensor option is independently overridable.
TORCH_CUDA_CPP_API TensorBase empty_cuda(
    IntArrayRef size,
    std::optional<ScalarType> dtype_opt,
    std::optional<Layout> layout_opt,
    std::optional<Device> device_opt,
    std::optional<bool> pin_memory_opt,
    std::optional<c10::MemoryFormat> memory_format_opt);

// Convenience overload taking a bundled TensorOptions.
TORCH_CUDA_CPP_API TensorBase empty_cuda(
    IntArrayRef size,
    const TensorOptions &options);

// Strided variants: the caller supplies explicit strides instead of a
// memory format.
TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
    IntArrayRef size,
    IntArrayRef stride,
    ScalarType dtype,
    std::optional<Device> device_opt);

TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
    IntArrayRef size,
    IntArrayRef stride,
    std::optional<ScalarType> dtype_opt,
    std::optional<Layout> layout_opt,
    std::optional<Device> device_opt,
    std::optional<bool> pin_memory_opt);

TORCH_CUDA_CPP_API TensorBase empty_strided_cuda(
    IntArrayRef size,
    IntArrayRef stride,
    const TensorOptions &options);


} // namespace at::detail
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#pragma once

#include <cuda.h>
#include <limits.h>
#include <math.h>
#include <float.h>

// NumericLimits.cuh is a holder for numeric limits definitions of commonly used
// types. This header is very specific to ROCm HIP and may be removed in the future.
// This header is derived from the legacy THCNumerics.cuh.

// The lower_bound and upper_bound constants are same as lowest and max for
// integral types, but are -inf and +inf for floating point types. They are
// useful in implementing min, max, etc.

namespace at {

// Primary template is intentionally empty: instantiating numeric_limits for
// an unlisted type is a compile-time error.
template <typename T>
struct numeric_limits {
};

// WARNING: the following at::numeric_limits definitions are there only to support
// HIP compilation for the moment. Use std::numeric_limits if you are not
// compiling for ROCm.
// from @colesbury: "The functions on numeric_limits aren't marked with
// __device__ which is why they don't work with ROCm. CUDA allows them
// because they're constexpr."

namespace {
// ROCm doesn't like INFINITY too.
constexpr double inf = INFINITY;
}

template <>
struct numeric_limits<bool> {
  static inline __host__ __device__ bool lowest() { return false; }
  static inline __host__ __device__ bool max() { return true; }
  static inline __host__ __device__ bool lower_bound() { return false; }
  static inline __host__ __device__ bool upper_bound() { return true; }
};

template <>
struct numeric_limits<uint8_t> {
  static inline __host__ __device__ uint8_t lowest() { return 0; }
  static inline __host__ __device__ uint8_t max() { return UINT8_MAX; }
  static inline __host__ __device__ uint8_t lower_bound() { return 0; }
  static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; }
};

template <>
struct numeric_limits<int8_t> {
  static inline __host__ __device__ int8_t lowest() { return INT8_MIN; }
  static inline __host__ __device__ int8_t max() { return INT8_MAX; }
  static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; }
  static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; }
};

template <>
struct numeric_limits<int16_t> {
  static inline __host__ __device__ int16_t lowest() { return INT16_MIN; }
  static inline __host__ __device__ int16_t max() { return INT16_MAX; }
  static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; }
  static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; }
};

template <>
struct numeric_limits<int32_t> {
  static inline __host__ __device__ int32_t lowest() { return INT32_MIN; }
  static inline __host__ __device__ int32_t max() { return INT32_MAX; }
  static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; }
  static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; }
};

template <>
struct numeric_limits<int64_t> {
// MSVC historically lacked the INT64_MIN/INT64_MAX macros in this context,
// so its compiler-specific _I64_* constants are used instead.
#ifdef _MSC_VER
  static inline __host__ __device__ int64_t lowest() { return _I64_MIN; }
  static inline __host__ __device__ int64_t max() { return _I64_MAX; }
  static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; }
  static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; }
#else
  static inline __host__ __device__ int64_t lowest() { return INT64_MIN; }
  static inline __host__ __device__ int64_t max() { return INT64_MAX; }
  static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; }
  static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; }
#endif
};

// IEEE-754 binary16 bit patterns: 0x7BFF is the largest finite value,
// 0x7C00 is +inf; the 0xF-prefixed values are their negatives.
template <>
struct numeric_limits<at::Half> {
  static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); }
  static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); }
  static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); }
  static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); }
};

// bfloat16 bit patterns: 0x7F7F is the largest finite value, 0x7F80 is +inf.
template <>
struct numeric_limits<at::BFloat16> {
  static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); }
  static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); }
  static inline __host__ __device__ at::BFloat16 lower_bound() { return at::BFloat16(0xFF80, at::BFloat16::from_bits()); }
  static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); }
};

template <>
struct numeric_limits<float> {
  static inline __host__ __device__ float lowest() { return -FLT_MAX; }
  static inline __host__ __device__ float max() { return FLT_MAX; }
  static inline __host__ __device__ float lower_bound() { return -static_cast<float>(inf); }
  static inline __host__ __device__ float upper_bound() { return static_cast<float>(inf); }
};

template <>
struct numeric_limits<double> {
  static inline __host__ __device__ double lowest() { return -DBL_MAX; }
  static inline __host__ __device__ double max() { return DBL_MAX; }
  static inline __host__ __device__ double lower_bound() { return -inf; }
  static inline __host__ __device__ double upper_bound() { return inf; }
};

} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
|
| 5 |
+
#include <ATen/cuda/detail/PhiloxCudaStateRaw.cuh>
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/PhiloxCudaState.h>
|
| 4 |
+
#include <ATen/cuda/detail/UnpackRaw.cuh>
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Allocator.h>
|
| 4 |
+
#include <ATen/cuda/CachingHostAllocator.h>
|
| 5 |
+
|
| 6 |
+
namespace at::cuda {
|
| 7 |
+
|
| 8 |
+
// Pinned (page-locked) host memory is served by the caching host allocator;
// this simply exposes it under the pinned-memory name.
inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() {
  auto* host_allocator = getCachingHostAllocator();
  return host_allocator;
}
|
| 11 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ceil_div.h>
|
| 4 |
+
#include <ATen/cuda/DeviceUtils.cuh>
|
| 5 |
+
#include <ATen/cuda/AsmUtils.cuh>
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
|
| 8 |
+
// Collection of in-kernel scan / prefix sum utilities
|
| 9 |
+
|
| 10 |
+
namespace at::cuda {
|
| 11 |
+
|
| 12 |
+
// Inclusive prefix sum for binary vars using intra-warp voting +
// shared memory
//
// `smem` must hold at least blockDim.x / C10_WARP_SIZE elements and is
// clobbered. All threads of the block must reach this function (it uses
// __syncthreads). If KillWARDependency is true, a trailing barrier makes it
// safe for the caller to immediately reuse `smem`.
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
  // Within-warp, we use warp voting.
#if defined (USE_ROCM)
  // The ballot mask is 64-bit on ROCm, so the 64-bit popcount is required.
  unsigned long long int vote = WARP_BALLOT(in);
  T index = __popcll(getLaneMaskLe() & vote);
  T carry = __popcll(vote);
#else
  T vote = WARP_BALLOT(in);
  // Set bits at or below this lane = inclusive in-warp prefix count.
  T index = __popc(getLaneMaskLe() & vote);
  T carry = __popc(vote);
#endif

  int warp = threadIdx.x / C10_WARP_SIZE;

  // Per each warp, write out a value
  if (getLaneId() == 0) {
    smem[warp] = carry;
  }

  __syncthreads();

  // Sum across warps in one thread. This appears to be faster than a
  // warp shuffle scan for CC 3.0+
  if (threadIdx.x == 0) {
    int current = 0;
    for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
      T v = smem[i];
      // After this loop smem[i] holds the inclusive per-warp prefix
      // (warps 0..i combined).
      smem[i] = binop(smem[i], current);
      current = binop(current, v);
    }
  }

  __syncthreads();

  // load the carry from the preceding warp
  if (warp >= 1) {
    index = binop(index, smem[warp - 1]);
  }

  *out = index;

  if (KillWARDependency) {
    __syncthreads();
  }
}
|
| 60 |
+
|
| 61 |
+
// Exclusive prefix sum for binary vars using intra-warp voting +
// shared memory
//
// Computed as the inclusive scan minus the thread's own input. `carry`
// receives the block-wide total (the last warp's inclusive sum).
// NOTE(review): the `- (T) in` adjustment assumes binop is addition-like;
// confirm callers only pass plus-style functors.
template <typename T, bool KillWARDependency, class BinaryFunction>
__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
  inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);

  // Inclusive to exclusive
  *out -= (T) in;

  // The outgoing carry for all threads is the last warp's sum
  *carry = smem[at::ceil_div<int>(blockDim.x, C10_WARP_SIZE) - 1];

  if (KillWARDependency) {
    __syncthreads();
  }
}
|
| 77 |
+
|
| 78 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <c10/macros/Export.h>
#include <cstdint>

namespace at::cuda {

// enqueues a kernel that spins for the specified number of cycles
// NOTE(review): presumably launched on the current stream, making it useful
// for testing stream ordering/overlap — confirm against the .cu definition.
TORCH_CUDA_CU_API void sleep(int64_t cycles);

// flushes instruction cache for ROCm; no-op for CUDA
TORCH_CUDA_CU_API void flush_icache();

} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstddef>
|
| 4 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 5 |
+
|
| 6 |
+
namespace at::cuda {
|
| 7 |
+
|
| 8 |
+
/// Allocator for Thrust to re-route its internal device allocations
|
| 9 |
+
/// to the THC allocator
|
| 10 |
+
class ThrustAllocator {
|
| 11 |
+
public:
|
| 12 |
+
typedef char value_type;
|
| 13 |
+
|
| 14 |
+
char* allocate(std::ptrdiff_t size) {
|
| 15 |
+
return static_cast<char*>(c10::cuda::CUDACachingAllocator::raw_alloc(size));
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
void deallocate(char* p, size_t size) {
|
| 19 |
+
c10::cuda::CUDACachingAllocator::raw_delete(p);
|
| 20 |
+
}
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
} // namespace at::cuda
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/cuda/cub.h>
|
| 3 |
+
|
| 4 |
+
#include <cstddef>
|
| 5 |
+
#include <type_traits>
|
| 6 |
+
#include <iterator>
|
| 7 |
+
#include <limits>
|
| 8 |
+
|
| 9 |
+
#include <ATen/cuda/cub_definitions.cuh>
|
| 10 |
+
|
| 11 |
+
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
|
| 12 |
+
|
| 13 |
+
#include <cub/cub.cuh>
|
| 14 |
+
|
| 15 |
+
#else
|
| 16 |
+
|
| 17 |
+
// include cub in a safe manner, see:
|
| 18 |
+
// https://github.com/pytorch/pytorch/pull/55292
|
| 19 |
+
#undef CUB_NS_POSTFIX //undef to avoid redefinition warnings
|
| 20 |
+
#undef CUB_NS_PREFIX
|
| 21 |
+
#undef CUB_NS_QUALIFIER
|
| 22 |
+
#define CUB_NS_PREFIX namespace at_cuda_detail {
|
| 23 |
+
#define CUB_NS_POSTFIX }
|
| 24 |
+
#define CUB_NS_QUALIFIER ::at_cuda_detail::cub
|
| 25 |
+
#include <cub/cub.cuh>
|
| 26 |
+
#undef CUB_NS_POSTFIX
|
| 27 |
+
#undef CUB_NS_PREFIX
|
| 28 |
+
#undef CUB_NS_QUALIFIER
|
| 29 |
+
|
| 30 |
+
#endif
|
| 31 |
+
|
| 32 |
+
#include <ATen/cuda/Exceptions.h>
|
| 33 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 34 |
+
#include <c10/cuda/CUDAStream.h>
|
| 35 |
+
|
| 36 |
+
// handle the temporary storage and 'twice' calls for cub API
//
// CUB device algorithms are invoked twice: the first call (null temp storage)
// only reports the scratch size needed; the second does the work. Scratch is
// taken from the CUDA caching allocator and released when the DataPtr goes
// out of scope at the end of the do-block. cudaGetLastError catches launch
// failures from the second call.
#define CUB_WRAPPER(func, ...) do {                                       \
  size_t temp_storage_bytes = 0;                                          \
  func(nullptr, temp_storage_bytes, __VA_ARGS__);                         \
  auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get();    \
  auto temp_storage = caching_allocator.allocate(temp_storage_bytes);     \
  func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__);              \
  AT_CUDA_CHECK(cudaGetLastError());                                      \
} while (false)
|
| 45 |
+
|
| 46 |
+
#ifdef USE_ROCM
|
| 47 |
+
#define NO_ROCM(x)
|
| 48 |
+
#define ROCM_HIPCUB(x) ::hipcub
|
| 49 |
+
#else
|
| 50 |
+
#define NO_ROCM(x) x
|
| 51 |
+
#define ROCM_HIPCUB(x) x
|
| 52 |
+
#endif
|
| 53 |
+
|
| 54 |
+
#if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || defined(USE_ROCM)
|
| 55 |
+
|
| 56 |
+
#if !defined(USE_ROCM)
|
| 57 |
+
namespace at_cuda_detail {
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16
|
| 61 |
+
|
| 62 |
+
template <>
|
| 63 |
+
struct ROCM_HIPCUB(cub)::FpLimits<c10::BFloat16>
|
| 64 |
+
{
|
| 65 |
+
static __host__ __device__ __forceinline__ c10::BFloat16 Max() {
|
| 66 |
+
unsigned short max_word = 0x7F7F;
|
| 67 |
+
return reinterpret_cast<c10::BFloat16&>(max_word);
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() {
|
| 71 |
+
unsigned short lowest_word = 0xFF7F;
|
| 72 |
+
return reinterpret_cast<c10::BFloat16&>(lowest_word);
|
| 73 |
+
}
|
| 74 |
+
};
|
| 75 |
+
|
| 76 |
+
template <>
|
| 77 |
+
struct ROCM_HIPCUB(cub)::NumericTraits<c10::BFloat16>:
|
| 78 |
+
ROCM_HIPCUB(cub)::BaseTraits<ROCM_HIPCUB(cub)::FLOATING_POINT, true, false, unsigned short, c10::BFloat16> {};
|
| 79 |
+
|
| 80 |
+
#if !defined(USE_ROCM)
|
| 81 |
+
} // namespace at_cuda_detail
|
| 82 |
+
#endif
|
| 83 |
+
|
| 84 |
+
#endif
|
| 85 |
+
|
| 86 |
+
#if !defined(USE_ROCM)
|
| 87 |
+
namespace at::native {
|
| 88 |
+
namespace cub = ::at_cuda_detail::cub;
|
| 89 |
+
} // namespace at::native
|
| 90 |
+
#endif
|
| 91 |
+
|
| 92 |
+
namespace at::cuda::cub {
|
| 93 |
+
|
| 94 |
+
namespace detail {

// Maps c10 scalar types to the native CUDA/HIP types cub operates on;
// identity mapping for everything else.
template<typename T>
struct cuda_type {
  using type = T;
};
template<>
struct cuda_type<c10::Half> {
  using type = __half;
};

#if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16()

// CUDA with bfloat16-aware cub: use the native __nv_bfloat16.
template<>
struct cuda_type<c10::BFloat16> {
  using type = __nv_bfloat16;
};

#elif defined(USE_ROCM)

// ROCm: HIP's bfloat16 type.
template<>
struct cuda_type<c10::BFloat16> {
  using type = hip_bfloat16;
};

#endif

} // namespace detail
|
| 122 |
+
|
| 123 |
+
// Radix-sorts (key, value) pairs within each of `num_segments` independent
// segments described by [begin_offsets, end_offsets). If `keys_out` is null,
// a scratch output buffer is allocated internally (sorted keys are then
// discarded with it). `begin_bit`/`end_bit` restrict the sort to a bit
// subrange of the key. Asynchronous on the current CUDA stream.
template<typename key_t, typename value_t, typename OffsetIteratorT>
inline void segmented_sort_pairs(
    const key_t *keys_in, key_t *keys_out,
    const value_t *values_in, value_t *values_out,
    int64_t num_elements, int64_t num_segments,
    OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
    bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8
) {
  // cub takes `int` counts, so both sizes must fit in int.
  TORCH_CHECK(num_elements <= std::numeric_limits<int>::max(),
    "cub sort does not support sorting more than INT_MAX elements");
  TORCH_CHECK(num_segments <= std::numeric_limits<int>::max(),
    "cub sort does not support sorting more than INT_MAX elements");
  using key_t_ = typename detail::cuda_type<key_t>::type;

  auto allocator = c10::cuda::CUDACachingAllocator::get();
  c10::DataPtr keys_out_owner;

  if (keys_out == nullptr) {
    keys_out_owner = allocator->allocate(num_elements * sizeof(key_t));
    keys_out = reinterpret_cast<key_t *>(keys_out_owner.get());
  }

  // Reinterpret c10 scalar types as the native CUDA types cub understands.
  const key_t_ *keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
  key_t_ *keys_out_ = reinterpret_cast<key_t_*>(keys_out);

  if (descending) {
    CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending,
      keys_in_, keys_out_, values_in, values_out,
      num_elements, num_segments, begin_offsets, end_offsets,
      begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
  } else {
    CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs,
      keys_in_, keys_out_, values_in, values_out,
      num_elements, num_segments, begin_offsets, end_offsets,
      begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());
  }
}
|
| 160 |
+
|
| 161 |
+
#if CUB_SUPPORTS_UNIQUE_BY_KEY()
// Wraps cub::DeviceSelect::UniqueByKey: compacts runs of consecutive equal
// keys, writing one value per run to `values_out` and the run count to
// `num_selected` (a device pointer/iterator). The unique keys themselves go
// to an internal scratch buffer and are discarded.
template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT, typename NumSelectedIteratorT>
inline void unique_by_key(
  KeysInputIteratorT keys_in, ValuesInputIteratorT values_in,
  ValuesOutputIteratorT values_out,
  NumSelectedIteratorT num_selected, int64_t num_input_items)
{
  // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed.
  using KeyT = typename std::iterator_traits<KeysInputIteratorT>::value_type;
  auto allocator = c10::cuda::CUDACachingAllocator::get();
  c10::DataPtr keys_out_owner;
  keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT));
  auto keys_out_ = static_cast<KeyT *>(keys_out_owner.get());
  CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey,
    keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream());
}
#endif
|
| 178 |
+
|
| 179 |
+
namespace impl {

// Single-thread kernel that combines *a and *b with scan_op into *out.
// Used to compute the carry value on-device when a long scan is split into
// chunks, avoiding a host synchronization.
template<typename InputIteratorT1, typename InputIteratorT2, typename OutputIteratorT, class ScanOpT>
C10_LAUNCH_BOUNDS_1(1)
__global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){
  // NOTE: out here not the final scan output, but an intermediate of the accumulation type.
  using acc_t = typename std::iterator_traits<OutputIteratorT>::value_type;
  *out = scan_op(static_cast<acc_t>(*a), static_cast<acc_t>(*b));
}

#if !CUB_SUPPORTS_FUTURE_VALUE()
// Minimal random-access iterator that yields *first at logical position 0 and
// iter[i - 1] afterwards — i.e. the input sequence shifted right by one with a
// device-resident carry spliced in front. Emulates cub::FutureValue on older
// cub versions that lack it.
template<typename ValueT, typename InputIteratorT>
struct chained_iterator {
  using iterator_category = std::random_access_iterator_tag;
  using difference_type = std::ptrdiff_t;
  using value_type = ValueT;
  using pointer = ValueT*;
  using reference = ValueT&;

  InputIteratorT iter;
  ValueT *first;
  difference_type offset = 0;

  __device__ ValueT operator[](difference_type i) {
    i += offset;
    if (i == 0) {
      return *first;
    } else {
      return ValueT(iter[i - 1]);
    }
  }
  // Advancing produces a copy with the shift folded into `offset`.
  __device__ chained_iterator operator+(difference_type i) {
    return chained_iterator{iter, first, i};
  }
  __device__ ValueT operator*() {
    return (*this)[0];
  }
};
#endif

// even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
// so split at int_max/2
constexpr int max_cub_size = std::numeric_limits<int>::max() / 2 + 1; // 2**30
}
|
| 223 |
+
|
| 224 |
+
// non synchronizing cub call
// even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
// so split at int_max/2
//
// Inclusive scan over `num_items` elements via cub::DeviceScan::InclusiveScan
// (hipCUB on ROCm). On the CUDA path, inputs larger than max_cub_size are
// processed in chunks: a 1-thread kernel combines the previous chunk's last
// output with the next input element to form the carry, which seeds the next
// chunk either through a TransformInputIterator shim (old cub) or
// cub::FutureValue.
template<typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, int max_cub_size=impl::max_cub_size>
inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) {
#if defined(USE_ROCM)
  //For ROCm, use hipCUB chained iterators
  CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan,
      input,
      output,
      scan_op,
      num_items,
      at::cuda::getCurrentCUDAStream());
  C10_HIP_KERNEL_LAUNCH_CHECK();
#else
  // non synchronizing cub call
  // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
  // so split at int_max/2
  int size_cub = std::min<int64_t>(num_items, max_cub_size);
  CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
      input,
      output,
      scan_op,
      size_cub,
      at::cuda::getCurrentCUDAStream());
  C10_CUDA_KERNEL_LAUNCH_CHECK();
  using input_t = typename std::iterator_traits<InputIteratorT>::value_type;
  for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) {
    auto allocator = c10::cuda::CUDACachingAllocator::get();
    // Device scratch for the chunk carry.
    // NOTE(review): the DataPtr is released at the end of each iteration while
    // the enqueued work may still be in flight — presumably safe because the
    // caching allocator's recycling is stream-ordered; confirm.
    c10::DataPtr first_elem = allocator->allocate(sizeof(input_t));
    auto first_elem_ptr = reinterpret_cast<input_t *>(first_elem.get());

    size_cub = std::min<int64_t>(num_items - i, max_cub_size);
    // Carry = scan_op(last output of previous chunk, first input of this chunk).
    impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
        output + i - 1,
        input + i,
        first_elem_ptr,
        scan_op);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
#if !CUB_SUPPORTS_FUTURE_VALUE()
    // Old cub: substitute the carry for element 0 of this chunk's input by
    // pairing each element with its index and swapping at key == 0.
    using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator<InputIteratorT>;
    using tuple = typename ArgIndexInputIterator::value_type;
    auto input_iter_transform = [=] __device__ (const tuple &x)->input_t {
      if (x.key == 0) {
        return *first_elem_ptr;
      } else {
        return x.value;
      }
    };
    auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator<input_t, decltype(input_iter_transform), ArgIndexInputIterator>(
      ArgIndexInputIterator(input + i), input_iter_transform);
    CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
        input_,
        output + i,
        scan_op,
        size_cub,
        at::cuda::getCurrentCUDAStream());
#else
    // Newer cub: an exclusive scan over the shifted input, seeded with the
    // device-resident carry via FutureValue, yields this chunk's inclusive
    // results.
    CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
        input + i + 1,
        output + i,
        scan_op,
        ::at_cuda_detail::cub::FutureValue<input_t>(first_elem_ptr),
        size_cub,
        at::cuda::getCurrentCUDAStream());
#endif
  }
#endif
}
|
| 293 |
+
|
| 294 |
+
// Exclusive scan over `num_items` elements via cub::DeviceScan::ExclusiveScan
// seeded with `init_value` (hipCUB on ROCm). Mirrors inclusive_scan's chunking
// strategy for inputs larger than max_cub_size: a 1-thread kernel computes the
// per-chunk carry on-device, which seeds the next chunk through
// impl::chained_iterator (old cub) or cub::FutureValue.
template<typename InputIteratorT, typename OutputIteratorT, typename ScanOpT, typename InitValueT, int max_cub_size=impl::max_cub_size>
inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) {
#if defined(USE_ROCM)
  //For ROCm, use hipCUB chained iterators
  CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan,
      input,
      output,
      scan_op,
      init_value,
      num_items,
      at::cuda::getCurrentCUDAStream());
  C10_HIP_KERNEL_LAUNCH_CHECK();
#else
  // non synchronizing cub call
  // even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
  // so split at int_max/2
  int size_cub = std::min<int64_t>(num_items, max_cub_size);
  CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
      input,
      output,
      scan_op,
      init_value,
      size_cub,
      at::cuda::getCurrentCUDAStream());
  C10_CUDA_KERNEL_LAUNCH_CHECK();
  for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) {
    auto allocator = c10::cuda::CUDACachingAllocator::get();
    // Device scratch for the chunk carry (same lifetime caveat as in
    // inclusive_scan above).
    c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT));
    auto first_elem_ptr = reinterpret_cast<InitValueT *>(first_elem.get());

    size_cub = std::min<int64_t>(num_items - i, max_cub_size);
    // Carry = scan_op(last exclusive output of previous chunk, its last input)
    // — i.e. the exclusive prefix for position i.
    impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
        output + i - 1,
        input + i - 1,
        first_elem_ptr,
        scan_op);
    C10_CUDA_KERNEL_LAUNCH_CHECK();
#if !CUB_SUPPORTS_FUTURE_VALUE()
    // Old cub: an inclusive scan over the carry-prefixed, shifted view
    // produces this chunk's exclusive results.
    auto input_ = impl::chained_iterator<InitValueT, InputIteratorT>{
      input + i, first_elem_ptr};
    CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan,
        input_,
        output + i,
        scan_op,
        size_cub,
        at::cuda::getCurrentCUDAStream());
#else
    CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan,
        input + i,
        output + i,
        scan_op,
        ::at_cuda_detail::cub::FutureValue<InitValueT>(first_elem_ptr),
        size_cub,
        at::cuda::getCurrentCUDAStream());
#endif
  }
#endif
}
|
| 352 |
+
|
| 353 |
+
#if CUB_SUPPORTS_SCAN_BY_KEY()
|
| 354 |
+
|
| 355 |
+
template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT>
|
| 356 |
+
inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) {
|
| 357 |
+
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
|
| 358 |
+
"cub InclusiveSumByKey does not support more than INT_MAX elements");
|
| 359 |
+
CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey,
|
| 360 |
+
keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream());
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename ValuesOutputIteratorT, typename ScanOpT>
|
| 364 |
+
inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) {
|
| 365 |
+
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
|
| 366 |
+
"cub InclusiveSumByKey does not support more than INT_MAX elements");
|
| 367 |
+
CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey,
|
| 368 |
+
keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream());
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
#endif
|
| 372 |
+
|
| 373 |
+
template <typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT>
|
| 374 |
+
void unique(InputIteratorT input, OutputIteratorT output,
|
| 375 |
+
NumSelectedIteratorT num_selected_out, int64_t num_items) {
|
| 376 |
+
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
|
| 377 |
+
"cub unique does not support more than INT_MAX elements");
|
| 378 |
+
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique,
|
| 379 |
+
input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream());
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
template <typename InputIteratorT, typename OutputIteratorT, typename CountsOutputIteratorT,
|
| 383 |
+
typename LengthOutputIteratorT>
|
| 384 |
+
void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out,
|
| 385 |
+
LengthOutputIteratorT length_out, int64_t num_items) {
|
| 386 |
+
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
|
| 387 |
+
"cub run_length_encode does not support more than INT_MAX elements");
|
| 388 |
+
CUB_WRAPPER(
|
| 389 |
+
NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode,
|
| 390 |
+
input, output, counts_out, length_out, num_items,
|
| 391 |
+
at::cuda::getCurrentCUDAStream());
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
template <typename InputIteratorT, typename OutputIteratorT, typename ReductionOpT, typename T>
|
| 395 |
+
void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T init) {
|
| 396 |
+
TORCH_CHECK(num_items <= std::numeric_limits<int>::max(),
|
| 397 |
+
"cub reduce does not support more than INT_MAX elements");
|
| 398 |
+
CUB_WRAPPER(
|
| 399 |
+
NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce,
|
| 400 |
+
input, output, num_items, op, init,
|
| 401 |
+
at::cuda::getCurrentCUDAStream());
|
| 402 |
+
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
} // namespace at::cuda::cub
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#if !defined(USE_ROCM)
|
| 4 |
+
#include <cuda.h> // for CUDA_VERSION
|
| 5 |
+
#endif
|
| 6 |
+
|
| 7 |
+
#if !defined(USE_ROCM)
|
| 8 |
+
#include <cub/version.cuh>
|
| 9 |
+
#else
|
| 10 |
+
#define CUB_VERSION 0
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
// cub sort support for __nv_bfloat16 is added to cub 1.13 in:
|
| 14 |
+
// https://github.com/NVIDIA/cub/pull/306
|
| 15 |
+
#if CUB_VERSION >= 101300
|
| 16 |
+
#define CUB_SUPPORTS_NV_BFLOAT16() true
|
| 17 |
+
#else
|
| 18 |
+
#define CUB_SUPPORTS_NV_BFLOAT16() false
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
|
| 22 |
+
// https://github.com/NVIDIA/cub/pull/326
|
| 23 |
+
// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
|
| 24 |
+
// starting from CUDA 11.5
|
| 25 |
+
#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
|
| 26 |
+
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
|
| 27 |
+
#else
|
| 28 |
+
#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
|
| 29 |
+
#endif
|
| 30 |
+
|
| 31 |
+
// cub support for UniqueByKey is added to cub 1.16 in:
|
| 32 |
+
// https://github.com/NVIDIA/cub/pull/405
|
| 33 |
+
#if CUB_VERSION >= 101600
|
| 34 |
+
#define CUB_SUPPORTS_UNIQUE_BY_KEY() true
|
| 35 |
+
#else
|
| 36 |
+
#define CUB_SUPPORTS_UNIQUE_BY_KEY() false
|
| 37 |
+
#endif
|
| 38 |
+
|
| 39 |
+
// cub support for scan by key is added to cub 1.15
|
| 40 |
+
// in https://github.com/NVIDIA/cub/pull/376
|
| 41 |
+
#if CUB_VERSION >= 101500
|
| 42 |
+
#define CUB_SUPPORTS_SCAN_BY_KEY() 1
|
| 43 |
+
#else
|
| 44 |
+
#define CUB_SUPPORTS_SCAN_BY_KEY() 0
|
| 45 |
+
#endif
|
| 46 |
+
|
| 47 |
+
// cub support for cub::FutureValue is added to cub 1.15 in:
|
| 48 |
+
// https://github.com/NVIDIA/cub/pull/305
|
| 49 |
+
#if CUB_VERSION >= 101500
|
| 50 |
+
#define CUB_SUPPORTS_FUTURE_VALUE() true
|
| 51 |
+
#else
|
| 52 |
+
#define CUB_SUPPORTS_FUTURE_VALUE() false
|
| 53 |
+
#endif
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/jit_macros.h>
|
| 3 |
+
|
| 4 |
+
#if AT_USE_JITERATOR()
|
| 5 |
+
|
| 6 |
+
#include <c10/macros/Export.h>
|
| 7 |
+
#include <c10/util/SmallVector.h>
|
| 8 |
+
#include <ATen/core/Tensor.h>
|
| 9 |
+
|
| 10 |
+
#include <string>
|
| 11 |
+
#include <vector>
|
| 12 |
+
|
| 13 |
+
namespace at::cuda {
|
| 14 |
+
|
| 15 |
+
TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
|
| 16 |
+
const std::string& code_string,
|
| 17 |
+
const std::string& kernel_name,
|
| 18 |
+
const int num_outputs,
|
| 19 |
+
const c10::SmallVector<at::Tensor>& tensors,
|
| 20 |
+
const c10::SmallVector<at::Scalar>& extra_args,
|
| 21 |
+
bool return_by_ref);
|
| 22 |
+
|
| 23 |
+
} // namespace at::cuda
|
| 24 |
+
|
| 25 |
+
#else
|
| 26 |
+
|
| 27 |
+
namespace at::cuda {
|
| 28 |
+
|
| 29 |
+
TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
|
| 30 |
+
const std::string& code_string,
|
| 31 |
+
const std::string& kernel_name,
|
| 32 |
+
const int num_outputs,
|
| 33 |
+
const c10::SmallVector<at::Tensor>& tensors,
|
| 34 |
+
const c10::SmallVector<at::Scalar>& extra_args,
|
| 35 |
+
bool return_by_ref) {
|
| 36 |
+
TORCH_CHECK(false, "Jiterator is not supported");
|
| 37 |
+
}
|
| 38 |
+
} // namespace at::cuda
|
| 39 |
+
|
| 40 |
+
#endif // AT_USE_JITERATOR()
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmRocblas.h
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Microsoft Corporation. All rights reserved.
|
| 2 |
+
// Licensed under the MIT License.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 7 |
+
#include <ATen/cuda/tunable/TunableOp.h>
|
| 8 |
+
#include <ATen/cuda/tunable/GemmCommon.h>
|
| 9 |
+
#include <c10/util/StringUtil.h>
|
| 10 |
+
|
| 11 |
+
#define ROCBLAS_BETA_FEATURES_API
|
| 12 |
+
#include <rocblas/rocblas.h>
|
| 13 |
+
|
| 14 |
+
#define TORCH_ROCBLAS_CHECK(EXPR) \
|
| 15 |
+
do { \
|
| 16 |
+
rocblas_status __err = EXPR; \
|
| 17 |
+
TORCH_CHECK(__err == rocblas_status_success, \
|
| 18 |
+
"rocblas error: ", \
|
| 19 |
+
rocblas_status_to_string(__err), \
|
| 20 |
+
" when calling `" #EXPR "`"); \
|
| 21 |
+
} while (0)
|
| 22 |
+
|
| 23 |
+
namespace at::cuda::tunable {
|
| 24 |
+
|
| 25 |
+
template <typename T>
|
| 26 |
+
constexpr rocblas_datatype RocBlasDataTypeFor();
|
| 27 |
+
|
| 28 |
+
template <>
|
| 29 |
+
constexpr rocblas_datatype RocBlasDataTypeFor<float>() {
|
| 30 |
+
return rocblas_datatype_f32_r;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
template <>
|
| 34 |
+
constexpr rocblas_datatype RocBlasDataTypeFor<double>() {
|
| 35 |
+
return rocblas_datatype_f64_r;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
template <>
|
| 39 |
+
constexpr rocblas_datatype RocBlasDataTypeFor<Half>() {
|
| 40 |
+
return rocblas_datatype_f16_r;
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
template <>
|
| 44 |
+
constexpr rocblas_datatype RocBlasDataTypeFor<BFloat16>() {
|
| 45 |
+
return rocblas_datatype_bf16_r;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
template <>
|
| 49 |
+
constexpr rocblas_datatype RocBlasDataTypeFor<c10::complex<float>>() {
|
| 50 |
+
return rocblas_datatype_f32_c;
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
template <>
|
| 54 |
+
constexpr rocblas_datatype RocBlasDataTypeFor<c10::complex<double>>() {
|
| 55 |
+
return rocblas_datatype_f64_c;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
template <typename T>
|
| 59 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor();
|
| 60 |
+
|
| 61 |
+
template <>
|
| 62 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor<float>() {
|
| 63 |
+
return rocblas_datatype_f32_r;
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
template <>
|
| 67 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor<double>() {
|
| 68 |
+
return rocblas_datatype_f64_r;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
template <>
|
| 72 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor<Half>() {
|
| 73 |
+
// Note that we're returning the _compute_ type for a given datatype.
|
| 74 |
+
// As of 12/2022, using compute type FP16 for 16-bit floats was much
|
| 75 |
+
// slower than using compute type FP32. So we use FP32 compute even for
|
| 76 |
+
// FP16 datatypes. This is how GEMM is implemented even in the function
|
| 77 |
+
// rocblasGemmHelper (see fpgeneric.h)
|
| 78 |
+
return rocblas_datatype_f32_r;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template <>
|
| 82 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor<BFloat16>() {
|
| 83 |
+
// Note that we're returning the _compute_ type for a given datatype.
|
| 84 |
+
// As of 12/2022, using compute type FP16 for 16-bit floats was much
|
| 85 |
+
// slower than using compute type FP32. So we use FP32 compute even for
|
| 86 |
+
// BF16 datatypes. This is how GEMM is implemented even in the function
|
| 87 |
+
// rocblasGemmHelper (see fpgeneric.h)
|
| 88 |
+
return rocblas_datatype_f32_r;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
template <>
|
| 92 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor<c10::complex<float>>() {
|
| 93 |
+
return rocblas_datatype_f32_c;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
template <>
|
| 97 |
+
constexpr rocblas_datatype RocBlasComputeTypeFor<c10::complex<double>>() {
|
| 98 |
+
return rocblas_datatype_f64_c;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
template <typename T>
|
| 102 |
+
auto DoCastForHalfOrBfloat16(const T fp) {
|
| 103 |
+
return fp;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
template <>
|
| 107 |
+
inline auto DoCastForHalfOrBfloat16<Half>(const Half fp) {
|
| 108 |
+
// alpha and beta should be the same as compute_type, in Half case it is float.
|
| 109 |
+
float h = fp;
|
| 110 |
+
return h;
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
template <>
|
| 114 |
+
inline auto DoCastForHalfOrBfloat16<BFloat16>(const BFloat16 fp) {
|
| 115 |
+
// alpha and beta should be the same as compute_type, in bfloat16 case it is float.
|
| 116 |
+
float h = fp;
|
| 117 |
+
return h;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
static rocblas_operation _rocblasOpFromChar(char op) {
|
| 121 |
+
switch (op) {
|
| 122 |
+
case 'n':
|
| 123 |
+
case 'N':
|
| 124 |
+
return rocblas_operation_none;
|
| 125 |
+
case 't':
|
| 126 |
+
case 'T':
|
| 127 |
+
return rocblas_operation_transpose;
|
| 128 |
+
case 'c':
|
| 129 |
+
case 'C':
|
| 130 |
+
return rocblas_operation_conjugate_transpose;
|
| 131 |
+
}
|
| 132 |
+
AT_ERROR(
|
| 133 |
+
"_rocblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
template <typename T>
|
| 137 |
+
class RocblasGemmOp : public Callable<GemmParams<T>> {
|
| 138 |
+
public:
|
| 139 |
+
RocblasGemmOp(int solution) : solution_{solution} {}
|
| 140 |
+
|
| 141 |
+
TuningStatus Call(const GemmParams<T>* params) override {
|
| 142 |
+
auto input_output_type = RocBlasDataTypeFor<T>();
|
| 143 |
+
auto compute_type = RocBlasComputeTypeFor<T>();
|
| 144 |
+
auto h_a = DoCastForHalfOrBfloat16(params->alpha);
|
| 145 |
+
auto h_b = DoCastForHalfOrBfloat16(params->beta);
|
| 146 |
+
auto status = rocblas_gemm_ex(
|
| 147 |
+
(rocblas_handle)at::cuda::getCurrentCUDABlasHandle(),
|
| 148 |
+
_rocblasOpFromChar(params->transa),
|
| 149 |
+
_rocblasOpFromChar(params->transb),
|
| 150 |
+
params->m, params->n, params->k,
|
| 151 |
+
&h_a,
|
| 152 |
+
params->a, input_output_type, params->lda,
|
| 153 |
+
params->b, input_output_type, params->ldb,
|
| 154 |
+
&h_b,
|
| 155 |
+
params->c, input_output_type, params->ldc,
|
| 156 |
+
params->c, input_output_type, params->ldc,
|
| 157 |
+
compute_type,
|
| 158 |
+
rocblas_gemm_algo_solution_index,
|
| 159 |
+
solution_,
|
| 160 |
+
rocblas_gemm_flags_none);
|
| 161 |
+
if (status != rocblas_status_success) {
|
| 162 |
+
return FAIL;
|
| 163 |
+
}
|
| 164 |
+
return OK;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
private:
|
| 168 |
+
int solution_;
|
| 169 |
+
};
|
| 170 |
+
|
| 171 |
+
template <typename T>
|
| 172 |
+
auto GetRocBlasGemmTypeStringAndOps() {
|
| 173 |
+
rocblas_handle handle = (rocblas_handle)at::cuda::getCurrentCUDABlasHandle();
|
| 174 |
+
int solution_size;
|
| 175 |
+
auto input_output_type = RocBlasDataTypeFor<T>();
|
| 176 |
+
auto compute_type = RocBlasComputeTypeFor<T>();
|
| 177 |
+
// Get the number of available solutions
|
| 178 |
+
TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
|
| 179 |
+
input_output_type,
|
| 180 |
+
input_output_type,
|
| 181 |
+
compute_type,
|
| 182 |
+
rocblas_gemm_flags_none,
|
| 183 |
+
nullptr,
|
| 184 |
+
&solution_size));
|
| 185 |
+
std::vector<int> solutions(solution_size);
|
| 186 |
+
// Get the list of available solutions
|
| 187 |
+
TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
|
| 188 |
+
input_output_type,
|
| 189 |
+
input_output_type,
|
| 190 |
+
compute_type,
|
| 191 |
+
rocblas_gemm_flags_none,
|
| 192 |
+
solutions.data(),
|
| 193 |
+
&solution_size));
|
| 194 |
+
// Sort the solutions in ascending order to make the solution vector deterministic across runs
|
| 195 |
+
std::sort(solutions.begin(), solutions.end());
|
| 196 |
+
|
| 197 |
+
std::vector<std::pair<std::string, std::unique_ptr<Callable<GemmParams<T>>>>> ret;
|
| 198 |
+
for (size_t i = 0; i < solutions.size(); ++i) {
|
| 199 |
+
auto callable = std::make_unique<RocblasGemmOp<T>>(solutions[i]);
|
| 200 |
+
ret.emplace_back(std::make_pair(c10::str("Gemm_Rocblas_", solutions[i]), std::move(callable)));
|
| 201 |
+
}
|
| 202 |
+
return ret;
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
template <typename T>
|
| 206 |
+
class RocblasGemmStridedBatchedOp : public Callable<GemmStridedBatchedParams<T>> {
|
| 207 |
+
public:
|
| 208 |
+
RocblasGemmStridedBatchedOp(int solution) : solution_{solution} {}
|
| 209 |
+
|
| 210 |
+
TuningStatus Call(const GemmStridedBatchedParams<T>* params) override {
|
| 211 |
+
auto input_output_type = RocBlasDataTypeFor<T>();
|
| 212 |
+
auto compute_type = RocBlasComputeTypeFor<T>();
|
| 213 |
+
auto h_a = DoCastForHalfOrBfloat16(params->alpha);
|
| 214 |
+
auto h_b = DoCastForHalfOrBfloat16(params->beta);
|
| 215 |
+
auto status = rocblas_gemm_strided_batched_ex(
|
| 216 |
+
(rocblas_handle)at::cuda::getCurrentCUDABlasHandle(),
|
| 217 |
+
_rocblasOpFromChar(params->transa),
|
| 218 |
+
_rocblasOpFromChar(params->transb),
|
| 219 |
+
params->m, params->n, params->k,
|
| 220 |
+
&h_a,
|
| 221 |
+
params->a, input_output_type, params->lda, params->stride_a,
|
| 222 |
+
params->b, input_output_type, params->ldb, params->stride_b,
|
| 223 |
+
&h_b,
|
| 224 |
+
params->c, input_output_type, params->ldc, params->stride_c,
|
| 225 |
+
params->c, input_output_type, params->ldc, params->stride_c,
|
| 226 |
+
params->batch,
|
| 227 |
+
compute_type,
|
| 228 |
+
rocblas_gemm_algo_solution_index,
|
| 229 |
+
solution_,
|
| 230 |
+
rocblas_gemm_flags_none);
|
| 231 |
+
if (status != rocblas_status_success) {
|
| 232 |
+
return FAIL;
|
| 233 |
+
}
|
| 234 |
+
return OK;
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
private:
|
| 238 |
+
int solution_;
|
| 239 |
+
};
|
| 240 |
+
|
| 241 |
+
template <typename T>
|
| 242 |
+
auto GetRocBlasGemmStridedBatchedTypeStringAndOps() {
|
| 243 |
+
rocblas_handle handle = (rocblas_handle)at::cuda::getCurrentCUDABlasHandle();
|
| 244 |
+
int solution_size;
|
| 245 |
+
auto input_output_type = RocBlasDataTypeFor<T>();
|
| 246 |
+
auto compute_type = RocBlasComputeTypeFor<T>();
|
| 247 |
+
// Get the number of available solutions
|
| 248 |
+
TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
|
| 249 |
+
input_output_type,
|
| 250 |
+
input_output_type,
|
| 251 |
+
compute_type,
|
| 252 |
+
rocblas_gemm_flags_none,
|
| 253 |
+
nullptr,
|
| 254 |
+
&solution_size));
|
| 255 |
+
std::vector<int> solutions(solution_size);
|
| 256 |
+
// Get the list of available solutions
|
| 257 |
+
TORCH_ROCBLAS_CHECK(rocblas_gemm_ex_get_solutions_by_type(handle,
|
| 258 |
+
input_output_type,
|
| 259 |
+
input_output_type,
|
| 260 |
+
compute_type,
|
| 261 |
+
rocblas_gemm_flags_none,
|
| 262 |
+
solutions.data(),
|
| 263 |
+
&solution_size));
|
| 264 |
+
// Sort the solutions in ascending order to make the solution vector deterministic across runs
|
| 265 |
+
std::sort(solutions.begin(), solutions.end());
|
| 266 |
+
|
| 267 |
+
std::vector<std::pair<std::string, std::unique_ptr<Callable<GemmStridedBatchedParams<T>>>>> ret;
|
| 268 |
+
for (size_t i = 0; i < solutions.size(); ++i) {
|
| 269 |
+
auto callable = std::make_unique<RocblasGemmStridedBatchedOp<T>>(solutions[i]);
|
| 270 |
+
ret.emplace_back(std::make_pair(c10::str("Gemm_Rocblas_", solutions[i]), std::move(callable)));
|
| 271 |
+
}
|
| 272 |
+
return ret;
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
} // namespace at::cuda::tunable
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/StreamTimer.h
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Original TunableOp is from onnxruntime.
|
| 2 |
+
// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
|
| 3 |
+
// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
|
| 4 |
+
// Copyright (c) Microsoft Corporation.
|
| 5 |
+
// Licensed under the MIT license.
|
| 6 |
+
//
|
| 7 |
+
// Adapting TunableOp into PyTorch
|
| 8 |
+
// Copyright (c) Advanced Micro Devices, Inc.
|
| 9 |
+
//
|
| 10 |
+
#pragma once
|
| 11 |
+
|
| 12 |
+
#include <cuda_runtime.h>
|
| 13 |
+
|
| 14 |
+
#include <ATen/cuda/tunable/Tunable.h>
|
| 15 |
+
|
| 16 |
+
namespace at::cuda::tunable {
|
| 17 |
+
|
| 18 |
+
class StreamTimer : public ITimer {
|
| 19 |
+
public:
|
| 20 |
+
StreamTimer();
|
| 21 |
+
virtual ~StreamTimer() override;
|
| 22 |
+
|
| 23 |
+
void Start() override;
|
| 24 |
+
|
| 25 |
+
void End() override;
|
| 26 |
+
|
| 27 |
+
float Duration() override;
|
| 28 |
+
|
| 29 |
+
private:
|
| 30 |
+
cudaEvent_t start_;
|
| 31 |
+
cudaEvent_t end_;
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
} // namespace at::cuda::tunable
|
vllm/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Original TunableOp is from onnxruntime.
|
| 2 |
+
// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h
|
| 3 |
+
// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable
|
| 4 |
+
// Copyright (c) Microsoft Corporation.
|
| 5 |
+
// Licensed under the MIT license.
|
| 6 |
+
//
|
| 7 |
+
// Adapting TunableOp into PyTorch
|
| 8 |
+
// Copyright (c) Advanced Micro Devices, Inc.
|
| 9 |
+
//
|
| 10 |
+
#pragma once
|
| 11 |
+
|
| 12 |
+
#include <ATen/cuda/tunable/GemmCommon.h>
|
| 13 |
+
#ifdef USE_ROCM
|
| 14 |
+
#include <ATen/cuda/tunable/GemmHipblaslt.h>
|
| 15 |
+
#include <ATen/cuda/tunable/GemmRocblas.h>
|
| 16 |
+
#endif
|
| 17 |
+
#include <ATen/cuda/tunable/StreamTimer.h>
|
| 18 |
+
#include <ATen/cuda/tunable/TunableOp.h>
|
| 19 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 20 |
+
#include <c10/util/Float8_e4m3fn.h>
|
| 21 |
+
#include <c10/util/Float8_e4m3fnuz.h>
|
| 22 |
+
#include <c10/util/Float8_e5m2.h>
|
| 23 |
+
#include <c10/util/Float8_e5m2fnuz.h>
|
| 24 |
+
#include <c10/util/StringUtil.h>
|
| 25 |
+
|
| 26 |
+
namespace at::cuda::tunable {
|
| 27 |
+
|
| 28 |
+
template <typename T>
|
| 29 |
+
class DefaultGemmOp : public Callable<GemmParams<T>> {
|
| 30 |
+
public:
|
| 31 |
+
TuningStatus Call(const GemmParams<T>* params) override {
|
| 32 |
+
at::cuda::blas::gemm_internal<T>(
|
| 33 |
+
params->transa, params->transb,
|
| 34 |
+
params->m, params->n, params->k,
|
| 35 |
+
params->alpha,
|
| 36 |
+
params->a, params->lda,
|
| 37 |
+
params->b, params->ldb,
|
| 38 |
+
params->beta,
|
| 39 |
+
params->c, params->ldc);
|
| 40 |
+
return OK;
|
| 41 |
+
}
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
static bool _transposeBoolFromChar(char op) {
|
| 45 |
+
return op == 't' || op == 'T';
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
template <typename T>
|
| 49 |
+
class DefaultGemmAndBiasOp : public Callable<GemmAndBiasParams<T>> {
|
| 50 |
+
public:
|
| 51 |
+
TuningStatus Call(const GemmAndBiasParams<T>* params) override {
|
| 52 |
+
at::cuda::blas::gemm_and_bias<T>(
|
| 53 |
+
_transposeBoolFromChar(params->transa),
|
| 54 |
+
_transposeBoolFromChar(params->transb),
|
| 55 |
+
params->m, params->n, params->k,
|
| 56 |
+
params->alpha,
|
| 57 |
+
params->a, params->lda,
|
| 58 |
+
params->b, params->ldb,
|
| 59 |
+
params->bias,
|
| 60 |
+
params->c, params->ldc,
|
| 61 |
+
params->activation);
|
| 62 |
+
return OK;
|
| 63 |
+
}
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
template <typename T>
|
| 67 |
+
class DefaultGemmStridedBatchedOp : public Callable<GemmStridedBatchedParams<T>> {
|
| 68 |
+
public:
|
| 69 |
+
TuningStatus Call(const GemmStridedBatchedParams<T>* params) override {
|
| 70 |
+
at::cuda::blas::bgemm_internal<T>(
|
| 71 |
+
params->transa, params->transb,
|
| 72 |
+
params->m, params->n, params->k,
|
| 73 |
+
params->alpha,
|
| 74 |
+
params->a, params->lda, params->stride_a,
|
| 75 |
+
params->b, params->ldb, params->stride_b,
|
| 76 |
+
params->beta,
|
| 77 |
+
params->c, params->ldc, params->stride_c,
|
| 78 |
+
params->batch);
|
| 79 |
+
return OK;
|
| 80 |
+
}
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
template <typename T>
|
| 84 |
+
class DefaultScaledGemmOp : public Callable<ScaledGemmParams<T>> {
|
| 85 |
+
public:
|
| 86 |
+
TuningStatus Call(const ScaledGemmParams<T>* params) override {
|
| 87 |
+
at::cuda::blas::scaled_gemm(
|
| 88 |
+
params->transa,
|
| 89 |
+
params->transb,
|
| 90 |
+
params->m,
|
| 91 |
+
params->n,
|
| 92 |
+
params->k,
|
| 93 |
+
params->a,
|
| 94 |
+
params->a_scale_ptr,
|
| 95 |
+
params->lda,
|
| 96 |
+
params->a_dtype,
|
| 97 |
+
params->b,
|
| 98 |
+
params->b_scale_ptr,
|
| 99 |
+
params->ldb,
|
| 100 |
+
params->b_dtype,
|
| 101 |
+
params->bias_ptr,
|
| 102 |
+
params->bias_dtype,
|
| 103 |
+
params->c,
|
| 104 |
+
params->c_scale_ptr,
|
| 105 |
+
params->ldc,
|
| 106 |
+
params->c_dtype,
|
| 107 |
+
params->amax_ptr,
|
| 108 |
+
params->use_fast_accum);
|
| 109 |
+
return OK;
|
| 110 |
+
}
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
template <typename T>
|
| 114 |
+
inline bool IsZero(T v) {
|
| 115 |
+
return v == 0.0f;
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
template <>
|
| 119 |
+
inline bool IsZero(BFloat16 v) {
|
| 120 |
+
return v.x == 0;
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
template <>
|
| 124 |
+
inline bool IsZero(Half v) {
|
| 125 |
+
return float(v) == 0.0f;
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
template <>
|
| 129 |
+
inline bool IsZero(c10::complex<double> v) {
|
| 130 |
+
return v == 0.0;
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
template <>
|
| 134 |
+
inline bool IsZero(c10::complex<float> v) {
|
| 135 |
+
return v == 0.0f;
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
template <typename T>
|
| 139 |
+
inline std::string TypeName(T v) {
|
| 140 |
+
return "unknown";
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
template <>
|
| 144 |
+
inline std::string TypeName(float v) {
|
| 145 |
+
return "float";
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
template <>
|
| 149 |
+
inline std::string TypeName(double v) {
|
| 150 |
+
return "double";
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
template <>
|
| 154 |
+
inline std::string TypeName(BFloat16 v) {
|
| 155 |
+
return "BFloat16";
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
template <>
|
| 159 |
+
inline std::string TypeName(Half v) {
|
| 160 |
+
return "Half";
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
template <>
|
| 164 |
+
inline std::string TypeName(Float8_e4m3fn v) {
|
| 165 |
+
return "Float8_e4m3fn";
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
template <>
|
| 169 |
+
inline std::string TypeName(Float8_e5m2 v) {
|
| 170 |
+
return "Float8_e5m2";
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
template <>
|
| 174 |
+
inline std::string TypeName(Float8_e4m3fnuz v) {
|
| 175 |
+
return "Float8_e4m3fnuz";
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
template <>
|
| 179 |
+
inline std::string TypeName(Float8_e5m2fnuz v) {
|
| 180 |
+
return "Float8_e5m2fnuz";
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
template <>
|
| 184 |
+
inline std::string TypeName(c10::complex<double> v) {
|
| 185 |
+
return "c10::complex<double>";
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
template <>
|
| 189 |
+
inline std::string TypeName(c10::complex<float> v) {
|
| 190 |
+
return "c10::complex<float>";
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
template <typename T, BlasOp ALayout, BlasOp BLayout>
|
| 194 |
+
class GemmTunableOp : public TunableOp<GemmParams<T>, StreamTimer> {
|
| 195 |
+
public:
|
| 196 |
+
GemmTunableOp() {
|
| 197 |
+
this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmOp<T>>());
|
| 198 |
+
|
| 199 |
+
#ifdef USE_ROCM
|
| 200 |
+
static const char *env_rocblas = std::getenv("PYTORCH_TUNABLEOP_ROCBLAS_ENABLED");
|
| 201 |
+
if (env_rocblas == nullptr || strcmp(env_rocblas, "1") == 0) {
|
| 202 |
+
for (auto&& [name, op] : GetRocBlasGemmTypeStringAndOps<T>()) {
|
| 203 |
+
this->RegisterOp(std::move(name), std::move(op));
|
| 204 |
+
}
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
|
| 208 |
+
if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) {
|
| 209 |
+
// disallow tuning of hipblaslt with c10::complex
|
| 210 |
+
if constexpr (
|
| 211 |
+
!std::is_same_v<T, c10::complex<float>> &&
|
| 212 |
+
!std::is_same_v<T, c10::complex<double>>) {
|
| 213 |
+
for (auto&& [name, op] : GetHipBlasLtGemmTypeStringAndOps<T, ALayout, BLayout>()) {
|
| 214 |
+
this->RegisterOp(std::move(name), std::move(op));
|
| 215 |
+
}
|
| 216 |
+
}
|
| 217 |
+
}
|
| 218 |
+
#endif
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
std::string Signature() override {
|
| 222 |
+
return c10::str("GemmTunableOp_", TypeName<T>(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout));
|
| 223 |
+
}
|
| 224 |
+
};
|
| 225 |
+
|
| 226 |
+
template <typename T, BlasOp ALayout, BlasOp BLayout>
|
| 227 |
+
class GemmAndBiasTunableOp : public TunableOp<GemmAndBiasParams<T>, StreamTimer> {
|
| 228 |
+
public:
|
| 229 |
+
GemmAndBiasTunableOp() {
|
| 230 |
+
this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmAndBiasOp<T>>());
|
| 231 |
+
|
| 232 |
+
#ifdef USE_ROCM
|
| 233 |
+
static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
|
| 234 |
+
if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) {
|
| 235 |
+
// disallow tuning of hipblaslt with c10::complex
|
| 236 |
+
if constexpr (
|
| 237 |
+
!std::is_same_v<T, c10::complex<float>> &&
|
| 238 |
+
!std::is_same_v<T, c10::complex<double>>) {
|
| 239 |
+
for (auto&& [name, op] : GetHipBlasLtGemmAndBiasTypeStringAndOps<T, ALayout, BLayout>()) {
|
| 240 |
+
this->RegisterOp(std::move(name), std::move(op));
|
| 241 |
+
}
|
| 242 |
+
}
|
| 243 |
+
}
|
| 244 |
+
#endif
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
std::string Signature() override {
|
| 248 |
+
return c10::str("GemmAndBiasTunableOp_", TypeName<T>(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout));
|
| 249 |
+
}
|
| 250 |
+
};
|
| 251 |
+
|
| 252 |
+
template <typename T, BlasOp ALayout, BlasOp BLayout>
|
| 253 |
+
class GemmStridedBatchedTunableOp : public TunableOp<GemmStridedBatchedParams<T>, StreamTimer> {
|
| 254 |
+
public:
|
| 255 |
+
GemmStridedBatchedTunableOp() {
|
| 256 |
+
this->RegisterOp(std::string("Default"), std::make_unique<DefaultGemmStridedBatchedOp<T>>());
|
| 257 |
+
|
| 258 |
+
#ifdef USE_ROCM
|
| 259 |
+
static const char *env_rocblas = std::getenv("PYTORCH_TUNABLEOP_ROCBLAS_ENABLED");
|
| 260 |
+
if (env_rocblas == nullptr || strcmp(env_rocblas, "1") == 0) {
|
| 261 |
+
for (auto&& [name, op] : GetRocBlasGemmStridedBatchedTypeStringAndOps<T>()) {
|
| 262 |
+
this->RegisterOp(std::move(name), std::move(op));
|
| 263 |
+
}
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
|
| 267 |
+
if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) {
|
| 268 |
+
// disallow tuning of hipblaslt with c10::complex
|
| 269 |
+
if constexpr (
|
| 270 |
+
!std::is_same_v<T, c10::complex<float>> &&
|
| 271 |
+
!std::is_same_v<T, c10::complex<double>>) {
|
| 272 |
+
for (auto&& [name, op] : GetHipBlasLtGemmStridedBatchedTypeStringAndOps<T, ALayout, BLayout>()) {
|
| 273 |
+
this->RegisterOp(std::move(name), std::move(op));
|
| 274 |
+
}
|
| 275 |
+
}
|
| 276 |
+
}
|
| 277 |
+
#endif
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
std::string Signature() override {
|
| 281 |
+
return c10::str("GemmStridedBatchedTunableOp_", TypeName<T>(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout));
|
| 282 |
+
}
|
| 283 |
+
};
|
| 284 |
+
|
| 285 |
+
template <typename AT, typename BT, typename CT, BlasOp ALayout, BlasOp BLayout>
|
| 286 |
+
class ScaledGemmTunableOp : public TunableOp<ScaledGemmParams<CT>, StreamTimer> {
|
| 287 |
+
public:
|
| 288 |
+
ScaledGemmTunableOp() {
|
| 289 |
+
this->RegisterOp(std::string("Default"), std::make_unique<DefaultScaledGemmOp<CT>>());
|
| 290 |
+
|
| 291 |
+
#ifdef USE_ROCM
|
| 292 |
+
for (auto&& [name, op] : GetHipBlasLtScaledGemmTypeStringAndOps<AT, BT, CT, ALayout, BLayout>()) {
|
| 293 |
+
this->RegisterOp(std::move(name), std::move(op));
|
| 294 |
+
}
|
| 295 |
+
#endif
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
std::string Signature() override {
|
| 299 |
+
return c10::str("ScaledGemmTunableOp",
|
| 300 |
+
"_", TypeName<AT>(AT{}),
|
| 301 |
+
"_", TypeName<BT>(BT{}),
|
| 302 |
+
"_", TypeName<CT>(CT{}),
|
| 303 |
+
"_", BlasOpToString(ALayout), BlasOpToString(BLayout));
|
| 304 |
+
}
|
| 305 |
+
};
|
| 306 |
+
|
| 307 |
+
} // namespace at::cuda::tunable
|
vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/miopen/Exceptions.h>
|
| 4 |
+
|
| 5 |
+
#include <ATen/miopen/miopen-wrapper.h>
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
|
| 9 |
+
namespace at { namespace native {
|
| 10 |
+
|
| 11 |
+
inline int dataSize(miopenDataType_t dataType)
|
| 12 |
+
{
|
| 13 |
+
switch (dataType) {
|
| 14 |
+
case miopenHalf: return 2;
|
| 15 |
+
case miopenFloat: return 4;
|
| 16 |
+
case miopenBFloat16: return 2;
|
| 17 |
+
default: return 8;
|
| 18 |
+
}
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
template <typename T, miopenStatus_t (*dtor)(T*)>
|
| 22 |
+
struct DescriptorDeleter {
|
| 23 |
+
void operator()(T* x) {
|
| 24 |
+
if (x != nullptr) {
|
| 25 |
+
MIOPEN_CHECK(dtor(x));
|
| 26 |
+
}
|
| 27 |
+
}
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
// A generic class for wrapping MIOpen descriptor types. All you need
|
| 31 |
+
// is to give the underlying type the Descriptor_t points to (usually,
|
| 32 |
+
// if it's miopenTensorDescriptor_t it points to miopenTensorStruct),
|
| 33 |
+
// the constructor and the destructor. Subclasses are responsible
|
| 34 |
+
// for defining a set() function to actually set the descriptor.
|
| 35 |
+
//
|
| 36 |
+
// Descriptors default construct to a nullptr, and have a descriptor
|
| 37 |
+
// initialized the first time you call set() or any other initializing
|
| 38 |
+
// function.
|
| 39 |
+
template <typename T, miopenStatus_t (*ctor)(T**), miopenStatus_t (*dtor)(T*)>
|
| 40 |
+
class Descriptor
|
| 41 |
+
{
|
| 42 |
+
public:
|
| 43 |
+
// Use desc() to access the underlying descriptor pointer in
|
| 44 |
+
// a read-only fashion. Most client code should use this.
|
| 45 |
+
// If the descriptor was never initialized, this will return
|
| 46 |
+
// nullptr.
|
| 47 |
+
T* desc() const { return desc_.get(); }
|
| 48 |
+
T* desc() { return desc_.get(); }
|
| 49 |
+
|
| 50 |
+
// Use mut_desc() to access the underlying descriptor pointer
|
| 51 |
+
// if you intend to modify what it points to (e.g., using
|
| 52 |
+
// miopenSetFooDescriptor). This will ensure that the descriptor
|
| 53 |
+
// is initialized. Code in this file will use this function.
|
| 54 |
+
T* mut_desc() { init(); return desc_.get(); }
|
| 55 |
+
protected:
|
| 56 |
+
void init() {
|
| 57 |
+
if (desc_ == nullptr) {
|
| 58 |
+
T* raw_desc;
|
| 59 |
+
MIOPEN_CHECK(ctor(&raw_desc));
|
| 60 |
+
desc_.reset(raw_desc);
|
| 61 |
+
}
|
| 62 |
+
}
|
| 63 |
+
private:
|
| 64 |
+
std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
|
| 65 |
+
};
|
| 66 |
+
|
| 67 |
+
class TensorDescriptor
|
| 68 |
+
: public Descriptor<miopenTensorDescriptor,
|
| 69 |
+
&miopenCreateTensorDescriptor,
|
| 70 |
+
&miopenDestroyTensorDescriptor>
|
| 71 |
+
{
|
| 72 |
+
public:
|
| 73 |
+
TensorDescriptor() {}
|
| 74 |
+
explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
|
| 75 |
+
set(t, pad);
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
void set(const at::Tensor &t, size_t pad = 0);
|
| 79 |
+
void set(miopenDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
|
| 80 |
+
|
| 81 |
+
void print();
|
| 82 |
+
|
| 83 |
+
private:
|
| 84 |
+
void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
|
| 85 |
+
MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
|
| 86 |
+
}
|
| 87 |
+
};
|
| 88 |
+
|
| 89 |
+
std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
|
| 90 |
+
|
| 91 |
+
class FilterDescriptor
|
| 92 |
+
: public Descriptor<miopenTensorDescriptor,
|
| 93 |
+
&miopenCreateTensorDescriptor,
|
| 94 |
+
&miopenDestroyTensorDescriptor>
|
| 95 |
+
{
|
| 96 |
+
public:
|
| 97 |
+
void set(const at::Tensor &t, int64_t pad = 0) {
|
| 98 |
+
set(t, at::MemoryFormat::Contiguous, pad);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
|
| 102 |
+
|
| 103 |
+
private:
|
| 104 |
+
void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
|
| 105 |
+
MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
|
| 106 |
+
}
|
| 107 |
+
};
|
| 108 |
+
|
| 109 |
+
struct ConvolutionDescriptor
|
| 110 |
+
: public Descriptor<miopenConvolutionDescriptor,
|
| 111 |
+
&miopenCreateConvolutionDescriptor,
|
| 112 |
+
&miopenDestroyConvolutionDescriptor>
|
| 113 |
+
{
|
| 114 |
+
void set(miopenDataType_t dataType, miopenConvolutionMode_t c_mode, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool deterministic) {
|
| 115 |
+
MIOPEN_CHECK(miopenInitConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale, c_mode));
|
| 116 |
+
MIOPEN_CHECK(miopenSetConvolutionGroupCount(mut_desc(), groups));
|
| 117 |
+
MIOPEN_CHECK(miopenSetConvolutionAttribute(mut_desc(), MIOPEN_CONVOLUTION_ATTRIB_DETERMINISTIC, deterministic ? 1 : 0));
|
| 118 |
+
}
|
| 119 |
+
};
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
struct RNNDescriptor
|
| 123 |
+
: public Descriptor<miopenRNNDescriptor,
|
| 124 |
+
&miopenCreateRNNDescriptor,
|
| 125 |
+
&miopenDestroyRNNDescriptor>
|
| 126 |
+
{
|
| 127 |
+
void set(int64_t hidden_size, int64_t num_layers, miopenRNNInputMode_t input_mode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnn_mode,
|
| 128 |
+
miopenRNNBiasMode_t bias_mode, miopenRNNAlgo_t algorithm, miopenDataType_t datatype) {
|
| 129 |
+
MIOPEN_CHECK(miopenSetRNNDescriptor(mut_desc(), hidden_size, num_layers, input_mode, direction, rnn_mode, bias_mode, algorithm, datatype));
|
| 130 |
+
}
|
| 131 |
+
};
|
| 132 |
+
|
| 133 |
+
union Constant
|
| 134 |
+
{
|
| 135 |
+
float f;
|
| 136 |
+
double d;
|
| 137 |
+
Constant(miopenDataType_t dataType, double value) {
|
| 138 |
+
if (dataType == miopenHalf || dataType == miopenFloat || dataType == miopenBFloat16) {
|
| 139 |
+
f = static_cast<float>(value);
|
| 140 |
+
} else {
|
| 141 |
+
d = value;
|
| 142 |
+
}
|
| 143 |
+
}
|
| 144 |
+
};
|
| 145 |
+
|
| 146 |
+
}} // namespace
|
vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/miopen/miopen-wrapper.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <stdexcept>
|
| 6 |
+
#include <sstream>
|
| 7 |
+
|
| 8 |
+
namespace at { namespace native {
|
| 9 |
+
|
| 10 |
+
class miopen_exception : public std::runtime_error {
|
| 11 |
+
public:
|
| 12 |
+
miopenStatus_t status;
|
| 13 |
+
miopen_exception(miopenStatus_t status, const char* msg)
|
| 14 |
+
: std::runtime_error(msg)
|
| 15 |
+
, status(status) {}
|
| 16 |
+
miopen_exception(miopenStatus_t status, const std::string& msg)
|
| 17 |
+
: std::runtime_error(msg)
|
| 18 |
+
, status(status) {}
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
inline void MIOPEN_CHECK(miopenStatus_t status)
|
| 22 |
+
{
|
| 23 |
+
if (status != miopenStatusSuccess) {
|
| 24 |
+
if (status == miopenStatusNotImplemented) {
|
| 25 |
+
throw miopen_exception(status, std::string(miopenGetErrorString(status)) +
|
| 26 |
+
". This error may appear if you passed in a non-contiguous input.");
|
| 27 |
+
}
|
| 28 |
+
throw miopen_exception(status, miopenGetErrorString(status));
|
| 29 |
+
}
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
inline void HIP_CHECK(hipError_t error)
|
| 33 |
+
{
|
| 34 |
+
if (error != hipSuccess) {
|
| 35 |
+
std::string msg("HIP error: ");
|
| 36 |
+
msg += hipGetErrorString(error);
|
| 37 |
+
throw std::runtime_error(msg);
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
}} // namespace at::native
|
vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Handle.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/miopen/miopen-wrapper.h>
|
| 4 |
+
|
| 5 |
+
namespace at { namespace native {
|
| 6 |
+
|
| 7 |
+
miopenHandle_t getMiopenHandle();
|
| 8 |
+
|
| 9 |
+
}} // namespace
|
vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Types.h
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/miopen/miopen-wrapper.h>
|
| 4 |
+
#include <ATen/Tensor.h>
|
| 5 |
+
|
| 6 |
+
namespace at { namespace native {
|
| 7 |
+
|
| 8 |
+
miopenDataType_t getMiopenDataType(const at::Tensor& tensor);
|
| 9 |
+
|
| 10 |
+
int64_t miopen_version();
|
| 11 |
+
|
| 12 |
+
}} // namespace at::miopen
|
vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/miopen/miopen-wrapper.h>
|
| 5 |
+
#include <ATen/miopen/Handle.h>
|
| 6 |
+
|
| 7 |
+
namespace at { namespace native {
|
| 8 |
+
|
| 9 |
+
// This function makes tensors which have zero stride contiguous, by
|
| 10 |
+
// setting the strides to 1.
|
| 11 |
+
inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
|
| 12 |
+
for (auto s : t.strides()) {
|
| 13 |
+
if (s == 0) return t.contiguous();
|
| 14 |
+
}
|
| 15 |
+
return t;
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
}}
|
vllm/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <miopen/miopen.h>
|
vllm/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/macros/Export.h>
|
| 3 |
+
#include <limits>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
class TensorBase;
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
namespace at::native {
|
| 10 |
+
|
| 11 |
+
TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
|
| 12 |
+
|
| 13 |
+
}
|
vllm/lib/python3.10/site-packages/torch/include/ATen/native/FractionalMaxPooling.h
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/core/Tensor.h>
|
| 3 |
+
#include <ATen/TensorUtils.h>
|
| 4 |
+
#include <c10/util/irange.h>
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
template<typename scalar_t>
|
| 9 |
+
inline std::vector<int64_t> generate_intervals(
|
| 10 |
+
scalar_t sample,
|
| 11 |
+
int64_t inputSize,
|
| 12 |
+
int64_t outputSize,
|
| 13 |
+
int64_t poolSize) {
|
| 14 |
+
std::vector<int64_t> sequence(outputSize);
|
| 15 |
+
if (outputSize > 1) {
|
| 16 |
+
scalar_t alpha = static_cast<scalar_t>(inputSize - poolSize) /
|
| 17 |
+
static_cast<scalar_t>(outputSize - 1);
|
| 18 |
+
|
| 19 |
+
for (const auto i : c10::irange(outputSize - 1)) {
|
| 20 |
+
sequence[i] =
|
| 21 |
+
static_cast<int>((i + sample) * alpha) - static_cast<int>(sample * alpha);
|
| 22 |
+
}
|
| 23 |
+
}
|
| 24 |
+
if (outputSize > 0) {
|
| 25 |
+
sequence[outputSize - 1] = inputSize - poolSize;
|
| 26 |
+
}
|
| 27 |
+
return sequence;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
template <int64_t ndim>
|
| 31 |
+
inline void fractional_max_pool_check_shape(
|
| 32 |
+
const Tensor& input,
|
| 33 |
+
const Tensor& randomSamples) {
|
| 34 |
+
|
| 35 |
+
TORCH_CHECK(
|
| 36 |
+
input.scalar_type() == randomSamples.scalar_type(),
|
| 37 |
+
"Expect _random_samples to have the same dtype as input");
|
| 38 |
+
|
| 39 |
+
int64_t ndimension = randomSamples.ndimension();
|
| 40 |
+
TORCH_CHECK(
|
| 41 |
+
ndimension == 3,
|
| 42 |
+
"Expect _random_samples to have 3 dimensions, got ", ndimension);
|
| 43 |
+
|
| 44 |
+
int64_t N = randomSamples.size(0);
|
| 45 |
+
int64_t C = randomSamples.size(1);
|
| 46 |
+
int64_t D = randomSamples.size(2);
|
| 47 |
+
|
| 48 |
+
int64_t input_batch = 0, input_channel = 0;
|
| 49 |
+
if (ndim == 2) {
|
| 50 |
+
// fractional_max_pool2d
|
| 51 |
+
if (input.ndimension() == 3) {
|
| 52 |
+
input_batch = 1;
|
| 53 |
+
input_channel = input.size(0);
|
| 54 |
+
} else {
|
| 55 |
+
input_batch = input.size(0);
|
| 56 |
+
input_channel = input.size(1);
|
| 57 |
+
}
|
| 58 |
+
} else {
|
| 59 |
+
// factional_max_pool3d
|
| 60 |
+
if (input.ndimension() == 4) {
|
| 61 |
+
input_batch = 1;
|
| 62 |
+
input_channel = input.size(0);
|
| 63 |
+
} else {
|
| 64 |
+
input_batch = input.size(0);
|
| 65 |
+
input_channel = input.size(1);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
TORCH_CHECK(
|
| 70 |
+
N >= input_batch,
|
| 71 |
+
"Expect _random_samples.size(0) no less then input batch size.");
|
| 72 |
+
TORCH_CHECK(
|
| 73 |
+
C == input_channel,
|
| 74 |
+
"Expect _random_samples.size(1) equals to input channel size.");
|
| 75 |
+
TORCH_CHECK(
|
| 76 |
+
D == ndim,
|
| 77 |
+
"Expect _random_samples.size(2) equals to ", ndim, "; got ", D, ".");
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
} // namespace at::native
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_abs_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_abs(at::TensorList self);
|
| 21 |
+
TORCH_API void _foreach_abs_out(at::TensorList out, at::TensorList self);
|
| 22 |
+
TORCH_API void _foreach_abs_outf(at::TensorList self, at::TensorList out);
|
| 23 |
+
TORCH_API void _foreach_abs_(at::TensorList self);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _neg_view {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_neg_view")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_neg_view(Tensor(a) self) -> Tensor(a)")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor slow_conv2d_forward_cpu(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
|
| 20 |
+
TORCH_API at::Tensor & slow_conv2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
|
| 21 |
+
TORCH_API at::Tensor slow_conv2d_forward_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
|
| 22 |
+
TORCH_API at::Tensor & slow_conv2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
|
| 21 |
+
TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
|
| 22 |
+
TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/argmax_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor argmax(const at::Tensor & self, ::std::optional<int64_t> dim=::std::nullopt, bool keepdim=false);
|
| 21 |
+
TORCH_API at::Tensor & argmax_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> dim=::std::nullopt, bool keepdim=false);
|
| 22 |
+
TORCH_API at::Tensor & argmax_outf(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atan_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API atan {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan(Tensor self) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API atan_ {
|
| 29 |
+
using schema = at::Tensor & (at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan_")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan_(Tensor(a!) self) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(at::Tensor & self);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API atan_out {
|
| 40 |
+
using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_native.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/clamp_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_clamp_out : public at::meta::structured_clamp {
|
| 20 |
+
void impl(const at::Tensor & self, at::OptionalScalarRef min, at::OptionalScalarRef max, const at::Tensor & out);
|
| 21 |
+
};
|
| 22 |
+
TORCH_API at::Tensor clamp_quantized_cpu(const at::Tensor & self, const ::std::optional<at::Scalar> & min=::std::nullopt, const ::std::optional<at::Scalar> & max=::std::nullopt);
|
| 23 |
+
struct TORCH_API structured_clamp_Tensor_out : public at::meta::structured_clamp_Tensor {
|
| 24 |
+
void impl(const at::Tensor & self, at::OptionalTensorRef min, at::OptionalTensorRef max, const at::Tensor & out);
|
| 25 |
+
};
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor conv2d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
|
| 20 |
+
TORCH_API at::Tensor conv2d_padding_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::string_view padding="valid", c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|