Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h +20 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h +301 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h +55 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/TriangularOpsUtils.h +59 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/UpSample.h +467 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/batch_norm.h +37 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h +12 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CopyKernel.h +12 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IndexKernelUtils.h +94 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/MaxUnpoolKernel.h +14 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/PixelShuffleKernel.h +14 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ReduceUtils.h +160 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/UpSampleKernelAVXAntialias.h +719 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/avx_mathfun.h +522 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/moments_utils.h +205 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/radix_sort.h +196 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/AffineQuantizer.h +130 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/PackedParams.h +147 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h +527 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantizedOps.h +235 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/RuyUtils.h +21 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h +411 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/init_qnnpack.h +13 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag.h +34 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h +11 -0
- openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/vol2col.h +112 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__init__.py +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/autoheuristic.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/autoheuristic_utils.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/learned_heuristic_controller.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/learnedheuristic_interface.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_MMRankingA100.py +296 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_MMRankingH100.py +321 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_MixedMMA100.py +150 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_PadMMA100.py +109 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/__init__.py +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/__pycache__/_MixedMMH100.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/autoheuristic.py +315 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/autoheuristic_utils.py +339 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/learned_heuristic_controller.py +119 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/learnedheuristic_interface.py +92 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/aoti_hipify_utils.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/block_analysis.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_bmm_template.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_flex_attention_template.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_gemm_template.cpython-310.pyc +0 -0
- phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_micro_gemm.cpython-310.pyc +0 -0
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/Copy.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/native/DispatchStub.h>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
|
| 7 |
+
class Tensor;
|
| 8 |
+
struct TensorIterator;
|
| 9 |
+
class TensorBase;
|
| 10 |
+
|
| 11 |
+
namespace native {
|
| 12 |
+
|
| 13 |
+
using copy_fn = void (*)(TensorIterator&, bool non_blocking);
|
| 14 |
+
|
| 15 |
+
DECLARE_DISPATCH(copy_fn, copy_stub);
|
| 16 |
+
|
| 17 |
+
TORCH_API void copy_ignoring_overlaps(const TensorBase &dst, const TensorBase &src);
|
| 18 |
+
|
| 19 |
+
} // namespace native
|
| 20 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/DeviceType.h>
|
| 4 |
+
#include <c10/macros/Export.h>
|
| 5 |
+
|
| 6 |
+
#include <atomic>
|
| 7 |
+
#include <utility>
|
| 8 |
+
|
| 9 |
+
// Implements instruction set specific function dispatch.
|
| 10 |
+
//
|
| 11 |
+
// Kernels that may make use of specialized instruction sets (e.g. AVX2) are
|
| 12 |
+
// compiled multiple times with different compiler flags (e.g. -mavx2). A
|
| 13 |
+
// DispatchStub contains a table of function pointers for a kernel. At runtime,
|
| 14 |
+
// the fastest available kernel is chosen based on the features reported by
|
| 15 |
+
// cpuinfo.
|
| 16 |
+
//
|
| 17 |
+
// Example:
|
| 18 |
+
//
|
| 19 |
+
// In native/MyKernel.h:
|
| 20 |
+
// using fn_type = void(*)(const Tensor& x);
|
| 21 |
+
// DECLARE_DISPATCH(fn_type, stub);
|
| 22 |
+
//
|
| 23 |
+
// In native/MyKernel.cpp
|
| 24 |
+
// DEFINE_DISPATCH(stub);
|
| 25 |
+
//
|
| 26 |
+
// In native/cpu/MyKernel.cpp:
|
| 27 |
+
// namespace {
|
| 28 |
+
// // use anonymous namespace so that different cpu versions won't conflict
|
| 29 |
+
// void kernel(const Tensor& x) { ... }
|
| 30 |
+
// }
|
| 31 |
+
// REGISTER_DISPATCH(stub, &kernel);
|
| 32 |
+
//
|
| 33 |
+
// To call:
|
| 34 |
+
// stub(kCPU, tensor);
|
| 35 |
+
//
|
| 36 |
+
// TODO: CPU instruction set selection should be folded into whatever
|
| 37 |
+
// the main dispatch mechanism is.
|
| 38 |
+
|
| 39 |
+
// ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere
|
| 40 |
+
#if defined(__clang__)
|
| 41 |
+
#pragma clang diagnostic push
|
| 42 |
+
#pragma clang diagnostic ignored "-Wundefined-var-template"
|
| 43 |
+
#endif
|
| 44 |
+
|
| 45 |
+
namespace at { namespace native {
|
| 46 |
+
|
| 47 |
+
enum class CPUCapability {
|
| 48 |
+
DEFAULT = 0,
|
| 49 |
+
#if defined(HAVE_VSX_CPU_DEFINITION)
|
| 50 |
+
VSX = 1,
|
| 51 |
+
#elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
|
| 52 |
+
ZVECTOR = 1,
|
| 53 |
+
#else
|
| 54 |
+
AVX2 = 1,
|
| 55 |
+
AVX512 = 2,
|
| 56 |
+
#endif
|
| 57 |
+
NUM_OPTIONS
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
CPUCapability get_cpu_capability();
|
| 61 |
+
|
| 62 |
+
template <typename FnPtr, typename T>
|
| 63 |
+
struct DispatchStub;
|
| 64 |
+
|
| 65 |
+
/**
|
| 66 |
+
* The sole purpose of this class is to outline methods that don't need to be
|
| 67 |
+
* specialized or otherwise inlined and duplicated (by the compiler due to
|
| 68 |
+
* template expansion), since it causes size bloat if there are a significant
|
| 69 |
+
* number of specialization of the DispatchStub<> class.
|
| 70 |
+
*/
|
| 71 |
+
struct TORCH_API DispatchStubImpl {
|
| 72 |
+
void* get_call_ptr(
|
| 73 |
+
DeviceType device_type
|
| 74 |
+
, void *DEFAULT
|
| 75 |
+
#ifdef HAVE_AVX512_CPU_DEFINITION
|
| 76 |
+
, void *AVX512
|
| 77 |
+
#endif
|
| 78 |
+
#ifdef HAVE_AVX2_CPU_DEFINITION
|
| 79 |
+
, void *AVX2
|
| 80 |
+
#endif
|
| 81 |
+
#ifdef HAVE_VSX_CPU_DEFINITION
|
| 82 |
+
, void *VSX
|
| 83 |
+
#endif
|
| 84 |
+
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
|
| 85 |
+
, void *ZVECTOR
|
| 86 |
+
#endif
|
| 87 |
+
);
|
| 88 |
+
|
| 89 |
+
/**
|
| 90 |
+
* The CPU Dispatch actual method is chosen in decreasing order of preference by
|
| 91 |
+
* DispatchStubImpl::choose_cpu_impl() in case none is found by
|
| 92 |
+
* DispatchStubImpl::get_call_ptr() in cpu_dispatch_ptr.
|
| 93 |
+
*/
|
| 94 |
+
void* choose_cpu_impl(
|
| 95 |
+
void *DEFAULT
|
| 96 |
+
#ifdef HAVE_AVX512_CPU_DEFINITION
|
| 97 |
+
, void *AVX512
|
| 98 |
+
#endif
|
| 99 |
+
#ifdef HAVE_AVX2_CPU_DEFINITION
|
| 100 |
+
, void *AVX2
|
| 101 |
+
#endif
|
| 102 |
+
#ifdef HAVE_VSX_CPU_DEFINITION
|
| 103 |
+
, void *VSX
|
| 104 |
+
#endif
|
| 105 |
+
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
|
| 106 |
+
, void *ZVECTOR
|
| 107 |
+
#endif
|
| 108 |
+
);
|
| 109 |
+
|
| 110 |
+
// Fixing dispatch error in Windows debug builds.
|
| 111 |
+
// See https://github.com/pytorch/pytorch/issues/22681 for more details.
|
| 112 |
+
#if defined(_MSC_VER) && defined(_DEBUG)
|
| 113 |
+
std::atomic<void*> cpu_dispatch_ptr;
|
| 114 |
+
void* cuda_dispatch_ptr;
|
| 115 |
+
void* hip_dispatch_ptr;
|
| 116 |
+
void* mps_dispatch_ptr;
|
| 117 |
+
#else
|
| 118 |
+
std::atomic<void*> cpu_dispatch_ptr{nullptr};
|
| 119 |
+
void* cuda_dispatch_ptr = nullptr;
|
| 120 |
+
void* hip_dispatch_ptr = nullptr;
|
| 121 |
+
void* mps_dispatch_ptr = nullptr;
|
| 122 |
+
#endif
|
| 123 |
+
};
|
| 124 |
+
|
| 125 |
+
template <typename rT, typename T, typename... Args>
|
| 126 |
+
struct DispatchStub<rT (*)(Args...), T> {
|
| 127 |
+
using FnPtr = rT (*) (Args...);
|
| 128 |
+
|
| 129 |
+
DispatchStub() = default;
|
| 130 |
+
DispatchStub(const DispatchStub&) = delete;
|
| 131 |
+
DispatchStub& operator=(const DispatchStub&) = delete;
|
| 132 |
+
|
| 133 |
+
private:
|
| 134 |
+
FnPtr get_call_ptr(DeviceType device_type) {
|
| 135 |
+
return reinterpret_cast<FnPtr>(
|
| 136 |
+
impl.get_call_ptr(device_type
|
| 137 |
+
, reinterpret_cast<void*>(DEFAULT)
|
| 138 |
+
#ifdef HAVE_AVX512_CPU_DEFINITION
|
| 139 |
+
, reinterpret_cast<void*>(AVX512)
|
| 140 |
+
#endif
|
| 141 |
+
#ifdef HAVE_AVX2_CPU_DEFINITION
|
| 142 |
+
, reinterpret_cast<void*>(AVX2)
|
| 143 |
+
#endif
|
| 144 |
+
#ifdef HAVE_VSX_CPU_DEFINITION
|
| 145 |
+
, reinterpret_cast<void*>(VSX)
|
| 146 |
+
#endif
|
| 147 |
+
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
|
| 148 |
+
, reinterpret_cast<void*>(ZVECTOR)
|
| 149 |
+
#endif
|
| 150 |
+
)
|
| 151 |
+
);
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
public:
|
| 155 |
+
template <typename... ArgTypes>
|
| 156 |
+
rT operator()(DeviceType device_type, ArgTypes&&... args) {
|
| 157 |
+
FnPtr call_ptr = get_call_ptr(device_type);
|
| 158 |
+
return (*call_ptr)(std::forward<ArgTypes>(args)...);
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
void set_cuda_dispatch_ptr(FnPtr fn_ptr) {
|
| 162 |
+
impl.cuda_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
void set_hip_dispatch_ptr(FnPtr fn_ptr) {
|
| 166 |
+
impl.hip_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
void set_mps_dispatch_ptr(FnPtr fn_ptr) {
|
| 170 |
+
impl.mps_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
static TORCH_API FnPtr DEFAULT;
|
| 174 |
+
#ifdef HAVE_AVX512_CPU_DEFINITION
|
| 175 |
+
static TORCH_API FnPtr AVX512;
|
| 176 |
+
#endif
|
| 177 |
+
#ifdef HAVE_AVX2_CPU_DEFINITION
|
| 178 |
+
static TORCH_API FnPtr AVX2;
|
| 179 |
+
#endif
|
| 180 |
+
#ifdef HAVE_VSX_CPU_DEFINITION
|
| 181 |
+
static TORCH_API FnPtr VSX;
|
| 182 |
+
#endif
|
| 183 |
+
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
|
| 184 |
+
static TORCH_API FnPtr ZVECTOR;
|
| 185 |
+
#endif
|
| 186 |
+
private:
|
| 187 |
+
DispatchStubImpl impl;
|
| 188 |
+
};
|
| 189 |
+
|
| 190 |
+
namespace {
|
| 191 |
+
template <typename DispatchStub>
|
| 192 |
+
struct RegisterCUDADispatch {
|
| 193 |
+
RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
|
| 194 |
+
stub.set_cuda_dispatch_ptr(value);
|
| 195 |
+
}
|
| 196 |
+
};
|
| 197 |
+
|
| 198 |
+
template <typename DispatchStub>
|
| 199 |
+
struct RegisterMPSDispatch {
|
| 200 |
+
RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
|
| 201 |
+
stub.set_mps_dispatch_ptr(value);
|
| 202 |
+
}
|
| 203 |
+
};
|
| 204 |
+
|
| 205 |
+
template <typename DispatchStub>
|
| 206 |
+
struct RegisterHIPDispatch {
|
| 207 |
+
RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
|
| 208 |
+
// TODO: make this point at hip_dispatch_ptr
|
| 209 |
+
stub.set_cuda_dispatch_ptr(value);
|
| 210 |
+
}
|
| 211 |
+
};
|
| 212 |
+
|
| 213 |
+
} // anonymous namespace
|
| 214 |
+
// Compiler will complain if you put things like std::tuple<Tensor, Tensor> in
|
| 215 |
+
// the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g.,
|
| 216 |
+
// adding parentheses and using helper struct to get rid of the parentheses, do
|
| 217 |
+
// not work with MSVC. So do a `using`-declaration if you need to pass in such
|
| 218 |
+
// `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h.
|
| 219 |
+
#define DECLARE_DISPATCH(fn, name) \
|
| 220 |
+
struct name : DispatchStub<fn, name> { \
|
| 221 |
+
name() = default; \
|
| 222 |
+
name(const name&) = delete; \
|
| 223 |
+
name& operator=(const name&) = delete; \
|
| 224 |
+
}; \
|
| 225 |
+
extern TORCH_API struct name name
|
| 226 |
+
|
| 227 |
+
#define DEFINE_DISPATCH(name) struct name name
|
| 228 |
+
|
| 229 |
+
#define REGISTER_ARCH_DISPATCH(name, arch, fn) \
|
| 230 |
+
template <> name::FnPtr TORCH_API DispatchStub<name::FnPtr, struct name>::arch = fn;
|
| 231 |
+
|
| 232 |
+
#ifdef HAVE_AVX512_CPU_DEFINITION
|
| 233 |
+
#define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn)
|
| 234 |
+
#else
|
| 235 |
+
#define REGISTER_AVX512_DISPATCH(name, fn)
|
| 236 |
+
#endif
|
| 237 |
+
|
| 238 |
+
#ifdef HAVE_AVX2_CPU_DEFINITION
|
| 239 |
+
#define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn)
|
| 240 |
+
#else
|
| 241 |
+
#define REGISTER_AVX2_DISPATCH(name, fn)
|
| 242 |
+
#endif
|
| 243 |
+
|
| 244 |
+
#ifdef HAVE_VSX_CPU_DEFINITION
|
| 245 |
+
#define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn)
|
| 246 |
+
#else
|
| 247 |
+
#define REGISTER_VSX_DISPATCH(name, fn)
|
| 248 |
+
#endif
|
| 249 |
+
|
| 250 |
+
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
|
| 251 |
+
#define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn)
|
| 252 |
+
#else
|
| 253 |
+
#define REGISTER_ZVECTOR_DISPATCH(name, fn)
|
| 254 |
+
#endif
|
| 255 |
+
|
| 256 |
+
// Macro to register the same kernel for all CPU arch types. This is useful
|
| 257 |
+
// if a kernel does not benefit from being recompiled across different arch types.
|
| 258 |
+
#define REGISTER_ALL_CPU_DISPATCH(name, fn) \
|
| 259 |
+
REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \
|
| 260 |
+
REGISTER_AVX512_DISPATCH(name, fn) \
|
| 261 |
+
REGISTER_AVX2_DISPATCH(name, fn) \
|
| 262 |
+
REGISTER_VSX_DISPATCH(name, fn) \
|
| 263 |
+
REGISTER_ZVECTOR_DISPATCH(name, fn)
|
| 264 |
+
|
| 265 |
+
#define REGISTER_NO_CPU_DISPATCH(name) \
|
| 266 |
+
REGISTER_ALL_CPU_DISPATCH(name, nullptr)
|
| 267 |
+
|
| 268 |
+
#define REGISTER_CUDA_DISPATCH(name, fn) \
|
| 269 |
+
static RegisterCUDADispatch<struct name> name ## __register(name, fn);
|
| 270 |
+
|
| 271 |
+
#define REGISTER_HIP_DISPATCH(name, fn) \
|
| 272 |
+
static RegisterHIPDispatch<struct name> name ## __register(name, fn);
|
| 273 |
+
|
| 274 |
+
#define REGISTER_MPS_DISPATCH(name, fn) \
|
| 275 |
+
static RegisterMPSDispatch<struct name> name ## __register(name, fn);
|
| 276 |
+
|
| 277 |
+
// NB: This macro must be used in an actual 'cu' file; if you try using
|
| 278 |
+
// it from a 'cpp' file it will not work!
|
| 279 |
+
#if defined(__CUDACC__)
|
| 280 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
|
| 281 |
+
#elif defined(__HIPCC__)
|
| 282 |
+
// TODO: cut this over to HIP dispatch once we stop pretending that CUDA
|
| 283 |
+
// is HIP in the PyTorch HIPify build.
|
| 284 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
|
| 285 |
+
// #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn)
|
| 286 |
+
#elif defined(__OBJC__) && defined(USE_MPS)
|
| 287 |
+
// NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel
|
| 288 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn)
|
| 289 |
+
#elif defined(CPU_CAPABILITY)
|
| 290 |
+
#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
|
| 291 |
+
#define REGISTER_NO_AVX512_DISPATCH(name) \
|
| 292 |
+
REGISTER_AVX512_DISPATCH(name, nullptr)
|
| 293 |
+
#endif
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
}} // namespace at::native
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
#if defined(__clang__)
|
| 300 |
+
#pragma clang diagnostic pop
|
| 301 |
+
#endif
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/ReduceOps.h
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/native/DispatchStub.h>
|
| 4 |
+
#include <c10/util/Optional.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
class Scalar;
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
namespace at {
|
| 11 |
+
struct TensorIterator;
|
| 12 |
+
class Tensor;
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
namespace at { namespace native {
|
| 16 |
+
|
| 17 |
+
using reduce_fn = void(*)(TensorIterator &);
|
| 18 |
+
|
| 19 |
+
DECLARE_DISPATCH(reduce_fn, sum_stub);
|
| 20 |
+
DECLARE_DISPATCH(reduce_fn, nansum_stub);
|
| 21 |
+
DECLARE_DISPATCH(reduce_fn, prod_stub);
|
| 22 |
+
DECLARE_DISPATCH(reduce_fn, mean_stub);
|
| 23 |
+
DECLARE_DISPATCH(reduce_fn, and_stub);
|
| 24 |
+
DECLARE_DISPATCH(reduce_fn, or_stub);
|
| 25 |
+
DECLARE_DISPATCH(reduce_fn, min_values_stub);
|
| 26 |
+
DECLARE_DISPATCH(reduce_fn, max_values_stub);
|
| 27 |
+
DECLARE_DISPATCH(reduce_fn, argmax_stub);
|
| 28 |
+
DECLARE_DISPATCH(reduce_fn, argmin_stub);
|
| 29 |
+
|
| 30 |
+
using reduce_std_var_function =
|
| 31 |
+
void (*)(TensorIterator&, int64_t correction, bool take_sqrt);
|
| 32 |
+
DECLARE_DISPATCH(reduce_std_var_function, std_var_stub);
|
| 33 |
+
|
| 34 |
+
using reduce_norm_fn =
|
| 35 |
+
void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional<int64_t>);
|
| 36 |
+
DECLARE_DISPATCH(reduce_norm_fn, norm_kernel);
|
| 37 |
+
|
| 38 |
+
using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&);
|
| 39 |
+
DECLARE_DISPATCH(reduce_fn_flag, norm_stub);
|
| 40 |
+
|
| 41 |
+
using structured_cum_fn = void (*)(const Tensor&, const Tensor&, int64_t);
|
| 42 |
+
using cum_fn = void (*)(Tensor&, const Tensor&, int64_t);
|
| 43 |
+
DECLARE_DISPATCH(structured_cum_fn, cumsum_stub);
|
| 44 |
+
DECLARE_DISPATCH(structured_cum_fn, cumprod_stub);
|
| 45 |
+
DECLARE_DISPATCH(cum_fn, logcumsumexp_stub);
|
| 46 |
+
|
| 47 |
+
DECLARE_DISPATCH(void (*)(const Tensor&, int64_t, bool, Tensor&, Tensor&), aminmax_stub);
|
| 48 |
+
DECLARE_DISPATCH(void (*)(const Tensor&, Tensor&, Tensor&), aminmax_allreduce_stub);
|
| 49 |
+
|
| 50 |
+
// Used in cuda/Normalization.cu
|
| 51 |
+
TORCH_API std::tuple<Tensor&,Tensor&> var_mean_out(
|
| 52 |
+
Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim,
|
| 53 |
+
int64_t correction, bool keepdim);
|
| 54 |
+
|
| 55 |
+
}} // namespace at::native
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/TriangularOpsUtils.h
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/Tensor.h>
|
| 2 |
+
#include <ATen/native/LinearAlgebraUtils.h>
|
| 3 |
+
|
| 4 |
+
namespace at {
|
| 5 |
+
namespace native {
|
| 6 |
+
|
| 7 |
+
/*
|
| 8 |
+
* Given batches of matrices with arbitrary batch dim,
|
| 9 |
+
* computes the number of batches for Triu and Tril. This ignores stride 0 dimension
|
| 10 |
+
*/
|
| 11 |
+
static inline int64_t batchCountTrilTriu(const Tensor& batched_matrices) {
|
| 12 |
+
int64_t result = 1;
|
| 13 |
+
for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
|
| 14 |
+
if (batched_matrices.stride(i) != 0) {
|
| 15 |
+
result *= batched_matrices.size(i);
|
| 16 |
+
}
|
| 17 |
+
}
|
| 18 |
+
return result;
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
/* Checks a necessary property for the triu and tril implementations, hence the name.
|
| 22 |
+
* Here batch contiguity is checked for tensors with greater than 4 dimensions.
|
| 23 |
+
* Contiguous tensors and tensors with less than 3 dimensions pass this check
|
| 24 |
+
*/
|
| 25 |
+
static inline std::tuple<bool, Tensor> checkTrilTriuBatchContiguous(const Tensor& tensor, bool allow_zero_stride) {
|
| 26 |
+
// Complete contiguity is the most desired property, which is why
|
| 27 |
+
// we return true if the tensor is contiguous
|
| 28 |
+
if (tensor.is_contiguous()) {
|
| 29 |
+
auto default_strides_for_size = batched_matrix_contiguous_strides(tensor.sizes());
|
| 30 |
+
if (tensor.strides() == default_strides_for_size) {
|
| 31 |
+
return std::make_tuple(true, tensor);
|
| 32 |
+
} else {
|
| 33 |
+
return std::make_tuple(false, tensor.as_strided(tensor.sizes(), default_strides_for_size));
|
| 34 |
+
}
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
int64_t dims = tensor.dim();
|
| 38 |
+
|
| 39 |
+
// Tensors with dimension less than 4 are handled by default
|
| 40 |
+
if (allow_zero_stride && dims <= 3) {
|
| 41 |
+
return std::make_tuple(true, tensor);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
int64_t expected_stride = tensor.size(-1) * tensor.size(-2);
|
| 45 |
+
for (int64_t i = dims - 3; i >= 0; i--) {
|
| 46 |
+
// Skip trivial dimension;
|
| 47 |
+
if (allow_zero_stride && i == 0 && (tensor.stride(i) == 0 || tensor.size(i) == 1)) {
|
| 48 |
+
continue;
|
| 49 |
+
}
|
| 50 |
+
if (expected_stride != tensor.stride(i)) {
|
| 51 |
+
return std::make_tuple(false, tensor.contiguous());
|
| 52 |
+
}
|
| 53 |
+
expected_stride *= tensor.size(i);
|
| 54 |
+
}
|
| 55 |
+
return std::make_tuple(true, tensor);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
} // namespace native
|
| 59 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/UpSample.h
ADDED
|
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <math.h>
|
| 4 |
+
|
| 5 |
+
#include <ATen/OpMathType.h>
|
| 6 |
+
#include <ATen/TensorUtils.h>
|
| 7 |
+
#include <ATen/core/Tensor.h>
|
| 8 |
+
#include <ATen/native/DispatchStub.h>
|
| 9 |
+
|
| 10 |
+
/**
|
| 11 |
+
* Note [compute_scales_value]
|
| 12 |
+
* Note [area_pixel_compute_scale]
|
| 13 |
+
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 14 |
+
* Interpolate with scale_factor can have different behaviors
|
| 15 |
+
* depending on the value of recompute_scale_factor:
|
| 16 |
+
*
|
| 17 |
+
* - With recompute_scale_factor = True (current default behavior):
|
| 18 |
+
* the scale_factor, when provided by the user, are used to calculate
|
| 19 |
+
* the output size. The input size and the computed output_size
|
| 20 |
+
* are then used to infer new values for the scales which are
|
| 21 |
+
* used in the interpolation. Because floating-point math is not exact,
|
| 22 |
+
* this may be a different value from the user-supplied scales.
|
| 23 |
+
*
|
| 24 |
+
* - With recompute_scale_factor = False (which will be the default
|
| 25 |
+
* behavior starting 1.5.0):
|
| 26 |
+
* the behavior follows opencv logic, and the scales provided by
|
| 27 |
+
* the user are the ones used in the interpolation calculations.
|
| 28 |
+
*
|
| 29 |
+
* If the scales are not provided or if they are provided but
|
| 30 |
+
* recompute_scale_factor is set to True (default behavior), the scales
|
| 31 |
+
* are computed from the input and the output size;
|
| 32 |
+
*
|
| 33 |
+
*
|
| 34 |
+
* When the scales are inferred from the input and output sizes,
|
| 35 |
+
* we view each pixel as an area, idx + 0.5 as its center index.
|
| 36 |
+
* Here is an example formula in 1D case.
|
| 37 |
+
* if align_corners: center of two corner pixel areas are preserved,
|
| 38 |
+
* (0.5, 0.5) -> (0.5, 0.5),
|
| 39 |
+
* (input_size - 0.5, 0.5) -> (output_size - 0.5)
|
| 40 |
+
* scale = (input_size - 0.5 - 0.5) / (output_size - 0.5 - 0.5)
|
| 41 |
+
* src_index + 0.5 - 0.5 = scale * (dst_index + 0.5 - 0.5)
|
| 42 |
+
* if not align_corners: the whole range is scaled accordingly
|
| 43 |
+
* scale = input_size / output_size
|
| 44 |
+
* src_idx + 0.5 = scale * (dst_index + 0.5)
|
| 45 |
+
*/
|
| 46 |
+
|
| 47 |
+
namespace at {
|
| 48 |
+
namespace native {
|
| 49 |
+
|
| 50 |
+
namespace upsample {
|
| 51 |
+
|
| 52 |
+
TORCH_API c10::SmallVector<int64_t, 3> compute_output_size(
|
| 53 |
+
c10::IntArrayRef input_size, // Full input tensor size.
|
| 54 |
+
at::OptionalIntArrayRef output_size,
|
| 55 |
+
c10::optional<c10::ArrayRef<double>> scale_factors);
|
| 56 |
+
|
| 57 |
+
inline c10::optional<double> get_scale_value(c10::optional<c10::ArrayRef<double>> scales, int idx) {
|
| 58 |
+
if (!scales) {
|
| 59 |
+
return c10::nullopt;
|
| 60 |
+
}
|
| 61 |
+
return scales->at(idx);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
} // namespace upsample
|
| 65 |
+
|
| 66 |
+
using scale_t = c10::optional<double>;
|
| 67 |
+
using upsampling_nearest1d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_w);
|
| 68 |
+
using _upsampling_nearest_exact1d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_w);
|
| 69 |
+
using upsampling_nearest2d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_h, scale_t scales_w);
|
| 70 |
+
using _upsampling_nearest_exact2d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_h, scale_t scales_w);
|
| 71 |
+
using upsampling_nearest3d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_d, scale_t scales_h, scale_t scales_w);
|
| 72 |
+
using _upsampling_nearest_exact3d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_d, scale_t scales_h, scale_t scales_w);
|
| 73 |
+
using upsampling_linear1d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_w);
|
| 74 |
+
using upsampling_bilinear2d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w);
|
| 75 |
+
using _upsampling_bilinear2d_aa = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w);
|
| 76 |
+
using upsampling_trilinear3d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_d, scale_t scales_h, scale_t scales_w);
|
| 77 |
+
using upsampling_bicubic2d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w);
|
| 78 |
+
using _upsampling_bicubic2d_aa = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w);
|
| 79 |
+
DECLARE_DISPATCH(upsampling_nearest1d, upsample_nearest1d_kernel);
|
| 80 |
+
DECLARE_DISPATCH(_upsampling_nearest_exact1d, _upsample_nearest_exact1d_kernel);
|
| 81 |
+
DECLARE_DISPATCH(upsampling_nearest2d, upsample_nearest2d_kernel);
|
| 82 |
+
DECLARE_DISPATCH(_upsampling_nearest_exact2d, _upsample_nearest_exact2d_kernel);
|
| 83 |
+
DECLARE_DISPATCH(upsampling_nearest3d, upsample_nearest3d_kernel);
|
| 84 |
+
DECLARE_DISPATCH(_upsampling_nearest_exact3d, _upsample_nearest_exact3d_kernel);
|
| 85 |
+
DECLARE_DISPATCH(upsampling_nearest1d, upsample_nearest1d_backward_kernel);
|
| 86 |
+
DECLARE_DISPATCH(_upsampling_nearest_exact1d, _upsample_nearest_exact1d_backward_kernel);
|
| 87 |
+
DECLARE_DISPATCH(upsampling_nearest2d, upsample_nearest2d_backward_kernel);
|
| 88 |
+
DECLARE_DISPATCH(_upsampling_nearest_exact2d, _upsample_nearest_exact2d_backward_kernel);
|
| 89 |
+
DECLARE_DISPATCH(upsampling_nearest3d, upsample_nearest3d_backward_kernel);
|
| 90 |
+
DECLARE_DISPATCH(_upsampling_nearest_exact3d, _upsample_nearest_exact3d_backward_kernel);
|
| 91 |
+
DECLARE_DISPATCH(upsampling_linear1d, upsample_linear1d_kernel);
|
| 92 |
+
DECLARE_DISPATCH(upsampling_bilinear2d, upsample_bilinear2d_kernel);
|
| 93 |
+
DECLARE_DISPATCH(_upsampling_bilinear2d_aa, _upsample_bilinear2d_aa_kernel);
|
| 94 |
+
DECLARE_DISPATCH(upsampling_trilinear3d, upsample_trilinear3d_kernel);
|
| 95 |
+
DECLARE_DISPATCH(upsampling_linear1d, upsample_linear1d_backward_kernel);
|
| 96 |
+
DECLARE_DISPATCH(upsampling_bilinear2d, upsample_bilinear2d_backward_kernel);
|
| 97 |
+
DECLARE_DISPATCH(_upsampling_bilinear2d_aa, _upsample_bilinear2d_aa_backward_kernel);
|
| 98 |
+
DECLARE_DISPATCH(upsampling_trilinear3d, upsample_trilinear3d_backward_kernel);
|
| 99 |
+
DECLARE_DISPATCH(upsampling_bicubic2d, upsample_bicubic2d_kernel);
|
| 100 |
+
DECLARE_DISPATCH(_upsampling_bicubic2d_aa, _upsample_bicubic2d_aa_kernel);
|
| 101 |
+
DECLARE_DISPATCH(_upsampling_bicubic2d_aa, _upsample_bicubic2d_aa_backward_kernel);
|
| 102 |
+
|
| 103 |
+
static C10_UNUSED std::array<int64_t, 3> upsample_1d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
| 104 |
+
TORCH_CHECK(
|
| 105 |
+
output_size.size() == 1,
|
| 106 |
+
"It is expected output_size equals to 1, but got size ",
|
| 107 |
+
output_size.size());
|
| 108 |
+
|
| 109 |
+
TORCH_CHECK(
|
| 110 |
+
input_size.size() == 3,
|
| 111 |
+
"It is expected input_size equals to 3, but got size ",
|
| 112 |
+
input_size.size());
|
| 113 |
+
|
| 114 |
+
int64_t output_width = output_size[0];
|
| 115 |
+
|
| 116 |
+
int64_t nbatch = input_size[0];
|
| 117 |
+
int64_t channels = input_size[1];
|
| 118 |
+
int64_t input_width = input_size[2];
|
| 119 |
+
|
| 120 |
+
TORCH_CHECK(
|
| 121 |
+
input_width > 0 && output_width > 0,
|
| 122 |
+
"Input and output sizes should be greater than 0, but got input (W: ",
|
| 123 |
+
input_width,
|
| 124 |
+
") and output (W: ",
|
| 125 |
+
output_width,
|
| 126 |
+
")");
|
| 127 |
+
|
| 128 |
+
return {nbatch, channels, output_width};
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
static C10_UNUSED std::array<int64_t, 4> upsample_2d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
| 132 |
+
TORCH_CHECK(
|
| 133 |
+
output_size.size() == 2,
|
| 134 |
+
"It is expected output_size equals to 2, but got size ",
|
| 135 |
+
output_size.size());
|
| 136 |
+
|
| 137 |
+
TORCH_CHECK(
|
| 138 |
+
input_size.size() == 4,
|
| 139 |
+
"It is expected input_size equals to 4, but got size ",
|
| 140 |
+
input_size.size());
|
| 141 |
+
|
| 142 |
+
int64_t output_height = output_size[0];
|
| 143 |
+
int64_t output_width = output_size[1];
|
| 144 |
+
|
| 145 |
+
int64_t nbatch = input_size[0];
|
| 146 |
+
int64_t channels = input_size[1];
|
| 147 |
+
int64_t input_height = input_size[2];
|
| 148 |
+
int64_t input_width = input_size[3];
|
| 149 |
+
|
| 150 |
+
TORCH_CHECK(
|
| 151 |
+
input_height > 0 && input_width > 0 && output_height > 0 &&
|
| 152 |
+
output_width > 0,
|
| 153 |
+
"Input and output sizes should be greater than 0,"
|
| 154 |
+
" but got input (H: ",
|
| 155 |
+
input_height,
|
| 156 |
+
", W: ",
|
| 157 |
+
input_width,
|
| 158 |
+
") output (H: ",
|
| 159 |
+
output_height,
|
| 160 |
+
", W: ",
|
| 161 |
+
output_width,
|
| 162 |
+
")");
|
| 163 |
+
|
| 164 |
+
return {nbatch, channels, output_height, output_width};
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
static C10_UNUSED
|
| 168 |
+
std::array<int64_t, 5> upsample_3d_common_check(IntArrayRef input_size, IntArrayRef output_size) {
|
| 169 |
+
TORCH_CHECK(
|
| 170 |
+
output_size.size() == 3,
|
| 171 |
+
"It is expected output_size equals to 3, but got size ",
|
| 172 |
+
output_size.size());
|
| 173 |
+
|
| 174 |
+
TORCH_CHECK(
|
| 175 |
+
input_size.size() == 5,
|
| 176 |
+
"It is expected input_size equals to 5, but got size ",
|
| 177 |
+
input_size.size());
|
| 178 |
+
|
| 179 |
+
int64_t output_depth = output_size[0];
|
| 180 |
+
int64_t output_height = output_size[1];
|
| 181 |
+
int64_t output_width = output_size[2];
|
| 182 |
+
|
| 183 |
+
int64_t nbatch = input_size[0];
|
| 184 |
+
int64_t channels = input_size[1];
|
| 185 |
+
int64_t input_depth = input_size[2];
|
| 186 |
+
int64_t input_height = input_size[3];
|
| 187 |
+
int64_t input_width = input_size[4];
|
| 188 |
+
|
| 189 |
+
TORCH_CHECK(
|
| 190 |
+
input_depth > 0 && input_height > 0 && input_width > 0 &&
|
| 191 |
+
output_depth > 0 && output_height > 0 && output_width > 0,
|
| 192 |
+
"Input and output sizes should be greater than 0, but got input (D: ",
|
| 193 |
+
input_depth,
|
| 194 |
+
", H: ",
|
| 195 |
+
input_height,
|
| 196 |
+
", W: ",
|
| 197 |
+
input_width,
|
| 198 |
+
") output (D: ",
|
| 199 |
+
output_depth,
|
| 200 |
+
", H: ",
|
| 201 |
+
output_height,
|
| 202 |
+
", W: ",
|
| 203 |
+
output_width,
|
| 204 |
+
")");
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
return {nbatch, channels, output_depth, output_height, output_width};
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
static inline void upsample_2d_shape_check(
|
| 211 |
+
const Tensor& input,
|
| 212 |
+
const Tensor& grad_output,
|
| 213 |
+
int64_t nbatch,
|
| 214 |
+
int64_t nchannels,
|
| 215 |
+
int64_t input_height,
|
| 216 |
+
int64_t input_width,
|
| 217 |
+
int64_t output_height,
|
| 218 |
+
int64_t output_width) {
|
| 219 |
+
TORCH_CHECK(
|
| 220 |
+
input_height > 0 && input_width > 0 && output_height > 0 &&
|
| 221 |
+
output_width > 0,
|
| 222 |
+
"Input and output sizes should be greater than 0,"
|
| 223 |
+
" but got input (H: ",
|
| 224 |
+
input_height,
|
| 225 |
+
", W: ",
|
| 226 |
+
input_width,
|
| 227 |
+
") output (H: ",
|
| 228 |
+
output_height,
|
| 229 |
+
", W: ",
|
| 230 |
+
output_width,
|
| 231 |
+
")");
|
| 232 |
+
|
| 233 |
+
if (input.defined()) {
|
| 234 |
+
// Allow for empty batch size but not other dimensions
|
| 235 |
+
TORCH_CHECK(
|
| 236 |
+
(input.numel() != 0 ||
|
| 237 |
+
(input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0)
|
| 238 |
+
) &&
|
| 239 |
+
input.dim() == 4,
|
| 240 |
+
"Non-empty 4D data tensor expected but got a tensor with sizes ",
|
| 241 |
+
input.sizes());
|
| 242 |
+
} else if (grad_output.defined()) {
|
| 243 |
+
check_dim_size(grad_output, 4, 0, nbatch);
|
| 244 |
+
check_dim_size(grad_output, 4, 1, nchannels);
|
| 245 |
+
check_dim_size(grad_output, 4, 2, output_height);
|
| 246 |
+
check_dim_size(grad_output, 4, 3, output_width);
|
| 247 |
+
}
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
template <typename scalar_t>
|
| 251 |
+
static inline scalar_t compute_scales_value(
|
| 252 |
+
const c10::optional<double> scale,
|
| 253 |
+
int64_t input_size,
|
| 254 |
+
int64_t output_size) {
|
| 255 |
+
// see Note [compute_scales_value]
|
| 256 |
+
// FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults.
|
| 257 |
+
return (scale.has_value() && scale.value() > 0.)
|
| 258 |
+
? static_cast<scalar_t>(1.0 / scale.value())
|
| 259 |
+
: (static_cast<scalar_t>(input_size) / output_size);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
template <typename scalar_t>
|
| 263 |
+
static inline scalar_t area_pixel_compute_scale(
|
| 264 |
+
int64_t input_size,
|
| 265 |
+
int64_t output_size,
|
| 266 |
+
bool align_corners,
|
| 267 |
+
const c10::optional<double> scale) {
|
| 268 |
+
// see Note [area_pixel_compute_scale]
|
| 269 |
+
if(align_corners) {
|
| 270 |
+
if(output_size > 1) {
|
| 271 |
+
return static_cast<scalar_t>(input_size - 1) / (output_size - 1);
|
| 272 |
+
} else {
|
| 273 |
+
return static_cast<scalar_t>(0);
|
| 274 |
+
}
|
| 275 |
+
} else {
|
| 276 |
+
return compute_scales_value<scalar_t>(scale, input_size, output_size);
|
| 277 |
+
}
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
template <typename scalar_t>
|
| 281 |
+
static inline scalar_t area_pixel_compute_source_index(
|
| 282 |
+
scalar_t scale,
|
| 283 |
+
int64_t dst_index,
|
| 284 |
+
bool align_corners,
|
| 285 |
+
bool cubic) {
|
| 286 |
+
if (align_corners) {
|
| 287 |
+
return scale * dst_index;
|
| 288 |
+
} else {
|
| 289 |
+
scalar_t src_idx = scale * (dst_index + static_cast<scalar_t>(0.5)) -
|
| 290 |
+
static_cast<scalar_t>(0.5);
|
| 291 |
+
// [Note] Follow Opencv resize logic:
|
| 292 |
+
// We allow negative src_idx here and later will use
|
| 293 |
+
// dx = src_idx - floorf(src_idx)
|
| 294 |
+
// to compute the "distance"(which affects weights).
|
| 295 |
+
// For linear modes, weight distribution doesn't matter
|
| 296 |
+
// for negative indices as they use 2 pixels to interpolate.
|
| 297 |
+
// For example, [-1, 0], they both use pixel 0 value so it
|
| 298 |
+
// doesn't affect if we bound the src_idx to 0 or not.
|
| 299 |
+
// TODO: Our current linear mode impls use unbound indices
|
| 300 |
+
// where we should and then remove this cubic flag.
|
| 301 |
+
// This matters in cubic mode, as we might need [-1, 0, 1, 2]
|
| 302 |
+
// to interpolate and the weights can be affected.
|
| 303 |
+
return (!cubic && src_idx < static_cast<scalar_t>(0)) ? scalar_t(0)
|
| 304 |
+
: src_idx;
|
| 305 |
+
}
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
static inline int64_t nearest_neighbor_compute_source_index(
|
| 309 |
+
const float scale,
|
| 310 |
+
int64_t dst_index,
|
| 311 |
+
int64_t input_size) {
|
| 312 |
+
// Index computation matching OpenCV INTER_NEAREST
|
| 313 |
+
// which is buggy and kept for BC
|
| 314 |
+
const int64_t src_index =
|
| 315 |
+
std::min(static_cast<int64_t>(floorf(dst_index * scale)), input_size - 1);
|
| 316 |
+
return src_index;
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
static inline int64_t nearest_neighbor_exact_compute_source_index(
|
| 320 |
+
const float scale,
|
| 321 |
+
int64_t dst_index,
|
| 322 |
+
int64_t input_size) {
|
| 323 |
+
// index_f32 = (output_index + 0.5) * scale - 0.5
|
| 324 |
+
// input_index = round(index_f32)
|
| 325 |
+
// Same as Pillow and Scikit-Image/Scipy ndi.zoom
|
| 326 |
+
const int64_t src_index =
|
| 327 |
+
std::min(static_cast<int64_t>(floorf((dst_index + 0.5) * scale)), input_size - 1);
|
| 328 |
+
return src_index;
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
static inline int64_t nearest_idx(
|
| 332 |
+
int64_t output_index,
|
| 333 |
+
int64_t input_size,
|
| 334 |
+
int64_t output_size,
|
| 335 |
+
c10::optional<double> scales) {
|
| 336 |
+
// This method specificly treats cases: output_size == input_size or
|
| 337 |
+
// output_size == 2 * input_size, that we would like to get rid of
|
| 338 |
+
// We keep this method for BC and consider as deprecated.
|
| 339 |
+
// See nearest_exact_idx as replacement
|
| 340 |
+
if (output_size == input_size) {
|
| 341 |
+
// scale_factor = 1, simply copy
|
| 342 |
+
return output_index;
|
| 343 |
+
} else if (output_size == 2 * input_size) {
|
| 344 |
+
// scale_factor = 2, shift input index
|
| 345 |
+
return output_index >> 1;
|
| 346 |
+
} else {
|
| 347 |
+
float scale = compute_scales_value<float>(scales, input_size, output_size);
|
| 348 |
+
return nearest_neighbor_compute_source_index(scale, output_index, input_size);
|
| 349 |
+
}
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
static inline int64_t nearest_exact_idx(
|
| 353 |
+
int64_t output_index,
|
| 354 |
+
int64_t input_size,
|
| 355 |
+
int64_t output_size,
|
| 356 |
+
c10::optional<double> scales) {
|
| 357 |
+
float scale = compute_scales_value<float>(scales, input_size, output_size);
|
| 358 |
+
return nearest_neighbor_exact_compute_source_index(scale, output_index, input_size);
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
// Define a typedef to dispatch to nearest_idx or nearest_exact_idx
|
| 362 |
+
typedef int64_t (*nearest_idx_fn_t)(int64_t, int64_t, int64_t, c10::optional<double>);
|
| 363 |
+
|
| 364 |
+
template <typename scalar_t>
|
| 365 |
+
static scalar_t upsample_get_value_bounded(
|
| 366 |
+
scalar_t* data,
|
| 367 |
+
int64_t width,
|
| 368 |
+
int64_t height,
|
| 369 |
+
int64_t x,
|
| 370 |
+
int64_t y) {
|
| 371 |
+
int64_t access_x = std::max(std::min(x, width - 1), static_cast<int64_t>(0));
|
| 372 |
+
int64_t access_y = std::max(std::min(y, height - 1), static_cast<int64_t>(0));
|
| 373 |
+
return data[access_y * width + access_x];
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
template <typename scalar_t>
|
| 377 |
+
static void upsample_increment_value_bounded(
|
| 378 |
+
scalar_t* data,
|
| 379 |
+
int64_t width,
|
| 380 |
+
int64_t height,
|
| 381 |
+
int64_t x,
|
| 382 |
+
int64_t y,
|
| 383 |
+
scalar_t value) {
|
| 384 |
+
int64_t access_x = std::max(std::min(x, width - 1), static_cast<int64_t>(0));
|
| 385 |
+
int64_t access_y = std::max(std::min(y, height - 1), static_cast<int64_t>(0));
|
| 386 |
+
data[access_y * width + access_x] += value;
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
// Based on
|
| 390 |
+
// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
|
| 391 |
+
template <typename scalar_t>
|
| 392 |
+
static inline scalar_t cubic_convolution1(scalar_t x, scalar_t A) {
|
| 393 |
+
return ((A + 2) * x - (A + 3)) * x * x + 1;
|
| 394 |
+
}
|
| 395 |
+
|
| 396 |
+
template <typename scalar_t>
|
| 397 |
+
static inline scalar_t cubic_convolution2(scalar_t x, scalar_t A) {
|
| 398 |
+
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
template <typename scalar_t>
|
| 402 |
+
static inline void get_cubic_upsample_coefficients(
|
| 403 |
+
scalar_t coeffs[4],
|
| 404 |
+
scalar_t t) {
|
| 405 |
+
scalar_t A = -0.75;
|
| 406 |
+
|
| 407 |
+
scalar_t x1 = t;
|
| 408 |
+
coeffs[0] = cubic_convolution2<scalar_t>(x1 + 1.0, A);
|
| 409 |
+
coeffs[1] = cubic_convolution1<scalar_t>(x1, A);
|
| 410 |
+
|
| 411 |
+
// opposite coefficients
|
| 412 |
+
scalar_t x2 = 1.0 - t;
|
| 413 |
+
coeffs[2] = cubic_convolution1<scalar_t>(x2, A);
|
| 414 |
+
coeffs[3] = cubic_convolution2<scalar_t>(x2 + 1.0, A);
|
| 415 |
+
}
|
| 416 |
+
|
| 417 |
+
template <typename scalar_t>
|
| 418 |
+
static inline scalar_t cubic_interp1d(
|
| 419 |
+
scalar_t x0,
|
| 420 |
+
scalar_t x1,
|
| 421 |
+
scalar_t x2,
|
| 422 |
+
scalar_t x3,
|
| 423 |
+
scalar_t t) {
|
| 424 |
+
scalar_t coeffs[4];
|
| 425 |
+
get_cubic_upsample_coefficients<scalar_t>(coeffs, t);
|
| 426 |
+
|
| 427 |
+
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
template<typename scalar_t>
|
| 431 |
+
static inline void compute_source_index_and_lambda(
|
| 432 |
+
int64_t& input_index0,
|
| 433 |
+
int64_t& input_index1,
|
| 434 |
+
scalar_t& lambda0,
|
| 435 |
+
scalar_t& lambda1,
|
| 436 |
+
scalar_t ratio,
|
| 437 |
+
int64_t output_index,
|
| 438 |
+
int64_t input_size,
|
| 439 |
+
int64_t output_size,
|
| 440 |
+
bool align_corners) {
|
| 441 |
+
if (output_size == input_size) {
|
| 442 |
+
// scale_factor = 1, simply copy
|
| 443 |
+
input_index0 = output_index;
|
| 444 |
+
input_index1 = output_index;
|
| 445 |
+
lambda0 = static_cast<scalar_t>(1);
|
| 446 |
+
lambda1 = static_cast<scalar_t>(0);
|
| 447 |
+
} else {
|
| 448 |
+
using opmath_t = at::opmath_type<scalar_t>;
|
| 449 |
+
const auto real_input_index =
|
| 450 |
+
area_pixel_compute_source_index<opmath_t>(
|
| 451 |
+
ratio, output_index, align_corners, /*cubic=*/false);
|
| 452 |
+
// when `real_input_index` becomes larger than the range the floating point
|
| 453 |
+
// type can accurately represent, the type casting to `int64_t` might exceed
|
| 454 |
+
// `input_size - 1`, causing overflow. So we guard it with `std::min` below.
|
| 455 |
+
input_index0 = std::min(static_cast<int64_t>(real_input_index), input_size - 1);
|
| 456 |
+
int64_t offset = (input_index0 < input_size - 1) ? 1 : 0;
|
| 457 |
+
input_index1 = input_index0 + offset;
|
| 458 |
+
lambda1 = std::min(
|
| 459 |
+
std::max(real_input_index - input_index0, static_cast<opmath_t>(0)),
|
| 460 |
+
static_cast<opmath_t>(1)
|
| 461 |
+
);
|
| 462 |
+
lambda0 = static_cast<scalar_t>(1.) - lambda1;
|
| 463 |
+
}
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
} // namespace native
|
| 467 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/batch_norm.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/native/DispatchStub.h>
|
| 5 |
+
|
| 6 |
+
namespace at {
|
| 7 |
+
|
| 8 |
+
namespace native {
|
| 9 |
+
|
| 10 |
+
using batch_norm_fn = void (*)(Tensor&, const Tensor&, const Tensor&,
|
| 11 |
+
const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double);
|
| 12 |
+
using batch_norm_collect_stats_fn = void (*)(Tensor&, Tensor&, const Tensor&);
|
| 13 |
+
using batch_norm_backward_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&,
|
| 14 |
+
const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double);
|
| 15 |
+
|
| 16 |
+
DECLARE_DISPATCH(batch_norm_fn, batch_norm_cpu_stub);
|
| 17 |
+
DECLARE_DISPATCH(batch_norm_collect_stats_fn, batch_norm_cpu_collect_stats_stub);
|
| 18 |
+
DECLARE_DISPATCH(batch_norm_backward_fn, batch_norm_cpu_backward_stub);
|
| 19 |
+
|
| 20 |
+
// TensorAccessor when it is defined to work around undefined...
|
| 21 |
+
template <typename scalar_t>
|
| 22 |
+
static TensorAccessor<scalar_t, 1> conditional_accessor_1d(const Tensor& t) {
|
| 23 |
+
if (! t.defined()) {
|
| 24 |
+
return TensorAccessor<scalar_t, 1>(nullptr, nullptr, nullptr);
|
| 25 |
+
}
|
| 26 |
+
return t.accessor<scalar_t, 1>();
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
template <typename scalar_t>
|
| 30 |
+
static scalar_t* conditional_data_ptr(const Tensor& t) {
|
| 31 |
+
return t.defined() ? t.contiguous().data_ptr<scalar_t>()
|
| 32 |
+
: nullptr;
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
} // namespace native
|
| 36 |
+
|
| 37 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CatKernel.h
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/native/DispatchStub.h>
|
| 5 |
+
#include <ATen/core/IListRef.h>
|
| 6 |
+
|
| 7 |
+
namespace at { namespace native {
|
| 8 |
+
|
| 9 |
+
using cat_serial_fn = void(*)(const Tensor &, const MaterializedITensorListRef&, int64_t);
|
| 10 |
+
DECLARE_DISPATCH(cat_serial_fn, cat_serial_stub);
|
| 11 |
+
|
| 12 |
+
}} // namespace at::native
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/CopyKernel.h
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace at {
|
| 4 |
+
struct TensorIteratorBase;
|
| 5 |
+
|
| 6 |
+
namespace native {
|
| 7 |
+
inline namespace CPU_CAPABILITY {
|
| 8 |
+
|
| 9 |
+
void direct_copy_kernel(TensorIteratorBase &iter);
|
| 10 |
+
void copy_kernel(TensorIterator& iter, bool /*non_blocking*/);
|
| 11 |
+
|
| 12 |
+
}}} // namespace at::native::CPU_CAPABILITY
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/IndexKernelUtils.h
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/native/TensorIterator.h>
|
| 3 |
+
#include <c10/util/irange.h>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
namespace native {
|
| 7 |
+
|
| 8 |
+
namespace {
|
| 9 |
+
static bool is_constant_index(int ntensor, const int64_t* strides) {
|
| 10 |
+
AT_ASSERT(ntensor >= 3);
|
| 11 |
+
for (const auto arg : c10::irange(2, ntensor)) {
|
| 12 |
+
if (strides[arg] != 0) {
|
| 13 |
+
return false;
|
| 14 |
+
}
|
| 15 |
+
}
|
| 16 |
+
return true;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
struct Indexer {
|
| 21 |
+
Indexer(int64_t num_indexers, char** indexers, const int64_t* indexer_strides,
|
| 22 |
+
IntArrayRef original_sizes, IntArrayRef original_strides)
|
| 23 |
+
: num_indexers(num_indexers)
|
| 24 |
+
, indexers(indexers)
|
| 25 |
+
, indexer_strides(indexer_strides)
|
| 26 |
+
, original_strides(original_strides.data())
|
| 27 |
+
, original_sizes(original_sizes.data()) {
|
| 28 |
+
AT_ASSERT(static_cast<int64_t>(original_strides.size()) == num_indexers);
|
| 29 |
+
AT_ASSERT(static_cast<int64_t>(original_sizes.size()) == num_indexers);
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
int64_t num_indexers;
|
| 33 |
+
char** indexers;
|
| 34 |
+
const int64_t* indexer_strides;
|
| 35 |
+
const int64_t* original_strides;
|
| 36 |
+
const int64_t* original_sizes;
|
| 37 |
+
|
| 38 |
+
int64_t get(int64_t idx) {
|
| 39 |
+
int64_t offset = 0;
|
| 40 |
+
for (const auto j : c10::irange(num_indexers)) {
|
| 41 |
+
int64_t value = *(int64_t*)&indexers[j][idx * indexer_strides[j]];
|
| 42 |
+
int64_t size = original_sizes[j];
|
| 43 |
+
TORCH_CHECK_INDEX(value >= -size && value < size,
|
| 44 |
+
"index ", value, " is out of bounds for dimension ", j, " with size ", size);
|
| 45 |
+
if (value < 0) {
|
| 46 |
+
value += size;
|
| 47 |
+
}
|
| 48 |
+
offset += value * original_strides[j];
|
| 49 |
+
}
|
| 50 |
+
return offset;
|
| 51 |
+
}
|
| 52 |
+
};
|
| 53 |
+
} // anonymous namespace
|
| 54 |
+
|
| 55 |
+
template <typename scalar_t, typename func_t>
|
| 56 |
+
void cpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride,
|
| 57 |
+
const func_t& f, bool serial_execution=false)
|
| 58 |
+
{
|
| 59 |
+
int ntensor = iter.ntensors();
|
| 60 |
+
// When launch the index parallel version, set a relative samll grain size less than the INTERNAL::GRAIN_SIZE
|
| 61 |
+
// to make the whole available thread numbers get more balanced work load and a better cache location.
|
| 62 |
+
// The grain size here is chosen by the op benchmark to overcome the thread launch overhead
|
| 63 |
+
const int index_parallel_grain_size = 3000;
|
| 64 |
+
auto loop = [&](char** data, const int64_t* strides, int64_t n) {
|
| 65 |
+
auto indexer = Indexer(ntensor - 2, &data[2], &strides[2], index_size, index_stride);
|
| 66 |
+
char* dst = data[0];
|
| 67 |
+
char* src = data[1];
|
| 68 |
+
if (is_constant_index(ntensor, strides)) {
|
| 69 |
+
// specialization for when every element uses the same index
|
| 70 |
+
int64_t offset = indexer.get(0);
|
| 71 |
+
if (strides[0] == sizeof(scalar_t) && strides[1] == sizeof(scalar_t)) {
|
| 72 |
+
for (const auto i : c10::irange(n)) {
|
| 73 |
+
f(dst + strides[0] * i, src + strides[1] * i, offset);
|
| 74 |
+
}
|
| 75 |
+
} else {
|
| 76 |
+
for (const auto i : c10::irange(n)) {
|
| 77 |
+
f(dst + strides[0] * i, src + strides[1] * i, offset);
|
| 78 |
+
}
|
| 79 |
+
}
|
| 80 |
+
} else {
|
| 81 |
+
for (const auto i : c10::irange(n)) {
|
| 82 |
+
int64_t offset = indexer.get(i);
|
| 83 |
+
f(dst + strides[0] * i, src + strides[1] * i, offset);
|
| 84 |
+
}
|
| 85 |
+
}
|
| 86 |
+
};
|
| 87 |
+
if (serial_execution) {
|
| 88 |
+
iter.serial_for_each(loop, {0, iter.numel()});
|
| 89 |
+
} else {
|
| 90 |
+
iter.for_each(loop, index_parallel_grain_size);
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
} // at
|
| 94 |
+
} // native
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/MaxUnpoolKernel.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/native/DispatchStub.h>
|
| 3 |
+
|
| 4 |
+
namespace at {
|
| 5 |
+
class Tensor;
|
| 6 |
+
|
| 7 |
+
namespace native {
|
| 8 |
+
|
| 9 |
+
using max_unpooling_fn = void(*)(Tensor&, const Tensor&, const Tensor&);
|
| 10 |
+
|
| 11 |
+
DECLARE_DISPATCH(max_unpooling_fn, max_unpool2d_kernel);
|
| 12 |
+
DECLARE_DISPATCH(max_unpooling_fn, max_unpool3d_kernel);
|
| 13 |
+
|
| 14 |
+
}} // at::native
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/PixelShuffleKernel.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/native/DispatchStub.h>
|
| 3 |
+
|
| 4 |
+
namespace at {
|
| 5 |
+
class TensorBase;
|
| 6 |
+
}
|
| 7 |
+
|
| 8 |
+
namespace at { namespace native {
|
| 9 |
+
|
| 10 |
+
using pixel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t);
|
| 11 |
+
DECLARE_DISPATCH(pixel_shuffle_fn, pixel_shuffle_kernel);
|
| 12 |
+
DECLARE_DISPATCH(pixel_shuffle_fn, pixel_unshuffle_kernel);
|
| 13 |
+
|
| 14 |
+
}} // at::native
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/ReduceUtils.h
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Parallel.h>
|
| 4 |
+
#include <ATen/NumericUtils.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec.h>
|
| 6 |
+
#include <ATen/cpu/vec/functional.h>
|
| 7 |
+
#include <ATen/native/ReductionType.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
namespace at::native {
|
| 11 |
+
inline namespace CPU_CAPABILITY {
|
| 12 |
+
|
| 13 |
+
using namespace vec;
|
| 14 |
+
|
| 15 |
+
#define AT_DISPATCH_REDUCTION_TYPES(op, ...) \
|
| 16 |
+
[&] { \
|
| 17 |
+
switch (op) { \
|
| 18 |
+
case SUM: { \
|
| 19 |
+
static constexpr ReductionType reduce = SUM; \
|
| 20 |
+
return __VA_ARGS__(); \
|
| 21 |
+
} \
|
| 22 |
+
case MEAN: { \
|
| 23 |
+
static constexpr ReductionType reduce = MEAN; \
|
| 24 |
+
return __VA_ARGS__(); \
|
| 25 |
+
} \
|
| 26 |
+
case MIN: { \
|
| 27 |
+
static constexpr ReductionType reduce = MIN; \
|
| 28 |
+
return __VA_ARGS__(); \
|
| 29 |
+
} \
|
| 30 |
+
case MAX: { \
|
| 31 |
+
static constexpr ReductionType reduce = MAX; \
|
| 32 |
+
return __VA_ARGS__(); \
|
| 33 |
+
} \
|
| 34 |
+
case PROD: { \
|
| 35 |
+
static constexpr ReductionType reduce = PROD; \
|
| 36 |
+
return __VA_ARGS__(); \
|
| 37 |
+
} \
|
| 38 |
+
} \
|
| 39 |
+
}()
|
| 40 |
+
|
| 41 |
+
template <typename scalar_t, ReductionType reduce>
|
| 42 |
+
inline vec_scalar_t<scalar_t> init_value() {
|
| 43 |
+
using acc_t = vec_scalar_t<scalar_t>;
|
| 44 |
+
acc_t val;
|
| 45 |
+
if (reduce == ReductionType::SUM ||
|
| 46 |
+
reduce == ReductionType::MEAN) {
|
| 47 |
+
val = static_cast<acc_t>(0);
|
| 48 |
+
} else if (reduce == ReductionType::PROD) {
|
| 49 |
+
val = static_cast<acc_t>(1);
|
| 50 |
+
} else if (reduce == ReductionType::MAX) {
|
| 51 |
+
val = -std::numeric_limits<acc_t>::infinity();
|
| 52 |
+
} else {
|
| 53 |
+
TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
|
| 54 |
+
val = std::numeric_limits<acc_t>::infinity();
|
| 55 |
+
}
|
| 56 |
+
return val;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
template <typename scalar_t, ReductionType reduce>
|
| 60 |
+
inline vec_scalar_t<scalar_t> init_value(const c10::optional<Scalar>& initial) {
|
| 61 |
+
using acc_t = vec_scalar_t<scalar_t>;
|
| 62 |
+
if (initial.has_value()) {
|
| 63 |
+
return initial.value().to<acc_t>();
|
| 64 |
+
} else {
|
| 65 |
+
return init_value<scalar_t, reduce>();
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
template <typename scalar_t>
|
| 70 |
+
inline void init(scalar_t* out, int64_t size, const vec_scalar_t<scalar_t>& val) {
|
| 71 |
+
using Vec = Vectorized<vec_scalar_t<scalar_t>>;
|
| 72 |
+
map<scalar_t>(
|
| 73 |
+
[val](Vec x) { return Vec(val); },
|
| 74 |
+
out,
|
| 75 |
+
out,
|
| 76 |
+
size);
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
template <typename scalar_t, ReductionType reduce>
|
| 80 |
+
inline void init(scalar_t* out, int64_t size, const c10::optional<Scalar>& initial) {
|
| 81 |
+
using acc_t = vec_scalar_t<scalar_t>;
|
| 82 |
+
acc_t val = init_value<scalar_t, reduce>(initial);
|
| 83 |
+
init(out, size, val);
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
// overload with `include_self`, used by scatter_reduce
|
| 87 |
+
template <typename scalar_t, ReductionType reduce>
|
| 88 |
+
inline void init(scalar_t* out, int64_t size, bool include_self = false) {
|
| 89 |
+
using acc_t = vec_scalar_t<scalar_t>;
|
| 90 |
+
if (!include_self) {
|
| 91 |
+
acc_t val = init_value<scalar_t, reduce>();
|
| 92 |
+
init(out, size, val);
|
| 93 |
+
}
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
// NaN-propagating max: if the incoming value `y` is NaN, the result is NaN
// (std::max alone would silently drop it).
template <typename scalar_t>
inline scalar_t _max(const scalar_t& x, const scalar_t& y) {
  if (at::_isnan(y)) {
    return y;
  }
  return std::max(x, y);
}
|
| 100 |
+
|
| 101 |
+
// Vectorized overload of _max; vec::maximum propagates NaN lane-wise,
// matching the scalar version's NaN handling.
template <typename scalar_t>
inline Vectorized<scalar_t> _max(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
  // vec::maximum propagates NaN
  return vec::maximum(x, y);
}
|
| 106 |
+
|
| 107 |
+
// NaN-propagating min: if the incoming value `y` is NaN, the result is NaN
// (std::min alone would silently drop it).
template <typename scalar_t>
inline scalar_t _min(const scalar_t& x, const scalar_t& y) {
  if (at::_isnan(y)) {
    return y;
  }
  return std::min(x, y);
}
|
| 111 |
+
|
| 112 |
+
// Vectorized overload of _min; vec::minimum propagates NaN lane-wise,
// matching the scalar version's NaN handling.
template <typename scalar_t>
inline Vectorized<scalar_t> _min(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
  // vec::minimum propagates NaN
  return vec::minimum(x, y);
}
|
| 117 |
+
|
| 118 |
+
// for Max and Min, propagate NaN:
|
| 119 |
+
template <typename T, ReductionType reduce>
|
| 120 |
+
inline T update(const T& x, const T& y) {
|
| 121 |
+
if (reduce == ReductionType::SUM ||
|
| 122 |
+
reduce == ReductionType::MEAN) {
|
| 123 |
+
return x + y;
|
| 124 |
+
} else if (reduce == ReductionType::PROD) {
|
| 125 |
+
return x * y;
|
| 126 |
+
} else if (reduce == ReductionType::MAX) {
|
| 127 |
+
return _max(x, y);
|
| 128 |
+
} else {
|
| 129 |
+
TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
|
| 130 |
+
return _min(x, y);
|
| 131 |
+
}
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
// Element-wise combine of `K` values from `data` into `out`, using the
// scalar/vector `update` above as the binary combine operator.
template <typename scalar_t, ReductionType reduce>
inline void update(scalar_t* out, scalar_t* data, int64_t K) {
  using acc_vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
  auto combine = [](acc_vec acc, acc_vec other) {
    return update<acc_vec, reduce>(acc, other);
  };
  map2<scalar_t>(combine, out, out, data, K);
}
|
| 144 |
+
|
| 145 |
+
// Finalize `K` accumulated outputs. Only MEAN needs a post-step: divide
// every element by the number of contributing inputs. All other reduction
// types (and an empty MEAN with count == 0) leave `out` unchanged.
template <typename scalar_t, ReductionType reduce>
inline void write(scalar_t* out, int64_t count, int64_t K) {
  using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
  if (reduce != ReductionType::MEAN || count <= 0) {
    return;
  }
  vec::map<scalar_t>(
      [count](Vec v) { return v / Vec(count); },
      out,
      out,
      K);
}
|
| 158 |
+
|
| 159 |
+
} // namespace CPU_CAPABILITY
|
| 160 |
+
} // namespace at::native
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/UpSampleKernelAVXAntialias.h
ADDED
|
@@ -0,0 +1,719 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
The Python Imaging Library (PIL) is
|
| 3 |
+
|
| 4 |
+
Copyright © 1997-2011 by Secret Labs AB
|
| 5 |
+
Copyright © 1995-2011 by Fredrik Lundh
|
| 6 |
+
|
| 7 |
+
Pillow is the friendly PIL fork. It is
|
| 8 |
+
|
| 9 |
+
Copyright © 2010-2022 by Alex Clark and contributors
|
| 10 |
+
|
| 11 |
+
Like PIL, Pillow is licensed under the open source HPND License
|
| 12 |
+
*/
|
| 13 |
+
|
| 14 |
+
// This code is heavily inspired from PILLOW-SIMD's implementation:
|
| 15 |
+
// https://github.com/uploadcare/pillow-simd/blob/simd/master/src/libImaging/Resample.c
|
| 16 |
+
|
| 17 |
+
#pragma once
|
| 18 |
+
#ifdef CPU_CAPABILITY_AVX2
|
| 19 |
+
// TODO: This file only supports AVX2. We could split the AVX kernels into
|
| 20 |
+
// smaller logical blocks in order to port them into the Vec.h logic. This would
|
| 21 |
+
// allow to support other vectorization architectures and perhaps also support
|
| 22 |
+
// the non-vectorized fallback (we'd need to make sure it's not slower than the
|
| 23 |
+
// current fallback).
|
| 24 |
+
|
| 25 |
+
#include <ATen/core/Tensor.h>
|
| 26 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 27 |
+
#include <c10/util/irange.h>
|
| 28 |
+
|
| 29 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 30 |
+
#include <ATen/Functions.h>
|
| 31 |
+
#else
|
| 32 |
+
#include <ATen/ops/empty.h>
|
| 33 |
+
#endif
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
namespace {
|
| 37 |
+
|
| 38 |
+
// Load one 32-bit pixel (4 packed uint8 channels) and widen each byte to a
// 32-bit lane: [r g b a] -> [r 0 0 0 | g 0 0 0 | b 0 0 0 | a 0 0 0].
// The int32_t* cast only changes signedness of the pointee, which is a
// permitted aliasing of uint32_t.
static __m128i inline mm_cvtepu8_epi32(const uint32_t* C10_RESTRICT ptr) {
  return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(*(int32_t*)ptr));
}
|
| 41 |
+
|
| 42 |
+
// TODO: We may want to hard-code an unrolled version for the case where
// num_channels=3 to hint the compiler to vectorize this (looks at original
// PIL-SIMD's code).
at::Tensor unpack_rgb(const at::Tensor& packed_tensor) {
  // Convert a "packed" tensor (typically RGBRGBRGB if channels_last) into
  // RGBARGBARGBA format where A is hard-coded to 255. Each pixel is encoded
  // into as 32bits. This generalizes to num_channels <= 4 and also works for
  // non-channels_last tensors.
  //
  // NOTE(review): the loop below actually writes 0 into the padding
  // channels, not 255 — the comment above looks stale; confirm against the
  // AVX kernels' expectations before relying on the alpha value.

  const uint8_t* packed = (const uint8_t*)packed_tensor.data_ptr<uint8_t>();
  // Input is (C, H, W); pixels are enumerated in H*W order.
  auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
  auto num_channels = packed_tensor.size(0);

  constexpr int rgba_size = 4;
  // Output is always 4 channels so each pixel fits exactly in a uint32_t.
  auto unpacked_tensor = at::empty({rgba_size, packed_tensor.size(1), packed_tensor.size(2)}, at::CPU(at::kByte));
  uint8_t* unpacked = (uint8_t*) unpacked_tensor.data_ptr<uint8_t>();

  // Strides let this work for both channels_last (stride(2) == C) and
  // contiguous (stride(0) == H*W) inputs.
  auto stride_i = packed_tensor.stride(2);
  auto stride_j = packed_tensor.stride(0);

  for (const auto i : c10::irange(num_pixels)) {
    for (const auto j : c10::irange(rgba_size)) {
      // Channels beyond num_channels are zero-padded.
      unpacked[rgba_size * i + j] = (j < num_channels) ? packed[stride_i * i + stride_j * j] : 0;
    }
  }
  return unpacked_tensor;
}
|
| 69 |
+
|
| 70 |
+
// Inverse of unpack_rgb: copy the first `num_channels` bytes of each
// 4-byte RGBA pixel from `unpacked_tensor` back into `packed_tensor`,
// honoring the packed tensor's strides (channels_last or contiguous).
// The extra padding channels of the unpacked buffer are discarded.
void pack_rgb(
    const at::Tensor& unpacked_tensor, // IN
    const at::Tensor& packed_tensor // OUT
) {
  constexpr int rgba_size = 4;
  uint8_t* unpacked = (uint8_t*)unpacked_tensor.data_ptr<uint8_t>();
  uint8_t* packed = (uint8_t*)packed_tensor.data_ptr<uint8_t>();
  auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
  auto num_channels = packed_tensor.size(0);

  // stride(2): distance between consecutive pixels; stride(0): distance
  // between the channel planes of one pixel.
  auto packed_increment = packed_tensor.stride(2);
  auto packed_stride = packed_tensor.stride(0);

  // Walk both buffers pixel by pixel; pointers advance instead of indexing.
  for (const auto i C10_UNUSED : c10::irange(num_pixels)) {
    for (const auto j : c10::irange(num_channels)) {
      packed[j * packed_stride] = unpacked[j];
    }
    unpacked += rgba_size;
    packed += packed_increment;
  }
}
|
| 91 |
+
|
| 92 |
+
void ImagingResampleHorizontalConvolution8u4x(
|
| 93 |
+
uint32_t* C10_RESTRICT lineOut0,
|
| 94 |
+
uint32_t* C10_RESTRICT lineOut1,
|
| 95 |
+
uint32_t* C10_RESTRICT lineOut2,
|
| 96 |
+
uint32_t* C10_RESTRICT lineOut3,
|
| 97 |
+
const uint32_t* C10_RESTRICT lineIn0,
|
| 98 |
+
const uint32_t* C10_RESTRICT lineIn1,
|
| 99 |
+
const uint32_t* C10_RESTRICT lineIn2,
|
| 100 |
+
const uint32_t* C10_RESTRICT lineIn3,
|
| 101 |
+
int xsize,
|
| 102 |
+
int* xbounds,
|
| 103 |
+
int16_t* kk,
|
| 104 |
+
int kmax,
|
| 105 |
+
int coefs_precision);
|
| 106 |
+
|
| 107 |
+
void ImagingResampleHorizontalConvolution8u(
|
| 108 |
+
uint32_t* C10_RESTRICT lineOut,
|
| 109 |
+
const uint32_t* C10_RESTRICT lineIn,
|
| 110 |
+
int xsize,
|
| 111 |
+
int* xbounds,
|
| 112 |
+
int16_t* kk,
|
| 113 |
+
int kmax,
|
| 114 |
+
int coefs_precision);
|
| 115 |
+
|
| 116 |
+
void ImagingResampleVerticalConvolution8u(
|
| 117 |
+
uint32_t* C10_RESTRICT lineOut,
|
| 118 |
+
const uint32_t* C10_RESTRICT imIn,
|
| 119 |
+
int xmin,
|
| 120 |
+
int xmax,
|
| 121 |
+
int16_t* k,
|
| 122 |
+
int coefs_precision,
|
| 123 |
+
int xin);
|
| 124 |
+
|
| 125 |
+
// Horizontal resampling pass over an unpacked (4-channel, 32-bit-per-pixel)
// image. Rows are processed 4 at a time by the 4x AVX kernel, with a
// single-row kernel handling the remainder.
//
// `horiz_indices_weights` layout (from compute_indices_int16_weights_aa):
//   [0] int64 xmin per output column, [1] int64 kernel size per column,
//   [3] the int16 weights — stored in a tensor declared as double, hence
//   the reinterpreting cast below.
void ImagingResampleHorizontal(
    const at::Tensor & unpacked_output,
    const at::Tensor & unpacked_input,
    int ksize,
    const std::vector<at::Tensor>& horiz_indices_weights,
    unsigned int horiz_weights_precision) {
  // TODO: we may want to merge that into the fallback code (currently called
  // basic_loop_aa_horizontal<uint8_t>)
  // Although this may not be needed if / when we port all this code to use
  // Vec.h since this would potentially give us another fall-back implem
  int yy;

  // Weights tensor is typed double but holds int16 coefficients.
  int16_t* kk = (int16_t*)(horiz_indices_weights[3].data_ptr<double>());

  auto xout = unpacked_output.size(2);
  auto yout = unpacked_output.size(1);
  auto xin = unpacked_input.size(2);

  // Interleave (xmin, size) pairs into one int array, the layout the AVX
  // kernels expect (xbounds[2*i] = xmin, xbounds[2*i+1] = ksize_i).
  std::vector<int> bounds_vec(2 * xout, 0);
  int* bounds = bounds_vec.data();

  int64_t* idx_ptr_xmin = horiz_indices_weights[0].data_ptr<int64_t>();
  int64_t* idx_ptr_size = horiz_indices_weights[1].data_ptr<int64_t>();
  for (int i = 0; i < xout; i++) {
    bounds[2 * i + 0] = idx_ptr_xmin[i];
    bounds[2 * i + 1] = idx_ptr_size[i];
  }

  // One uint32_t per 4-channel pixel.
  uint32_t* unpacked_input_p = (uint32_t*) unpacked_input.data_ptr<uint8_t>();
  uint32_t* unpacked_output_p = (uint32_t*) unpacked_output.data_ptr<uint8_t>();

  yy = 0;
  // Main loop: 4 rows per iteration for better AVX register utilization.
  for (; yy < yout - 3; yy += 4) {
    ImagingResampleHorizontalConvolution8u4x(
        unpacked_output_p + yy * xout,
        unpacked_output_p + (yy + 1) * xout,
        unpacked_output_p + (yy + 2) * xout,
        unpacked_output_p + (yy + 3) * xout,
        unpacked_input_p + yy * xin,
        unpacked_input_p + (yy + 1) * xin,
        unpacked_input_p + (yy + 2) * xin,
        unpacked_input_p + (yy + 3) * xin,
        xout,
        bounds,
        kk,
        ksize,
        (int)horiz_weights_precision);
  }
  // Remainder: up to 3 rows, one at a time.
  for (; yy < yout; yy++) {
    ImagingResampleHorizontalConvolution8u(
        unpacked_output_p + yy * xout,
        unpacked_input_p + yy * xin,
        xout,
        bounds,
        kk,
        ksize,
        (int)horiz_weights_precision);
  }
}
|
| 184 |
+
|
| 185 |
+
// Vertical resampling pass over an unpacked (4-channel, 32-bit-per-pixel)
// image. For each output row, the vertical kernel reads its window of
// input rows [ymin, ymin+ymax) directly from the full input image.
//
// `vert_indices_weights` has the same layout as in
// ImagingResampleHorizontal: [0] ymin, [1] window size, [3] int16 weights
// stored in a double-typed tensor.
void ImagingResampleVertical(
    const at::Tensor & unpacked_output,
    const at::Tensor & unpacked_input,
    int ksize,
    const std::vector<at::Tensor>& vert_indices_weights,
    unsigned int vert_weights_precision) {
  // TODO: we may want to merge that into the fallback code (currently called
  // basic_loop_aa_vertical<uint8_t>)
  // Although this may not be needed if / when we port all this code to use
  // Vec.h since this would potentially give us another fall-back implem
  int ymin, ymax;
  int16_t* k = nullptr;
  // Weights tensor is typed double but holds int16 coefficients.
  int16_t* kk = (int16_t*)(vert_indices_weights[3].data_ptr<double>());

  int64_t* idx_ptr_xmin = vert_indices_weights[0].data_ptr<int64_t>();
  int64_t* idx_ptr_size = vert_indices_weights[1].data_ptr<int64_t>();

  uint32_t* unpacked_output_p = (uint32_t*) unpacked_output.data_ptr<uint8_t>();
  uint32_t* unpacked_input_p = (uint32_t*) unpacked_input.data_ptr<uint8_t>();

  auto xout = unpacked_output.size(2);
  auto yout = unpacked_output.size(1);

  for (const auto yy : c10::irange(yout)) {
    // Coefficients for this output row.
    k = &kk[yy * ksize];

    ymin = idx_ptr_xmin[yy];
    ymax = idx_ptr_size[yy];
    ImagingResampleVerticalConvolution8u(
        unpacked_output_p + yy * xout,
        unpacked_input_p,
        ymin,
        ymax,
        k,
        (int)vert_weights_precision,
        xout);
  }
}
|
| 223 |
+
|
| 224 |
+
// This is the only public entry point in this file. It supports bilinear
// mode for uint8 dtype when C <= 4, with or without antialias. The
// implem is based on PIL-SIMD.
// Its equivalent implementation (fallback) for when AVX isn't supported or when
// C > 4 is separable_upsample_generic_Nd_kernel_impl() There are a bunch of
// future improvement that can be done: look for the TODOs in this file.
// For details on how the weights are computed and how the multiplications are
// run on int (instead of float weights), see
// [ Weights computation for uint8_t and multiplication trick ]
// For details on how the AVX kernels are implemented, see
// https://gist.github.com/NicolasHug/47c97d731f05eaad5694c173849b86f5
// See also [ Support for antialias=False as a subcase of antilias=True ] to
// learn more about how the antialias=False case is computed. The same holds
// here: all these kernels are general enough to handle an arbitrary number of
// weights, but when aa=False they could be optimized further.
//
// input/output are (N, C, H, W) uint8 tensors. The resample is separable:
// an optional horizontal pass followed by an optional vertical pass, with
// intermediate results staged in 4-channel "unpacked" buffers (one uint32
// per pixel) unless the input is already 4-channel channels_last, in which
// case the kernels read/write the tensors directly.
template <typename scale_type, class F>
void upsample_avx_bilinear_uint8(
    const at::Tensor& input,
    const at::Tensor& output,
    bool align_corners,
    const scale_type& scales,
    bool antialias) {
  auto batch_size = input.size(0);
  auto num_channels = input.size(1);
  auto xin = input.size(3);
  auto yin = input.size(2);
  auto xout = output.size(3);
  auto yout = output.size(2);

  // Identity resize: plain copy, nothing to compute.
  if (xin == xout && yin == yout) {
    output.copy_(input);
    return;
  }

  auto need_horizontal = xout != xin;
  auto need_vertical = yout != yin;

  int ksize_horiz, ksize_vert;
  std::vector<at::Tensor> horiz_indices_weights, vert_indices_weights;
  unsigned int horiz_weights_precision, vert_weights_precision;

  // Precompute per-axis (xmin, size) indices and int16 weights once; they
  // are shared across the whole batch.
  if (need_horizontal) {
    int interp_dim = 3;
    std::tie(horiz_indices_weights, ksize_horiz, horiz_weights_precision) =
        F::compute_indices_int16_weights_aa(
            /*input_size=*/xin,
            /*output_size=*/xout,
            /*stride=*/1,
            /*ndims=*/4,
            /*reshape_dim=*/interp_dim,
            /*align_corners=*/align_corners,
            /*opt_scale=*/scales[interp_dim - 2],
            /*antialias=*/antialias,
            /*align_i32=*/true);
  }

  if (need_vertical) {
    int interp_dim = 2;
    std::tie(vert_indices_weights, ksize_vert, vert_weights_precision) =
        F::compute_indices_int16_weights_aa(
            /*input_size=*/yin,
            /*output_size=*/yout,
            /*stride=*/1,
            /*ndims=*/4,
            /*reshape_dim=*/interp_dim,
            /*align_corners=*/align_corners,
            /*opt_scale=*/scales[interp_dim - 2],
            /*antialias=*/antialias,
            /*align_i32=*/true);
  }

  // 4-channel channels_last input can skip the unpack/pack copies entirely.
  bool is_rgba = num_channels == 4 && input.is_contiguous(at::MemoryFormat::ChannelsLast);

  at::Tensor buffer_horiz, buffer_vert;
  // Intermediate buffers are only needed when the corresponding pass cannot
  // write straight into the final output.
  if (need_horizontal && !(is_rgba && !need_vertical)) {
    buffer_horiz = at::empty({4, yin, xout}, input.options());
  }
  if (need_vertical && !is_rgba) {
    buffer_vert = at::empty({4, yout, xout}, input.options());
  }

  // TODO: The unpack / pack operations create a copy of the original input and
  // output tensor. There should be a way to avoid these copies by instead
  // modifying the low-level kernels. Or maybe at least avoid copying the entire
  // tensors and just copy part of them (line by line).
  for (const auto i : c10::irange(batch_size)) {

    at::Tensor unpacked_input = (is_rgba) ? input[i] : unpack_rgb(input[i]);
    at::Tensor unpacked_output;

    if (need_horizontal) {

      // If no vertical pass follows and no repacking is needed, write the
      // horizontal result directly into the output image.
      at::Tensor unpacked_output_temp = (is_rgba && !need_vertical) ? output[i] : buffer_horiz;

      ImagingResampleHorizontal(
          unpacked_output_temp,
          unpacked_input,
          ksize_horiz,
          horiz_indices_weights,
          horiz_weights_precision);
      // The horizontal result becomes the input of the vertical pass.
      unpacked_output = unpacked_input = unpacked_output_temp;
    }
    if (need_vertical) {
      unpacked_output = (is_rgba) ? output[i] : buffer_vert;

      ImagingResampleVertical(
          unpacked_output,
          unpacked_input,
          ksize_vert,
          vert_indices_weights,
          vert_weights_precision);
    }

    // At least one of the two passes must have run.
    TORCH_INTERNAL_ASSERT(unpacked_output.defined());

    // Non-RGBA paths computed into a staging buffer; pack back into the
    // strided output tensor.
    if (!is_rgba) {
      pack_rgb(unpacked_output, output[i]);
    }
  }
}
|
| 344 |
+
|
| 345 |
+
// https://gist.github.com/NicolasHug/47c97d731f05eaad5694c173849b86f5
//
// Horizontal fixed-point convolution of 4 image rows at once. For each
// output pixel xx, accumulates sum_x(lineIn[xmin+x] * k[x]) per channel in
// 32-bit lanes, then shifts right by coefs_precision and saturates back to
// uint8. Rows 0/1 share one 256-bit accumulator (sss0, one row per 128-bit
// half), rows 2/3 share the other (sss1). Coefficients in `kk` are int16,
// `kmax` weights per output pixel; `xbounds` holds (xmin, size) pairs.
void ImagingResampleHorizontalConvolution8u4x(
    uint32_t* C10_RESTRICT lineOut0,
    uint32_t* C10_RESTRICT lineOut1,
    uint32_t* C10_RESTRICT lineOut2,
    uint32_t* C10_RESTRICT lineOut3,
    const uint32_t* C10_RESTRICT lineIn0,
    const uint32_t* C10_RESTRICT lineIn1,
    const uint32_t* C10_RESTRICT lineIn2,
    const uint32_t* C10_RESTRICT lineIn3,
    int xsize,
    int* xbounds,
    int16_t* kk,
    int kmax,
    int coefs_precision) {
  int xmin, xmax, x;
  int16_t* k;

  for (const auto xx : c10::irange(xsize)) {
    xmin = xbounds[xx * 2 + 0];
    xmax = xbounds[xx * 2 + 1];
    k = &kk[xx * kmax];
    x = 0;

    __m256i sss0, sss1;
    __m256i zero = _mm256_setzero_si256();
    // Fixed-point rounding bias: +0.5 ulp before the final right shift.
    __m256i initial = _mm256_set1_epi32(1 << (coefs_precision - 1));
    sss0 = initial;
    sss1 = initial;

    // Main loop: 4 input pixels (4 taps) per iteration. The shuffles
    // interleave pixel pairs so _mm256_madd_epi16 computes
    // p0*k0 + p1*k1 per channel in one instruction.
    for (; x < xmax - 3; x += 4) {
      __m256i pix, mmk0, mmk1, source;

      // Broadcast taps (k[x],k[x+1]) and (k[x+2],k[x+3]) as 32-bit pairs.
      mmk0 = _mm256_set1_epi32(*(int32_t*)&k[x]);
      mmk1 = _mm256_set1_epi32(*(int32_t*)&k[x + 2]);

      // Rows 0 and 1: low/high 128-bit halves of `source`.
      source = _mm256_inserti128_si256(
          _mm256_castsi128_si256(_mm_loadu_si128((__m128i*)&lineIn0[x + xmin])),
          _mm_loadu_si128((__m128i*)&lineIn1[x + xmin]),
          1);
      // clang-format off
      pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0,
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0));
      sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk0));
      pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
          -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8,
          -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8));
      sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk1));

      // Rows 2 and 3, same pattern into sss1.
      source = _mm256_inserti128_si256(
          _mm256_castsi128_si256(_mm_loadu_si128((__m128i*)&lineIn2[x + xmin])),
          _mm_loadu_si128((__m128i*)&lineIn3[x + xmin]),
          1);
      pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0,
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0));
      sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk0));
      pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
          -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8,
          -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8));
      sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk1));
    }

    // Tail: 2 taps per iteration.
    for (; x < xmax - 1; x += 2) {
      __m256i pix, mmk;

      mmk = _mm256_set1_epi32(*(int32_t*)&k[x]);

      pix = _mm256_inserti128_si256(
          _mm256_castsi128_si256(_mm_loadl_epi64((__m128i*)&lineIn0[x + xmin])),
          _mm_loadl_epi64((__m128i*)&lineIn1[x + xmin]),
          1);
      pix = _mm256_shuffle_epi8(pix, _mm256_set_epi8(
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0,
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0));
      sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));

      pix = _mm256_inserti128_si256(
          _mm256_castsi128_si256(_mm_loadl_epi64((__m128i*)&lineIn2[x + xmin])),
          _mm_loadl_epi64((__m128i*)&lineIn3[x + xmin]),
          1);
      pix = _mm256_shuffle_epi8(pix, _mm256_set_epi8(
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0,
          -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0));
      sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));
      // clang-format on
    }

    // Tail: single tap.
    for (; x < xmax; x++) {
      __m256i pix, mmk;

      // [16] xx k0 xx k0 xx k0 xx k0 xx k0 xx k0 xx k0 xx k0
      mmk = _mm256_set1_epi32(k[x]);

      // [16] xx a0 xx b0 xx g0 xx r0 xx a0 xx b0 xx g0 xx r0
      pix = _mm256_inserti128_si256(
          _mm256_castsi128_si256(mm_cvtepu8_epi32(&lineIn0[x + xmin])),
          mm_cvtepu8_epi32(&lineIn1[x + xmin]),
          1);
      sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));

      pix = _mm256_inserti128_si256(
          _mm256_castsi128_si256(mm_cvtepu8_epi32(&lineIn2[x + xmin])),
          mm_cvtepu8_epi32(&lineIn3[x + xmin]),
          1);
      sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));
    }

    // Drop the fixed-point fraction, then saturate 32 -> 16 -> 8 bits and
    // store one 4-channel pixel per row.
    sss0 = _mm256_srai_epi32(sss0, coefs_precision);
    sss1 = _mm256_srai_epi32(sss1, coefs_precision);
    sss0 = _mm256_packs_epi32(sss0, zero);
    sss1 = _mm256_packs_epi32(sss1, zero);
    sss0 = _mm256_packus_epi16(sss0, zero);
    sss1 = _mm256_packus_epi16(sss1, zero);
    lineOut0[xx] = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss0, 0));
    lineOut1[xx] = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss0, 1));
    lineOut2[xx] = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss1, 0));
    lineOut3[xx] = _mm_cvtsi128_si32(_mm256_extracti128_si256(sss1, 1));
  }
}
|
| 466 |
+
|
| 467 |
+
// https://gist.github.com/NicolasHug/47c97d731f05eaad5694c173849b86f5
//
// Single-row horizontal fixed-point convolution (remainder path of the 4x
// kernel above). For each output pixel xx, accumulates
// sum_x(lineIn[xmin+x] * k[x]) per channel in 32-bit lanes, then shifts by
// coefs_precision and saturates to uint8.
void ImagingResampleHorizontalConvolution8u(
    uint32_t* C10_RESTRICT lineOut,
    const uint32_t* C10_RESTRICT lineIn,
    int xsize,
    int* xbounds,
    int16_t* kk,
    int kmax,
    int coefs_precision) {
  int xmin, xmax, x;
  int16_t* k;

  for (const auto xx : c10::irange(xsize)) {
    __m128i sss;
    xmin = xbounds[xx * 2 + 0];
    xmax = xbounds[xx * 2 + 1];
    k = &kk[xx * kmax];
    x = 0;

    if (xmax < 8) {
      // Few taps: accumulate directly in 128 bits, seeded with the
      // fixed-point rounding bias.
      sss = _mm_set1_epi32(1 << (coefs_precision - 1));
    } else {
      // Many taps: accumulate in 256 bits (two partial sums that are added
      // together at the end).
      // Lower part will be added to higher, use only half of the error
      __m256i sss256 = _mm256_set1_epi32(1 << (coefs_precision - 2));

      // 8 taps per iteration: 8 coefficients broadcast to both 128-bit
      // halves, shuffled so madd multiplies matching (pixel, tap) pairs.
      for (; x < xmax - 7; x += 8) {
        __m256i pix, mmk, source;
        __m128i tmp = _mm_loadu_si128((__m128i*)&k[x]);
        __m256i ksource =
            _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);

        // clang-format off
        source = _mm256_loadu_si256((__m256i*)&lineIn[x + xmin]);
        pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
            -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0,
            -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0));
        mmk = _mm256_shuffle_epi8(ksource, _mm256_set_epi8(
            11,10, 9,8, 11,10, 9,8, 11,10, 9,8, 11,10, 9,8,
            3,2, 1,0, 3,2, 1,0, 3,2, 1,0, 3,2, 1,0));
        sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));

        pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
            -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8,
            -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8));
        mmk = _mm256_shuffle_epi8(ksource, _mm256_set_epi8(
            15,14, 13,12, 15,14, 13,12, 15,14, 13,12, 15,14, 13,12,
            7,6, 5,4, 7,6, 5,4, 7,6, 5,4, 7,6, 5,4));
        sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
        // clang-format on
      }

      // 4 taps per iteration.
      for (; x < xmax - 3; x += 4) {
        __m256i pix, mmk, source;
        __m128i tmp = _mm_loadl_epi64((__m128i*)&k[x]);
        __m256i ksource =
            _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);

        tmp = _mm_loadu_si128((__m128i*)&lineIn[x + xmin]);
        source = _mm256_insertf128_si256(_mm256_castsi128_si256(tmp), tmp, 1);

        // clang-format off
        pix = _mm256_shuffle_epi8(source, _mm256_set_epi8(
            -1,15, -1,11, -1,14, -1,10, -1,13, -1,9, -1,12, -1,8,
            -1,7, -1,3, -1,6, -1,2, -1,5, -1,1, -1,4, -1,0));
        mmk = _mm256_shuffle_epi8(ksource, _mm256_set_epi8(
            7,6, 5,4, 7,6, 5,4, 7,6, 5,4, 7,6, 5,4,
            3,2, 1,0, 3,2, 1,0, 3,2, 1,0, 3,2, 1,0));
        sss256 = _mm256_add_epi32(sss256, _mm256_madd_epi16(pix, mmk));
        // clang-format on
      }

      // Fold the two 128-bit halves into the final 128-bit accumulator.
      sss = _mm_add_epi32(
          _mm256_extracti128_si256(sss256, 0),
          _mm256_extracti128_si256(sss256, 1));
    }

    // Tail: 2 taps per iteration (SSE path, shared by both branches).
    for (; x < xmax - 1; x += 2) {
      __m128i mmk = _mm_set1_epi32(*(int32_t*)&k[x]);
      __m128i source = _mm_loadl_epi64((__m128i*)&lineIn[x + xmin]);
      __m128i pix = _mm_shuffle_epi8(
          source,
          _mm_set_epi8(-1, 7, -1, 3, -1, 6, -1, 2, -1, 5, -1, 1, -1, 4, -1, 0));
      sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
    }

    // Tail: single tap.
    for (; x < xmax; x++) {
      __m128i pix = mm_cvtepu8_epi32(&lineIn[x + xmin]);
      __m128i mmk = _mm_set1_epi32(k[x]);
      sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
    }
    // Drop the fixed-point fraction, saturate 32 -> 16 -> 8 bits, store one
    // 4-channel pixel.
    sss = _mm_srai_epi32(sss, coefs_precision);
    sss = _mm_packs_epi32(sss, sss);
    lineOut[xx] = _mm_cvtsi128_si32(_mm_packus_epi16(sss, sss));
  }
}
|
| 562 |
+
|
| 563 |
+
// https://gist.github.com/NicolasHug/47c97d731f05eaad5694c173849b86f5
//
// Vertical convolution pass of the antialiased resampling kernel.
// Produces one output line `lineOut` of `xin` packed 32-bit pixels (4 bytes
// per pixel, unpacked/repacked bytewise below).  Each output pixel is the
// fixed-point weighted sum, over input rows [xmin, xmin + xmax) of `imIn`
// (row stride `xin` pixels), of the corresponding column, using int16
// coefficients `k[0..xmax)` scaled by 2^coefs_precision.
// NOTE(review): despite the names, `x`/`xmax` iterate over *rows* here and
// `xx` over columns -- naming follows the Pillow original; confirm against
// the horizontal pass.
void ImagingResampleVerticalConvolution8u(
    uint32_t* C10_RESTRICT lineOut,
    const uint32_t* C10_RESTRICT imIn,
    int xmin,
    int xmax,
    int16_t* k,
    int coefs_precision,
    int xin) {
  int x;
  int xx = 0;
  int xsize = xin;

  // Rounding bias for the final arithmetic right shift by coefs_precision:
  // 1 << (coefs_precision - 1) implements round-half-up.
  __m128i initial = _mm_set1_epi32(1 << (coefs_precision - 1));
  __m256i initial_256 = _mm256_set1_epi32(1 << (coefs_precision - 1));

  // Main loop: 8 output pixels (32 bytes) per iteration using AVX2.
  // The 32 interleaved byte accumulations are kept in four 8x32-bit
  // accumulators sss0..sss3.
  for (; xx < xsize - 7; xx += 8) {
    __m256i sss0 = initial_256;
    __m256i sss1 = initial_256;
    __m256i sss2 = initial_256;
    __m256i sss3 = initial_256;
    x = 0;
    // Process two input rows per iteration so _mm_madd_epi16 can combine
    // the two coefficient/pixel products in one instruction.
    for (; x < xmax - 1; x += 2) {
      __m256i source, source1, source2;
      __m256i pix, mmk;

      // Load two adjacent int16 coefficients at once (as one int32).
      mmk = _mm256_set1_epi32(*(int32_t*)&k[x]);

      // Load 2 lines
      // (__m256i *) &imIn->image32[x + xmin][xx]
      source1 = _mm256_loadu_si256((__m256i*)(imIn + (x + xmin) * xin + xx));
      // (__m256i *) &imIn->image32[x + 1 + xmin][xx]
      source2 =
          _mm256_loadu_si256((__m256i*)(imIn + (x + 1 + xmin) * xin + xx));

      // Interleave bytes of the two rows, widen to 16-bit, then
      // multiply-add against the paired coefficients.
      source = _mm256_unpacklo_epi8(source1, source2);
      pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
      sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
      pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
      sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));

      source = _mm256_unpackhi_epi8(source1, source2);
      pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
      sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix, mmk));
      pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
      sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix, mmk));
    }
    // Remaining odd row (if xmax is odd): pair each byte with zero so the
    // same madd path applies with a single coefficient.
    for (; x < xmax; x += 1) {
      __m256i source, source1, pix, mmk;
      mmk = _mm256_set1_epi32(k[x]);

      // (__m256i *) &imIn->image32[x + xmin][xx])
      source1 = _mm256_loadu_si256((__m256i*)(imIn + (x + xmin) * xin + xx));

      source = _mm256_unpacklo_epi8(source1, _mm256_setzero_si256());
      pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
      sss0 = _mm256_add_epi32(sss0, _mm256_madd_epi16(pix, mmk));
      pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
      sss1 = _mm256_add_epi32(sss1, _mm256_madd_epi16(pix, mmk));

      source = _mm256_unpackhi_epi8(source1, _mm256_setzero_si256());
      pix = _mm256_unpacklo_epi8(source, _mm256_setzero_si256());
      sss2 = _mm256_add_epi32(sss2, _mm256_madd_epi16(pix, mmk));
      pix = _mm256_unpackhi_epi8(source, _mm256_setzero_si256());
      sss3 = _mm256_add_epi32(sss3, _mm256_madd_epi16(pix, mmk));
    }
    // Drop the fixed-point fraction, then saturate 32 -> 16 -> 8 bits and
    // store 8 packed output pixels.
    sss0 = _mm256_srai_epi32(sss0, coefs_precision);
    sss1 = _mm256_srai_epi32(sss1, coefs_precision);
    sss2 = _mm256_srai_epi32(sss2, coefs_precision);
    sss3 = _mm256_srai_epi32(sss3, coefs_precision);

    sss0 = _mm256_packs_epi32(sss0, sss1);
    sss2 = _mm256_packs_epi32(sss2, sss3);
    sss0 = _mm256_packus_epi16(sss0, sss2);
    _mm256_storeu_si256((__m256i*)&lineOut[xx], sss0);
  }

  // Tail: 2 output pixels per iteration with SSE registers.
  for (; xx < xsize - 1; xx += 2) {
    __m128i sss0 = initial; // left row
    __m128i sss1 = initial; // right row
    x = 0;
    for (; x < xmax - 1; x += 2) {
      __m128i source, source1, source2;
      __m128i pix, mmk;

      // Load two coefficients at once
      mmk = _mm_set1_epi32(*(int32_t*)&k[x]);

      // Load 2 lines
      // (__m128i *) &imIn->image32[x + xmin][xx])
      source1 = _mm_loadl_epi64((__m128i*)(imIn + (x + xmin) * xin + xx));
      // (__m128i *) &imIn->image32[x + 1 + xmin][xx]
      source2 = _mm_loadl_epi64((__m128i*)(imIn + (x + 1 + xmin) * xin + xx));

      source = _mm_unpacklo_epi8(source1, source2);
      pix = _mm_unpacklo_epi8(source, _mm_setzero_si128());
      sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix, mmk));
      pix = _mm_unpackhi_epi8(source, _mm_setzero_si128());
      sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix, mmk));
    }
    for (; x < xmax; x += 1) {
      __m128i source, source1, pix, mmk;
      mmk = _mm_set1_epi32(k[x]);

      // (__m128i *) &imIn->image32[x + xmin][xx]);
      source1 = _mm_loadl_epi64((__m128i*)(imIn + (x + xmin) * xin + xx));

      source = _mm_unpacklo_epi8(source1, _mm_setzero_si128());
      pix = _mm_unpacklo_epi8(source, _mm_setzero_si128());
      sss0 = _mm_add_epi32(sss0, _mm_madd_epi16(pix, mmk));
      pix = _mm_unpackhi_epi8(source, _mm_setzero_si128());
      sss1 = _mm_add_epi32(sss1, _mm_madd_epi16(pix, mmk));
    }
    sss0 = _mm_srai_epi32(sss0, coefs_precision);
    sss1 = _mm_srai_epi32(sss1, coefs_precision);

    sss0 = _mm_packs_epi32(sss0, sss1);
    sss0 = _mm_packus_epi16(sss0, sss0);
    _mm_storel_epi64((__m128i*)&lineOut[xx], sss0);
  }

  // Final tail: one output pixel at a time.
  for (; xx < xsize; xx++) {
    __m128i sss = initial;
    x = 0;
    for (; x < xmax - 1; x += 2) {
      __m128i source, source1, source2;
      __m128i pix, mmk;

      // Load two coefficients at once
      mmk = _mm_set1_epi32(*(int32_t*)&k[x]);

      // Load 2 lines
      // *(int *) &imIn->image32[x + xmin][xx]
      source1 = _mm_cvtsi32_si128(*(int*)(imIn + (x + xmin) * xin + xx));
      // *(int *) &imIn->image32[x + 1 + xmin][xx]
      source2 = _mm_cvtsi32_si128(*(int*)(imIn + (x + 1 + xmin) * xin + xx));

      source = _mm_unpacklo_epi8(source1, source2);
      pix = _mm_unpacklo_epi8(source, _mm_setzero_si128());
      sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
    }

    for (; x < xmax; x++) {
      // &imIn->image32[x + xmin][xx]
      __m128i pix = mm_cvtepu8_epi32(imIn + (x + xmin) * xin + xx);
      __m128i mmk = _mm_set1_epi32(k[x]);
      sss = _mm_add_epi32(sss, _mm_madd_epi16(pix, mmk));
    }
    sss = _mm_srai_epi32(sss, coefs_precision);
    sss = _mm_packs_epi32(sss, sss);
    lineOut[xx] = _mm_cvtsi128_si32(_mm_packus_epi16(sss, sss));
  }
}
|
| 717 |
+
|
| 718 |
+
} // anonymous namespace
|
| 719 |
+
#endif // CPU_CAPABILITY_AVX2
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/avx_mathfun.h
ADDED
|
@@ -0,0 +1,522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
/*
|
| 3 |
+
AVX implementation of sin, cos, sincos, exp and log
|
| 4 |
+
|
| 5 |
+
Based on "sse_mathfun.h", by Julien Pommier
|
| 6 |
+
http://gruntthepeon.free.fr/ssemath/
|
| 7 |
+
|
| 8 |
+
Copyright (C) 2012 Giovanni Garberoglio
|
| 9 |
+
Interdisciplinary Laboratory for Computational Science (LISC)
|
| 10 |
+
Fondazione Bruno Kessler and University of Trento
|
| 11 |
+
via Sommarive, 18
|
| 12 |
+
I-38123 Trento (Italy)
|
| 13 |
+
|
| 14 |
+
This software is provided 'as-is', without any express or implied
|
| 15 |
+
warranty. In no event will the authors be held liable for any damages
|
| 16 |
+
arising from the use of this software.
|
| 17 |
+
|
| 18 |
+
Permission is granted to anyone to use this software for any purpose,
|
| 19 |
+
including commercial applications, and to alter it and redistribute it
|
| 20 |
+
freely, subject to the following restrictions:
|
| 21 |
+
|
| 22 |
+
1. The origin of this software must not be misrepresented; you must not
|
| 23 |
+
claim that you wrote the original software. If you use this software
|
| 24 |
+
in a product, an acknowledgment in the product documentation would be
|
| 25 |
+
appreciated but is not required.
|
| 26 |
+
2. Altered source versions must be plainly marked as such, and must not be
|
| 27 |
+
misrepresented as being the original software.
|
| 28 |
+
3. This notice may not be removed or altered from any source distribution.
|
| 29 |
+
|
| 30 |
+
(this is the zlib license)
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
#include <ATen/native/cpu/Intrinsics.h>
|
| 34 |
+
|
| 35 |
+
/* The original source of this file has been modified. */
|
| 36 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 37 |
+
|
| 38 |
+
// 32-byte alignment attribute so the constant tables below can be loaded
// with aligned 256-bit moves.  (No #else branch: other compilers would
// leave ALIGN32_BEG undefined -- this header is only compiled where
// CPU_CAPABILITY_AVX2 is set, i.e. GCC-compatible or MSVC toolchains.)
#if defined(__GNUC__)
# define ALIGN32_BEG __attribute__((aligned(32)))
#elif defined(_WIN32)
# define ALIGN32_BEG __declspec(align(32))
#endif

typedef __m256 v8sf; // vector of 8 float (avx2)
typedef __m256i v8si; // vector of 8 int (avx2)

/* declare some AVX constants -- why can't I figure a better way to do that? */
// Each macro expands to a static aligned array of 8 identical lanes;
// users load them with *(v8sf*)_ps256_<Name> / *(v8si*)_pi32_256_<Name>.
#define _PS256_CONST(Name, Val) \
  static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PI32_CONST256(Name, Val) \
  static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PS256_CONST_TYPE(Name, Type, Val) \
  static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }

_PS256_CONST(1 , 1.0f);
_PS256_CONST(0p5, 0.5f);
/* the smallest non denormalized float number */
_PS256_CONST_TYPE(min_norm_pos, int, 0x00800000);
// IEEE-754 single-precision exponent mask and its complement (mantissa+sign).
_PS256_CONST_TYPE(mant_mask, int, 0x7f800000);
_PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);

// Sign-bit mask and its complement (used for abs / copysign-style ops).
_PS256_CONST_TYPE(sign_mask, int, (int)0x80000000);
_PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000);

// Small integer constants used by the range-reduction logic below.
_PI32_CONST256(0, 0);
_PI32_CONST256(1, 1);
_PI32_CONST256(inv1, ~1);
_PI32_CONST256(2, 2);
_PI32_CONST256(4, 4);
_PI32_CONST256(0x7f, 0x7f); // IEEE-754 single-precision exponent bias

// sqrt(1/2) and the cephes log polynomial/correction coefficients
// consumed by log256_ps().
_PS256_CONST(cephes_SQRTHF, 0.707106781186547524);
_PS256_CONST(cephes_log_p0, 7.0376836292E-2);
_PS256_CONST(cephes_log_p1, - 1.1514610310E-1);
_PS256_CONST(cephes_log_p2, 1.1676998740E-1);
_PS256_CONST(cephes_log_p3, - 1.2420140846E-1);
_PS256_CONST(cephes_log_p4, + 1.4249322787E-1);
_PS256_CONST(cephes_log_p5, - 1.6668057665E-1);
_PS256_CONST(cephes_log_p6, + 2.0000714765E-1);
_PS256_CONST(cephes_log_p7, - 2.4999993993E-1);
_PS256_CONST(cephes_log_p8, + 3.3333331174E-1);
_PS256_CONST(cephes_log_q1, -2.12194440e-4);
_PS256_CONST(cephes_log_q2, 0.693359375);
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
/* natural logarithm computed for 8 simultaneous float
   return NaN for x <= 0
   Strategy (cephes logf): split x into exponent e and mantissa m in
   [sqrt(1/2), sqrt(2)), evaluate a degree-8 polynomial in (m - 1), then
   recombine as log(m) + e*log(2) using the split constant
   log(2) = q2 + q1 for extra precision.
*/
inline v8sf log256_ps(v8sf x) {
  v8si imm0;
  v8sf one = *(v8sf*)_ps256_1;

  //v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
  v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);

  x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos);  /* cut off denormalized stuff */

  // Extract the biased exponent (bits 30..23).  (AVX2 integer shift.)
  imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23);

  /* keep only the fractional part: clear the exponent field and force it
     to 0 (via the 0.5 bits), leaving the mantissa in [0.5, 1). */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
  x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);

  // Unbias the exponent; this is again an AVX2 instruction.
  imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
  v8sf e = _mm256_cvtepi32_ps(imm0);

  e = _mm256_add_ps(e, one);

  /* part2:
     if( x < SQRTHF ) {
       e -= 1;
       x = x + x - 1.0;
     } else { x = x - 1.0; }
  */
  //v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
  v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
  v8sf tmp = _mm256_and_ps(x, mask);
  x = _mm256_sub_ps(x, one);
  e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
  x = _mm256_add_ps(x, tmp);

  v8sf z = _mm256_mul_ps(x,x);

  // Horner evaluation of the degree-8 log polynomial in x.
  v8sf y = *(v8sf*)_ps256_cephes_log_p0;
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
  y = _mm256_mul_ps(y, x);

  y = _mm256_mul_ps(y, z);

  // Add e * log_q1 (low part of log(2)) ...
  tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
  y = _mm256_add_ps(y, tmp);

  // ... subtract x^2/2 (second-order term of log(1+x)) ...
  tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);

  // ... and finish with x + e * log_q2 (high part of log(2)).
  tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
  x = _mm256_add_ps(x, y);
  x = _mm256_add_ps(x, tmp);
  x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
  return x;
}
|
| 160 |
+
|
| 161 |
+
// Input clamp bounds for exp256_ps: beyond these, single-precision exp
// would overflow/underflow.
_PS256_CONST(exp_hi, 88.3762626647949f);
_PS256_CONST(exp_lo, -88.3762626647949f);

// log2(e) and the two-part split of log(2) (C1 + C2) used for the
// extended-precision range reduction in exp256_ps.
_PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
_PS256_CONST(cephes_exp_C1, 0.693359375);
_PS256_CONST(cephes_exp_C2, -2.12194440e-4);

// Degree-5 polynomial coefficients for exp on the reduced interval.
_PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
_PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
_PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
_PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
_PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
_PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
|
| 174 |
+
|
| 175 |
+
// Computes exp(x) for 8 floats at once (cephes expf).
// Range-reduces x = g + n*log(2) with n = round(x * log2(e)), evaluates a
// degree-5 polynomial for exp(g), then scales by 2^n built directly in the
// float exponent field.  Input is clamped to [exp_lo, exp_hi] first.
inline v8sf exp256_ps(v8sf x) {
  v8sf tmp = _mm256_setzero_ps(), fx;
  v8si imm0;
  v8sf one = *(v8sf*)_ps256_1;

  x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
  x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);

  /* express exp(x) as exp(g + n*log(2)) */
  fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
  fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);

  /* how to perform a floorf with SSE: just below */
  //imm0 = _mm256_cvttps_epi32(fx);
  //tmp  = _mm256_cvtepi32_ps(imm0);

  tmp = _mm256_floor_ps(fx);

  /* if greater, subtract 1 */
  //v8sf mask = _mm256_cmpgt_ps(tmp, fx);
  v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
  mask = _mm256_and_ps(mask, one);
  fx = _mm256_sub_ps(tmp, mask);

  // Subtract n*log(2) from x in two steps (C1 + C2 == log(2)) to keep
  // extra precision in the reduced argument.
  tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
  v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
  x = _mm256_sub_ps(x, tmp);
  x = _mm256_sub_ps(x, z);

  z = _mm256_mul_ps(x,x);

  // Horner evaluation of the exp polynomial.
  v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, x);
  y = _mm256_add_ps(y, one);

  /* build 2^n by placing (n + bias) into the exponent field */
  imm0 = _mm256_cvttps_epi32(fx);
  // another two AVX2 instructions
  imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
  imm0 = _mm256_slli_epi32(imm0, 23);
  v8sf pow2n = _mm256_castsi256_ps(imm0);
  y = _mm256_mul_ps(y, pow2n);
  return y;
}
|
| 230 |
+
|
| 231 |
+
// Three-part split of -pi/4 used by the "extended precision modular
// arithmetic" range reduction in sin/cos/sincos (DP1+DP2+DP3 ~= pi/4).
_PS256_CONST(minus_cephes_DP1, -0.78515625);
_PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
_PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
// Polynomial coefficients for sin (odd series) ...
_PS256_CONST(sincof_p0, -1.9515295891E-4);
_PS256_CONST(sincof_p1, 8.3321608736E-3);
_PS256_CONST(sincof_p2, -1.6666654611E-1);
// ... and for cos (even series) on the reduced interval.
_PS256_CONST(coscof_p0, 2.443315711809948E-005);
_PS256_CONST(coscof_p1, -1.388731625493765E-003);
_PS256_CONST(coscof_p2, 4.166664568298827E-002);
_PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
/* evaluation of 8 sines at onces using AVX intrisics

   The code is the exact rewriting of the cephes sinf function.
   Precision is excellent as long as x < 8192 (I did not bother to
   take into account the special handling they have for greater values
   -- it does not return garbage for arguments over 8192, though, but
   the extra precision is missing).

   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
   surprising but correct result.

   Outline: reduce x to [0, pi/4] by octant (j = round-to-even of
   x * 4/pi), pick the sin or cos polynomial per octant via a mask, and
   restore the sign from the input sign and the octant.
*/
inline v8sf sin256_ps(v8sf x) { // any x
  v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
  v8si imm0, imm2;

  sign_bit = x;
  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

  /*
    Here we start a series of integer operations, which are in the
    realm of AVX2.
    If we don't have AVX, let's perform them using SSE2 directives
  */

  /* store the integer part of y in mm0 */
  imm2 = _mm256_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  // another two AVX2 instruction
  imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
  y = _mm256_cvtepi32_ps(imm2);

  /* get the swap sign flag (bit 2 of the octant, moved to the float
     sign position) */
  imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = _mm256_slli_epi32(imm0, 29);
  /* get the polynom selection mask
     there is one polynom for 0 <= x <= Pi/4
     and another one for Pi/4<x<=Pi/2

     Both branches will be computed.
  */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = _mm256_cmpeq_epi32(imm2,*(v8si*)_pi32_256_0);

  v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);
  sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  /* Evaluate the first polynom  (0 <= x <= Pi/4): the cos series */
  y = *(v8sf*)_ps256_coscof_p0;
  v8sf z = _mm256_mul_ps(x,x);

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynom  (Pi/4 <= x <= 0): the sin series */

  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynoms */
  xmm3 = poly_mask;
  y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
  y = _mm256_andnot_ps(xmm3, y);
  y = _mm256_add_ps(y,y2);
  /* update the sign */
  y = _mm256_xor_ps(y, sign_bit);

  return y;
}
|
| 345 |
+
|
| 346 |
+
/* almost the same as sin_ps: cos(x) = sin(x + pi/2), implemented by
   shifting the octant index by 2 after the same range reduction; the
   input sign is irrelevant since cos is even. */
inline v8sf cos256_ps(v8sf x) { // any x
  v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
  v8si imm0, imm2;

  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

  /* store the integer part of y in mm0 */
  imm2 = _mm256_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
  y = _mm256_cvtepi32_ps(imm2);
  // shift the octant by 2 (the pi/2 phase difference vs. sin)
  imm2 = _mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);

  /* get the swap sign flag */
  imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = _mm256_slli_epi32(imm0, 29);
  /* get the polynom selection mask */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);

  v8sf sign_bit = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  /* Evaluate the first polynom  (0 <= x <= Pi/4) */
  y = *(v8sf*)_ps256_coscof_p0;
  v8sf z = _mm256_mul_ps(x,x);

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynom  (Pi/4 <= x <= 0) */

  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynoms */
  xmm3 = poly_mask;
  y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
  y = _mm256_andnot_ps(xmm3, y);
  y = _mm256_add_ps(y,y2);
  /* update the sign */
  y = _mm256_xor_ps(y, sign_bit);

  return y;
}
|
| 422 |
+
|
| 423 |
+
/* since sin256_ps and cos256_ps are almost identical, sincos256_ps could replace both of them..
   it is almost as fast, and gives you a free cosine with your sine.
   Writes sin(x) to *s and cos(x) to *c; shares one range reduction and
   one pair of polynomial evaluations for both results. */
inline void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {

  v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
  v8si imm0, imm2, imm4;

  sign_bit_sin = x;
  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

  /* store the integer part of y in imm2 */
  imm2 = _mm256_cvttps_epi32(y);

  /* j=(j+1) & (~1) (see the cephes sources) */
  imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);

  y = _mm256_cvtepi32_ps(imm2);
  // keep the raw octant for the cosine-sign computation below
  imm4 = imm2;

  /* get the swap sign flag for the sine */
  imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = _mm256_slli_epi32(imm0, 29);
  //v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);

  /* get the polynom selection mask for the sine*/
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
  //v8sf poly_mask = _mm256_castsi256_ps(imm2);

  v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  // cosine sign flag: same octant logic as cos256_ps (shift by 2).
  imm4 = _mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
  imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
  imm4 = _mm256_slli_epi32(imm4, 29);

  v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);

  sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);

  /* Evaluate the first polynom  (0 <= x <= Pi/4) */
  v8sf z = _mm256_mul_ps(x,x);
  y = *(v8sf*)_ps256_coscof_p0;

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynom  (Pi/4 <= x <= 0) */

  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynoms, then recombine so
     xmm1 holds the sin branch and xmm2 the cos branch */
  xmm3 = poly_mask;
  v8sf ysin2 = _mm256_and_ps(xmm3, y2);
  v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
  y2 = _mm256_sub_ps(y2,ysin2);
  y = _mm256_sub_ps(y, ysin1);

  xmm1 = _mm256_add_ps(ysin1,ysin2);
  xmm2 = _mm256_add_ps(y,y2);

  /* update the sign */
  *s = _mm256_xor_ps(xmm1, sign_bit_sin);
  *c = _mm256_xor_ps(xmm2, sign_bit_cos);
}
|
| 521 |
+
|
| 522 |
+
#endif // CPU_CAPABILITY_AVX2
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/moments_utils.h
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <array>
|
| 4 |
+
#include <cstring>
|
| 5 |
+
#include <numeric>
|
| 6 |
+
#include <utility>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
#include <ATen/Parallel.h>
|
| 10 |
+
#include <ATen/OpMathType.h>
|
| 11 |
+
#include <ATen/cpu/vec/vec.h>
|
| 12 |
+
#include <ATen/native/cpu/utils.h>
|
| 13 |
+
#include <c10/util/SmallVector.h>
|
| 14 |
+
#include <c10/util/irange.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
namespace native {
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
template<typename T> using acc_t = at::opmath_type<T>;
|
| 21 |
+
|
| 22 |
+
constexpr int64_t kChunkSize = 16;
|
| 23 |
+
|
| 24 |
+
// Merge one set of accumulated moments (m0_add, m1_add, m2_add) into a
// running set (m0, m1, m2) using the pairwise/parallel combine step of
// Chan et al.'s variance algorithm:
//   m0: element count, m1: mean, m2: sum of squared deviations (M2).
// See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
template <typename T>
void AddMoments(
    int64_t m0_add,
    const T& m1_add,
    const T& m2_add,
    int64_t& m0,
    T& m1,
    T& m2) {
  const int64_t n = m0 + m0_add;
  // Weight of the incoming partition; guard against the empty-merge case
  // (n == 0) to avoid a division by zero.
  const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
  const T delta = m1_add - m1;
  m1 += c * delta;
  // M2 combine: M2 = M2_a + M2_b + delta^2 * n_a * n_b / n, written with
  // c = n_b / n so the product is delta^2 * c * n_a.
  m2 += m2_add + delta * delta * c * static_cast<T>(m0);
  m0 = n;
}
|
| 39 |
+
|
| 40 |
+
// Vectorized variant of AddMoments: merges per-lane moments held in
// Vectorized<T> registers with the same Chan combine step. Each SIMD lane
// carries an independent (mean, M2) pair that shares the scalar count m0.
template <typename T>
C10_ALWAYS_INLINE void AddMomentsVec(
    int64_t m0_add,
    const vec::Vectorized<T>& m1_add,
    const vec::Vectorized<T>& m2_add,
    int64_t& m0,
    vec::Vectorized<T>& m1,
    vec::Vectorized<T>& m2) {
  using Vec = vec::Vectorized<T>;
  const int64_t n = m0 + m0_add;
  // Scalar combine weight, broadcast to all lanes below; zero-guarded for
  // the empty-merge case.
  const T c = n == 0 ? static_cast<T>(0) : static_cast<T>(m0_add) / static_cast<T>(n);
  const Vec c_vec(c);
  const Vec delta = m1_add - m1;
  m1 += c_vec * delta;
  // M2 combine per lane: delta^2 * c * m0 (see scalar AddMoments).
  m2 += m2_add + delta * delta * c_vec * Vec(static_cast<T>(m0));
  m0 = n;
}
|
| 57 |
+
|
| 58 |
+
// Run Welford's update over one chunk of m0 SIMD vectors starting at X_ptr,
// then fold the chunk's moments into the level-0 accumulator
// (m0_stk0, m1_stk0, m2_stk0) via AddMomentsVec.
// c_vecs[j] is expected to hold the broadcast reciprocal 1/(j+1)
// (precomputed by the caller) so the mean update avoids a division.
template <typename T>
inline void UpdateMomentsVec(
    int64_t m0,
    const T* X_ptr,
    const std::array<vec::Vectorized<acc_t<T>>, kChunkSize>& c_vecs,
    int64_t& m0_stk0,
    vec::Vectorized<acc_t<T>>& m1_stk0,
    vec::Vectorized<acc_t<T>>& m2_stk0) {
  using Vec = vec::Vectorized<acc_t<T>>;
  Vec m1_vec(0);
  Vec m2_vec(0);
  for (const auto j : c10::irange(m0)) {
    const Vec x_vec = Vec::loadu(X_ptr + j * Vec::size());
    const Vec delta_vec = x_vec - m1_vec;
    // Welford: mean += delta / count, with 1/count taken from c_vecs.
    m1_vec += delta_vec * c_vecs[j];
    // M2 += delta * (x - new_mean); uses the *updated* mean on purpose.
    m2_vec += delta_vec * (x_vec - m1_vec);
  }
  AddMomentsVec(m0, m1_vec, m2_vec, m0_stk0, m1_stk0, m2_stk0);
}
|
| 77 |
+
|
| 78 |
+
// each bfloat16 vector will be converted to two float vectors,
|
| 79 |
+
// and accumulated successively on m1_stk0/m2_stk0.
|
| 80 |
+
// BFloat16 specialization: each bfloat16 vector widens to two float vectors,
// and both halves are accumulated successively on m1_stk0/m2_stk0.
// Consequently the level-0 accumulator sees 2 * m0 merges per chunk; the
// caller compensates when converting counts at the end (see
// RowwiseMomentsImpl's m0_add computation).
template <>
inline void UpdateMomentsVec<BFloat16>(
    int64_t m0,
    const BFloat16* X_ptr,
    const std::array<vec::Vectorized<float>, kChunkSize>& c_vecs,
    int64_t& m0_stk0,
    vec::Vectorized<float>& m1_stk0,
    vec::Vectorized<float>& m2_stk0) {
  using bVec = vec::Vectorized<BFloat16>;
  using fVec = vec::Vectorized<float>;
  fVec m1_fvec0(0), m1_fvec1(0);
  fVec m2_fvec0(0), m2_fvec1(0);
  for (const auto j : c10::irange(m0)) {
    const bVec x_bvec = bVec::loadu(X_ptr + j * bVec::size());
    fVec x_fvec0, x_fvec1;
    // Widen one bf16 vector into two f32 vectors (low/high halves).
    std::tie(x_fvec0, x_fvec1) = convert_bfloat16_float(x_bvec);
    const fVec delta_fvec0 = x_fvec0 - m1_fvec0;
    const fVec delta_fvec1 = x_fvec1 - m1_fvec1;
    // Welford update per half; c_vecs[j] broadcasts 1/(j+1).
    m1_fvec0 += delta_fvec0 * c_vecs[j];
    m1_fvec1 += delta_fvec1 * c_vecs[j];
    m2_fvec0 += delta_fvec0 * (x_fvec0 - m1_fvec0);
    m2_fvec1 += delta_fvec1 * (x_fvec1 - m1_fvec1);
  }
  AddMomentsVec(m0, m1_fvec0, m2_fvec0, m0_stk0, m1_stk0, m2_stk0);
  AddMomentsVec(m0, m1_fvec1, m2_fvec1, m0_stk0, m1_stk0, m2_stk0);
}
|
| 106 |
+
|
| 107 |
+
// Compute rowwise moments by Welford algorithm and cascade sum to improve
|
| 108 |
+
// numerical stability.
|
| 109 |
+
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
|
| 110 |
+
// https://en.wikipedia.org/wiki/Pairwise_summation
|
| 111 |
+
// Compute rowwise moments by Welford algorithm and cascade sum to improve
// numerical stability.
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
// https://en.wikipedia.org/wiki/Pairwise_summation
//
// Returns {mean, variance} of X[0..N) where variance uses N - ddof in the
// denominator. kMaxDepth bounds the cascade stack depth (SmallVector inline
// capacity); the caller picks it from CeilLog2 of the chunk count.
template <typename T, int64_t kMaxDepth>
std::pair<acc_t<T>, acc_t<T>> RowwiseMomentsImpl(const T* X, int64_t N, int64_t ddof = 0) {
  using T_ACC = acc_t<T>;

  constexpr int64_t kVecSize = vec::Vectorized<T>::size();
  constexpr int64_t kAccVecSize = vec::Vectorized<T_ACC>::size();
  const int64_t n = N / kVecSize;         // full input vectors
  const int64_t m = divup(n, kChunkSize); // chunks of kChunkSize vectors
  const int64_t depth = utils::CeilLog2(m);

  using Vec = vec::Vectorized<T_ACC>;
  const Vec kZeroVec(T_ACC(0));
  // Per-level accumulators of the cascade (pairwise) merge tree.
  c10::SmallVector<int64_t, kMaxDepth> m0_stk(depth, 0);
  c10::SmallVector<Vec, kMaxDepth> m1_stk(depth, kZeroVec);
  c10::SmallVector<Vec, kMaxDepth> m2_stk(depth, kZeroVec);

  for (const auto i : c10::irange(m)) {
    const T* X_ptr = X + i * kChunkSize * kVecSize;
    const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize);
    // One-time table of broadcast reciprocals 1/(j+1) used by the Welford
    // mean update (function-local static: computed once per instantiation).
    static std::array<Vec, kChunkSize> c_vecs = ([]() {
      std::array<Vec, kChunkSize> result;
      for (const auto i : c10::irange(kChunkSize)) {
        result[i] = Vec(T_ACC(1) / static_cast<T_ACC>(i + 1));
      }
      return result;
    })();
    UpdateMomentsVec(m0, X_ptr, c_vecs, m0_stk[0], m1_stk[0], m2_stk[0]);

    // Cascade merge: treat (i + 1) as a binary counter; every trailing zero
    // bit triggers a merge of level j-1 into level j, keeping each level's
    // accumulator covering ~2^j chunks (pairwise-summation shape).
    int64_t mask = i + 1;
    for (int64_t j = 1; j < depth && (mask & 1) == 0; ++j) {
      AddMomentsVec(
          m0_stk[j - 1],
          m1_stk[j - 1],
          m2_stk[j - 1],
          m0_stk[j],
          m1_stk[j],
          m2_stk[j]);
      m0_stk[j - 1] = 0;
      m1_stk[j - 1] = kZeroVec;
      m2_stk[j - 1] = kZeroVec;
      mask >>= 1;
    }
  }
  // Collapse all remaining cascade levels into level 0.
  for (const auto i : c10::irange(1, depth)) {
    AddMomentsVec(
        m0_stk[i], m1_stk[i], m2_stk[i], m0_stk[0], m1_stk[0], m2_stk[0]);
  }

  std::array<T_ACC, kAccVecSize> m1_arr{};
  std::array<T_ACC, kAccVecSize> m2_arr{};
  m1_stk[0].store(m1_arr.data());
  m2_stk[0].store(m2_arr.data());

  int64_t m0 = 0;
  T_ACC m1 = 0;
  T_ACC m2 = 0;
  // Scalar Welford over the tail elements that did not fill a full vector.
  for (int64_t i = n * kVecSize; i < N; ++i) {
    T_ACC x = static_cast<T_ACC>(X[i]);
    const T_ACC delta = x - m1;
    ++m0;
    m1 += delta / static_cast<T_ACC>(m0);
    m2 += delta * (x - m1);
  }
  // for BFloat16, each vector in m1_arr/m2_arr holds 2*n accumulated result
  // (the specialization merges twice per chunk), so the per-lane count is
  // total vectorized elements divided by the accumulator lane count.
  int64_t m0_add = n * kVecSize / kAccVecSize;
  for (const auto i : c10::irange(kAccVecSize)) {
    AddMoments(m0_add, m1_arr[i], m2_arr[i], m0, m1, m2);
  }

  return std::make_pair(m1, m2 / static_cast<T_ACC>(N - ddof));
}
|
| 182 |
+
|
| 183 |
+
template <typename T>
|
| 184 |
+
std::pair<acc_t<T>, acc_t<T>> RowwiseMoments(const T* X, int64_t N, int64_t ddof = 0) {
|
| 185 |
+
using Vec = vec::Vectorized<T>;
|
| 186 |
+
constexpr int64_t kVecSize = Vec::size();
|
| 187 |
+
const int64_t n = N / kVecSize;
|
| 188 |
+
const int64_t m = divup(n, kChunkSize);
|
| 189 |
+
const int64_t depth = utils::CeilLog2(m);
|
| 190 |
+
if (depth <= 4) {
|
| 191 |
+
return RowwiseMomentsImpl<T, 4>(X, N, ddof);
|
| 192 |
+
} else if (depth <= 8) {
|
| 193 |
+
return RowwiseMomentsImpl<T, 8>(X, N, ddof);
|
| 194 |
+
} else if (depth <= 16) {
|
| 195 |
+
return RowwiseMomentsImpl<T, 16>(X, N, ddof);
|
| 196 |
+
} else if (depth <= 32) {
|
| 197 |
+
return RowwiseMomentsImpl<T, 32>(X, N, ddof);
|
| 198 |
+
} else {
|
| 199 |
+
return RowwiseMomentsImpl<T, 64>(X, N, ddof);
|
| 200 |
+
}
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
} // namespace CPU_CAPABILITY
|
| 204 |
+
} // namespace native
|
| 205 |
+
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/cpu/radix_sort.h
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/Config.h>
|
| 3 |
+
|
| 4 |
+
#if !AT_PARALLEL_OPENMP
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
constexpr bool is_radix_sort_available() { return false; }
|
| 9 |
+
|
| 10 |
+
template <typename K, typename V>
|
| 11 |
+
std::pair<K*, V*> radix_sort_parallel(
|
| 12 |
+
K* inp_key_buf,
|
| 13 |
+
V* inp_value_buf,
|
| 14 |
+
K* tmp_key_buf,
|
| 15 |
+
V* tmp_value_buf,
|
| 16 |
+
int64_t elements_count,
|
| 17 |
+
int64_t max_value) {
|
| 18 |
+
TORCH_CHECK(false, "radix_sort_parallel: ATen is not compiled with OpenMP support");
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
} // at::native
|
| 22 |
+
|
| 23 |
+
#else
|
| 24 |
+
|
| 25 |
+
#include <omp.h>
|
| 26 |
+
#include <c10/util/llvmMathExtras.h>
|
| 27 |
+
|
| 28 |
+
namespace at::native {
|
| 29 |
+
|
| 30 |
+
namespace {
|
| 31 |
+
|
| 32 |
+
// `radix_sort_parallel` is primarily used for converting COO to CSR when sorting
|
| 33 |
+
// the indices, which is used in scatter_reduce optimization on CPU.
|
| 34 |
+
//
|
| 35 |
+
// Copied from fbgemm implementation here:
|
| 36 |
+
// https://github.com/pytorch/FBGEMM/blob/main/fbgemm_gpu/src/cpu_utils.cpp
|
| 37 |
+
//
|
| 38 |
+
// `radix_sort_parallel` is only available when ATen is compiled with OpenMP,
|
| 39 |
+
// since the algorithm requires sync between omp threads, which can not be perfectly
|
| 40 |
+
// mapped to `at::parallel_for` at the current stage.
|
| 41 |
+
//
|
| 42 |
+
// TODO: fix dependency of radix sort with fbgemm_gpu and use `fbgemm::radix_sort_parallel`
|
| 43 |
+
// directly, and remove this file.
|
| 44 |
+
|
| 45 |
+
// histogram size per thread
|
| 46 |
+
constexpr int RDX_HIST_SIZE = 256;
|
| 47 |
+
|
| 48 |
+
// One pass of the parallel LSD radix sort: distributes (key, value) pairs
// from input_* to output_* ordered by byte `pass` of the key.
// Must be called from inside an `#pragma omp parallel` region by all
// threads; the `omp for` / `omp barrier` directives below coordinate them.
// histogram / histogram_ps are shared scratch of size RDX_HIST_SIZE per
// thread (histogram_ps has one extra slot for the total).
// NOTE(review): elements_count is `int` here but the caller passes an
// int64_t — counts above INT_MAX would truncate; confirm upstream bounds.
template <typename K, typename V>
void radix_sort_kernel(
    K* input_keys,
    V* input_values,
    K* output_keys,
    V* output_values,
    int elements_count,
    int* histogram,
    int* histogram_ps,
    int pass) {
  int tid = omp_get_thread_num();
  int nthreads = omp_get_num_threads();
  // Largest multiple of 4 <= elements_count; the main loops are 4x unrolled.
  int elements_count_4 = elements_count / 4 * 4;

  int* local_histogram = &histogram[RDX_HIST_SIZE * tid];
  int* local_histogram_ps = &histogram_ps[RDX_HIST_SIZE * tid];

  // Step 1: compute histogram
  for (int i = 0; i < RDX_HIST_SIZE; i++) {
    local_histogram[i] = 0;
  }

  // Static schedule: each thread counts its own contiguous slice, so the
  // matching `omp for` in Step 3 revisits the same elements.
  #pragma omp for schedule(static)
  for (int64_t i = 0; i < elements_count_4; i += 4) {
    K key_1 = input_keys[i];
    K key_2 = input_keys[i + 1];
    K key_3 = input_keys[i + 2];
    K key_4 = input_keys[i + 3];

    local_histogram[(key_1 >> (pass * 8)) & 0xFF]++;
    local_histogram[(key_2 >> (pass * 8)) & 0xFF]++;
    local_histogram[(key_3 >> (pass * 8)) & 0xFF]++;
    local_histogram[(key_4 >> (pass * 8)) & 0xFF]++;
  }
  // The last thread also counts the <4-element tail.
  if (tid == (nthreads - 1)) {
    for (int64_t i = elements_count_4; i < elements_count; ++i) {
      K key = input_keys[i];
      local_histogram[(key >> (pass * 8)) & 0xFF]++;
    }
  }
  #pragma omp barrier
  // Step 2: prefix sum
  // Single-threaded exclusive scan over (bin, thread) so each thread gets
  // its starting write offset for every bin.
  if (tid == 0) {
    int sum = 0, prev_sum = 0;
    for (int bins = 0; bins < RDX_HIST_SIZE; bins++) {
      for (int t = 0; t < nthreads; t++) {
        sum += histogram[t * RDX_HIST_SIZE + bins];
        histogram_ps[t * RDX_HIST_SIZE + bins] = prev_sum;
        prev_sum = sum;
      }
    }
    histogram_ps[RDX_HIST_SIZE * nthreads] = prev_sum;
    // Sanity: the scan must account for every element.
    TORCH_CHECK(prev_sum == elements_count);
  }
  #pragma omp barrier

  // Step 3: scatter
  // Each thread replays its Step-1 slice, writing pairs at (and advancing)
  // its per-bin offsets; this keeps the sort stable within a pass.
  #pragma omp for schedule(static)
  for (int64_t i = 0; i < elements_count_4; i += 4) {
    K key_1 = input_keys[i];
    K key_2 = input_keys[i + 1];
    K key_3 = input_keys[i + 2];
    K key_4 = input_keys[i + 3];

    int bin_1 = (key_1 >> (pass * 8)) & 0xFF;
    int bin_2 = (key_2 >> (pass * 8)) & 0xFF;
    int bin_3 = (key_3 >> (pass * 8)) & 0xFF;
    int bin_4 = (key_4 >> (pass * 8)) & 0xFF;

    int pos;
    pos = local_histogram_ps[bin_1]++;
    output_keys[pos] = key_1;
    output_values[pos] = input_values[i];
    pos = local_histogram_ps[bin_2]++;
    output_keys[pos] = key_2;
    output_values[pos] = input_values[i + 1];
    pos = local_histogram_ps[bin_3]++;
    output_keys[pos] = key_3;
    output_values[pos] = input_values[i + 2];
    pos = local_histogram_ps[bin_4]++;
    output_keys[pos] = key_4;
    output_values[pos] = input_values[i + 3];
  }
  if (tid == (nthreads - 1)) {
    for (int64_t i = elements_count_4; i < elements_count; ++i) {
      K key = input_keys[i];
      int pos = local_histogram_ps[(key >> (pass * 8)) & 0xFF]++;
      output_keys[pos] = key;
      output_values[pos] = input_values[i];
    }
  }
}
|
| 140 |
+
|
| 141 |
+
} // namespace
|
| 142 |
+
|
| 143 |
+
constexpr bool is_radix_sort_available() { return true; }
|
| 144 |
+
|
| 145 |
+
template <typename K, typename V>
|
| 146 |
+
std::pair<K*, V*> radix_sort_parallel(
|
| 147 |
+
K* inp_key_buf,
|
| 148 |
+
V* inp_value_buf,
|
| 149 |
+
K* tmp_key_buf,
|
| 150 |
+
V* tmp_value_buf,
|
| 151 |
+
int64_t elements_count,
|
| 152 |
+
int64_t max_value) {
|
| 153 |
+
int maxthreads = omp_get_max_threads();
|
| 154 |
+
std::unique_ptr<int []> histogram_tmp(new int[RDX_HIST_SIZE * maxthreads]);
|
| 155 |
+
std::unique_ptr<int []> histogram_ps_tmp(new int[RDX_HIST_SIZE * maxthreads + 1]);
|
| 156 |
+
int* histogram = histogram_tmp.get();
|
| 157 |
+
int* histogram_ps = histogram_ps_tmp.get();
|
| 158 |
+
if (max_value == 0) {
|
| 159 |
+
return std::make_pair(inp_key_buf, inp_value_buf);
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
// __builtin_clz is not portable
|
| 163 |
+
int num_bits = sizeof(K) * 8 - llvm::countLeadingZeros(static_cast<std::make_unsigned_t<K>>(max_value));
|
| 164 |
+
unsigned int num_passes = (num_bits + 7) / 8;
|
| 165 |
+
|
| 166 |
+
#pragma omp parallel
|
| 167 |
+
{
|
| 168 |
+
K* input_keys = inp_key_buf;
|
| 169 |
+
V* input_values = inp_value_buf;
|
| 170 |
+
K* output_keys = tmp_key_buf;
|
| 171 |
+
V* output_values = tmp_value_buf;
|
| 172 |
+
|
| 173 |
+
for (unsigned int pass = 0; pass < num_passes; pass++) {
|
| 174 |
+
radix_sort_kernel(
|
| 175 |
+
input_keys,
|
| 176 |
+
input_values,
|
| 177 |
+
output_keys,
|
| 178 |
+
output_values,
|
| 179 |
+
elements_count,
|
| 180 |
+
histogram,
|
| 181 |
+
histogram_ps,
|
| 182 |
+
pass);
|
| 183 |
+
|
| 184 |
+
std::swap(input_keys, output_keys);
|
| 185 |
+
std::swap(input_values, output_values);
|
| 186 |
+
#pragma omp barrier
|
| 187 |
+
}
|
| 188 |
+
}
|
| 189 |
+
return (
|
| 190 |
+
num_passes % 2 == 0 ? std::make_pair(inp_key_buf, inp_value_buf)
|
| 191 |
+
: std::make_pair(tmp_key_buf, tmp_value_buf));
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
} // at::native
|
| 195 |
+
|
| 196 |
+
#endif
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/AffineQuantizer.h
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/Dispatch.h>
|
| 5 |
+
#include <ATen/native/DispatchStub.h>
|
| 6 |
+
#include <ATen/native/quantized/AffineQuantizerBase.h>
|
| 7 |
+
|
| 8 |
+
namespace at {
namespace native {

// Quantize a real (float) tensor into a pre-allocated quantized tensor with
// one scale/zero_point for the whole tensor. Returns qtensor.
Tensor& quantize_tensor_per_tensor_affine(
    const Tensor& rtensor,
    Tensor& qtensor,
    double scale,
    int64_t zero_point);
// Per-channel variant: scales/zero_points are 1-D tensors indexed along
// `axis`.
Tensor& quantize_tensor_per_channel_affine(
    const Tensor& rtensor,
    Tensor& qtensor,
    Tensor scales,
    Tensor zero_points,
    int64_t axis);

// Per-channel variant where qparams (including zero_points) are floats.
Tensor& quantize_tensor_per_channel_float_qparams(
    const Tensor& rtensor,
    Tensor& qtensor,
    Tensor scales,
    Tensor zero_points,
    int64_t axis);

// Inverse of the quantize_* functions above: fill rtensor from qtensor.
// Returns rtensor.
Tensor& dequantize_tensor_per_tensor_affine(
    const Tensor& qtensor,
    Tensor& rtensor,
    double scale,
    int64_t zero_point);
Tensor& dequantize_tensor_per_channel_affine(
    const Tensor& qtensor,
    Tensor& rtensor,
    Tensor scales,
    Tensor zero_points,
    int64_t axis);
Tensor& dequantize_tensor_per_channel_float_qparams(
    const Tensor& qtensor,
    Tensor& rtensor,
    Tensor scales,
    Tensor zero_points,
    int64_t axis);

// Function-pointer types for the CPU-capability dispatch stubs below.
using quantize_tensor_per_tensor_affine_fn =
    void (*)(const Tensor& rtensor, Tensor& qtensor, double scale, int64_t zero_point);

using quantize_tensor_per_channel_affine_fn = void (*)(
    const Tensor& rtensor,
    Tensor& qtensor,
    const Tensor& scales,
    const Tensor& zero_points,
    int64_t axis);

using quantize_tensor_per_channel_float_qparams_fn = void (*)(
    const Tensor& rtensor,
    Tensor& qtensor,
    const Tensor& scales,
    const Tensor& zero_points,
    int64_t axis);

using dequantize_tensor_per_tensor_affine_fn =
    void (*)(const Tensor& qtensor, Tensor& rtensor, double scale, int64_t zero_point);

using dequantize_tensor_per_channel_affine_fn = void (*)(
    const Tensor& qtensor,
    Tensor& rtensor,
    const Tensor& scales,
    const Tensor& zero_points,
    int64_t axis);

using dequantize_tensor_per_channel_float_qparams_fn = void (*)(
    const Tensor& qtensor,
    Tensor& rtensor,
    const Tensor& scales,
    const Tensor& zero_points,
    int64_t axis);

// Sub-byte (e.g. 4-bit) per-tensor variants take float qparams.
using quantize_tensor_per_tensor_affine_sub_byte_fn =
    void (*)(const Tensor& rtensor, Tensor& qtensor, float scale, float zero_point);

using dequantize_tensor_per_tensor_affine_sub_byte_fn =
    void (*)(const Tensor& qtensor, Tensor& rtensor, float scale, float zero_point);

// DispatchStub declarations: one per function-pointer type; kernels register
// per-CPU-capability implementations elsewhere.
DECLARE_DISPATCH(
    quantize_tensor_per_tensor_affine_fn,
    quantize_tensor_per_tensor_affine_stub);
DECLARE_DISPATCH(
    quantize_tensor_per_channel_affine_fn,
    quantize_tensor_per_channel_affine_stub);
DECLARE_DISPATCH(
    quantize_tensor_per_channel_float_qparams_fn,
    quantize_tensor_per_channel_float_qparams_stub);

DECLARE_DISPATCH(
    dequantize_tensor_per_tensor_affine_fn,
    dequantize_tensor_per_tensor_affine_stub);
DECLARE_DISPATCH(
    dequantize_tensor_per_channel_affine_fn,
    dequantize_tensor_per_channel_affine_stub);
DECLARE_DISPATCH(
    dequantize_tensor_per_channel_float_qparams_fn,
    dequantize_tensor_per_channel_float_qparams_stub);

DECLARE_DISPATCH(
    quantize_tensor_per_tensor_affine_sub_byte_fn,
    quantize_tensor_per_tensor_affine_sub_byte_stub);

DECLARE_DISPATCH(
    dequantize_tensor_per_tensor_affine_sub_byte_fn,
    dequantize_tensor_per_tensor_affine_sub_byte_stub);

// Typed helpers; T selects the quantized element type. Definitions and
// explicit instantiations live in the corresponding .cpp.
template <typename T>
TORCH_API Tensor quantize_tensor(
    Tensor rtensor,
    Tensor qtensor,
    double scale,
    int64_t zero_point);
template <typename T>
TORCH_API Tensor dequantize_tensor(
    Tensor qtensor,
    Tensor rtensor,
    double scale,
    int64_t zero_point);

} // namespace native
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/PackedParams.h
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/core/ivalue.h>
|
| 5 |
+
|
| 6 |
+
// Abstract base for backend-specific packed linear (fully-connected) weight
// + bias parameters used by quantized linear ops. Concrete subclasses
// (FBGEMM, QNNPACK, ...) implement the pure-virtual apply* methods; the
// non-pure hooks default to throwing for backends that lack them.
struct LinearPackedParamsBase : public torch::jit::CustomClassHolder {
  // Quantized input -> quantized output linear.
  virtual at::Tensor apply(
      at::Tensor input,
      double output_scale,
      int64_t output_zero_point) = 0;
  // Same as apply() with a fused ReLU on the output.
  virtual at::Tensor apply_relu(
      at::Tensor input,
      double output_scale,
      int64_t output_zero_point) = 0;

  // out variant of LinearPackedParamsBase::apply
  virtual at::Tensor& apply_out(
      const at::Tensor& /*input*/,
      double /*output_scale*/,
      int64_t /*output_zero_point*/,
      at::Tensor& output) {
    throw std::runtime_error(
        "apply_out is not implemented for this packed "
        "parameter type");
    return output;
  }

  virtual at::Tensor& apply_relu_out(
      const at::Tensor& /*input*/,
      double /*output_scale*/,
      int64_t /*output_zero_point*/,
      at::Tensor& output) {
    throw std::runtime_error(
        "apply_relu_out is not implemented for this packed "
        "parameter type");
    return output;
  }

  // Corresponding pattern (the ops with `*` are part of the pattern that
  // represents the computation of quantized::linear_with_input_q_dq_qweight_dq_output_fp32):
  // input -> q* -> dq* -> linear* ->
  // qweight -> dq* /
  //
  // After fusion:
  // input -> quantized::linear_with_input_q_dq_qweight_dq_output_fp32* ->
  // qweight /
  //
  // Additional Note: the weight is packed as well
  // Params:
  //    X: float32 Tensor, will be quantized to quint8 in the op
  //    W_prepack: packed qint8 quantized weight and bias
  // Returns:
  //    Y: float32 Tensor
  virtual at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32(
      at::Tensor input,
      double input_scale,
      int64_t input_zero_point) {
    throw std::runtime_error(
        "apply_with_input_q_dq_qweight_dq_output_fp32 is not implemented for this packed "
        "parameter type");
    return {};
  }

  // Corresponding pattern (the ops with `*` are part of the pattern that
  // represents the computation of quantized::linear_with_input_q_dq_qweight_dq_relu_output_fp32):
  // input -> q* -> dq* -> linear* -> relu* ->
  // qweight -> dq* /
  //
  // After fusion:
  // input -> quantized::linear_with_input_q_dq_qweight_dq_relu_output_fp32* ->
  // qweight /
  //
  // Additional Note: the weight is packed as well
  // Params:
  //    input: float32 Tensor, will be quantized to quint8 in the op
  // Returns:
  //    float32 Tensor
  virtual at::Tensor apply_with_input_q_dq_qweight_dq_relu_output_fp32(
      at::Tensor input,
      double input_scale,
      int64_t input_zero_point) {
    throw std::runtime_error(
        "apply_with_input_q_dq_qweight_dq_relu_output_fp32 is not implemented for this packed "
        "parameter type");
    return {};
  }

  // Dynamic quantization: float input, qparams computed from the input's
  // range at runtime; reduce_range trims the activation range (for fbgemm
  // overflow avoidance — semantics defined by the concrete backend).
  virtual at::Tensor apply_dynamic(
      at::Tensor input,
      bool reduce_range = false) = 0;
  virtual at::Tensor apply_dynamic_relu(
      at::Tensor input,
      bool reduce_range = false) = 0;

  virtual at::Tensor& apply_dynamic_out(
      const at::Tensor& /* input */,
      at::Tensor& output,
      bool /* reduce_range */) {
    throw std::runtime_error(
        "apply_dynamic_out is not implemented for this packed "
        "parameter type");
    return output;
  }
  virtual at::Tensor& apply_dynamic_relu_out(
      const at::Tensor& /* input */,
      at::Tensor& output,
      bool /* reduce_range */) {
    throw std::runtime_error(
        "apply_dynamic_relu_out is not implemented for this packed "
        "parameter type");
    return output;
  }

  // Recover the original (weight, optional bias) pair from the packed form.
  virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;

  virtual c10::optional<at::Tensor> bias() = 0;

  virtual void set_bias(c10::optional<at::Tensor> /*bias*/) {
    throw std::runtime_error(
        "set_bias is not implemented for this packed "
        "parameter type");
  }
};
|
| 124 |
+
|
| 125 |
+
// Abstract base for backend-specific packed convolution parameters
// (weight, bias, and conv geometry) used by quantized conv ops.
// kSpatialDim is the number of spatial dimensions (2 = Conv2d, 3 = Conv3d).
template <int kSpatialDim = 2>
struct ConvPackedParamsBase : public torch::jit::CustomClassHolder {
  // Quantized input -> quantized output convolution.
  virtual at::Tensor apply(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point) = 0;
  // Same as apply() with a fused ReLU on the output.
  virtual at::Tensor apply_relu(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point) = 0;
  // Dynamic quantization: float input, output qparams derived at runtime.
  virtual at::Tensor apply_dynamic(
      const at::Tensor& input,
      bool reduce_range) = 0;

  // Recover the original (weight, optional bias) pair from the packed form.
  virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;

  // Convolution geometry captured at prepack time.
  virtual torch::List<int64_t> stride() const = 0;
  virtual torch::List<int64_t> padding() const = 0;
  virtual torch::List<int64_t> output_padding() const = 0;
  virtual torch::List<int64_t> dilation() const = 0;
  virtual int64_t groups() const = 0;
  // True for transposed (deconvolution) weights.
  virtual bool transpose() const = 0;
};
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_PYTORCH_QNNPACK
|
| 4 |
+
#include <ATen/core/Tensor.h>
|
| 5 |
+
#include <c10/util/irange.h>
|
| 6 |
+
#include <pytorch_qnnpack.h>
|
| 7 |
+
#include <qnnpack_func.h>
|
| 8 |
+
#include <ATen/native/quantized/cpu/XnnpackUtils.h>
|
| 9 |
+
#include <ATen/native/quantized/PackedParams.h>
|
| 10 |
+
#include <ATen/native/utils/Factory.h>
|
| 11 |
+
|
| 12 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 13 |
+
#include <ATen/Functions.h>
|
| 14 |
+
#else
|
| 15 |
+
#include <ATen/ops/empty.h>
|
| 16 |
+
#endif
|
| 17 |
+
|
| 18 |
+
#include <utility>
|
| 19 |
+
|
| 20 |
+
// Deleter functor so that a raw pytorch_qnnp_operator_t can be owned by a
// std::unique_ptr (see the convolution_op member of PackedConvWeightsQnnp
// in this file).
struct QnnpackOperatorDeleter {
  void operator()(pytorch_qnnp_operator_t op) {
    pytorch_qnnp_delete_operator(op);
  }
};
|
| 25 |
+
|
| 26 |
+
// PackedWeight struct for QNNPACK stores the original Weight and Bias as
|
| 27 |
+
// QNNPACK currently does not support an unpack function.
|
| 28 |
+
// For PyTorch Mobile, once the model is scripted and serialized we don't need
|
| 29 |
+
// to call unpack, so we can save some memory by checking for this case and free
|
| 30 |
+
// the original weights after packing.
|
| 31 |
+
// Input scale is set to null in pre-pack step. QNNPACK needs bias quantized
|
| 32 |
+
// with input scale which is available at runtime in pytorch. During runtime if
|
| 33 |
+
// input scale value changes then we requantize bias with the updated scale. For
|
| 34 |
+
// inference we expect the graph to be static so the input scale should not
|
| 35 |
+
// change across consecutive inference calls.
|
| 36 |
+
struct PackedLinearWeightsQnnp : public LinearPackedParamsBase {
|
| 37 |
+
PackedLinearWeightsQnnp(
|
| 38 |
+
std::unique_ptr<qnnpack::PackBMatrix> w,
|
| 39 |
+
at::Tensor orig_weight,
|
| 40 |
+
at::Tensor bias,
|
| 41 |
+
c10::optional<double> input_scale,
|
| 42 |
+
at::Tensor w_scales,
|
| 43 |
+
std::vector<uint8_t>&& w_zps)
|
| 44 |
+
: w(std::move(w)),
|
| 45 |
+
orig_weight(std::move(orig_weight)),
|
| 46 |
+
bias_(at::native::mobile::allocate_padded_contiguous_if_needed(
|
| 47 |
+
bias, bias.suggest_memory_format())),
|
| 48 |
+
per_channel_(this->orig_weight.qscheme() == at::kPerChannelAffine),
|
| 49 |
+
input_scale(std::move(input_scale)),
|
| 50 |
+
w_scales(std::move(w_scales)),
|
| 51 |
+
w_zero_points(std::move(w_zps)) {
|
| 52 |
+
weight_sizes = this->orig_weight.sizes().vec();
|
| 53 |
+
n_elements = std::accumulate(std::begin(weight_sizes), std::end(weight_sizes), 1, std::multiplies<double>());
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
std::unique_ptr<qnnpack::PackBMatrix> w;
|
| 57 |
+
at::Tensor orig_weight;
|
| 58 |
+
at::Tensor bias_;
|
| 59 |
+
bool per_channel_;
|
| 60 |
+
c10::optional<double> input_scale;
|
| 61 |
+
at::Tensor w_scales;
|
| 62 |
+
std::vector<uint8_t> w_zero_points;
|
| 63 |
+
std::vector<float> requantization_scales;
|
| 64 |
+
std::vector<int64_t> weight_sizes;
|
| 65 |
+
int n_elements;
|
| 66 |
+
|
| 67 |
+
at::Tensor apply(
|
| 68 |
+
at::Tensor input,
|
| 69 |
+
double output_scale,
|
| 70 |
+
int64_t output_zero_point) override;
|
| 71 |
+
at::Tensor apply_relu(
|
| 72 |
+
at::Tensor input,
|
| 73 |
+
double output_scale,
|
| 74 |
+
int64_t output_zero_point) override;
|
| 75 |
+
|
| 76 |
+
at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override;
|
| 77 |
+
at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override;
|
| 78 |
+
|
| 79 |
+
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
|
| 80 |
+
|
| 81 |
+
c10::optional<at::Tensor> bias() override {
|
| 82 |
+
return bias_;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
|
| 86 |
+
at::Tensor weight,
|
| 87 |
+
c10::optional<at::Tensor> bias);
|
| 88 |
+
|
| 89 |
+
bool per_channel() const {
|
| 90 |
+
return per_channel_;
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
private:
|
| 94 |
+
std::mutex qnnp_mutex_;
|
| 95 |
+
|
| 96 |
+
#ifdef USE_XNNPACK
|
| 97 |
+
xnnpack_operator xnnp_linear_op;
|
| 98 |
+
|
| 99 |
+
template <typename scalar_t, bool kReluFused>
|
| 100 |
+
at::Tensor apply_impl_xnnp(
|
| 101 |
+
const at::Tensor& input,
|
| 102 |
+
double output_scale,
|
| 103 |
+
int64_t output_zero_point);
|
| 104 |
+
#endif // USE_XNNPACK
|
| 105 |
+
|
| 106 |
+
template <bool ReluFused>
|
| 107 |
+
at::Tensor apply_impl(
|
| 108 |
+
at::Tensor input,
|
| 109 |
+
double output_scale,
|
| 110 |
+
int64_t output_zero_point);
|
| 111 |
+
|
| 112 |
+
template <bool ReluFused>
|
| 113 |
+
at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range);
|
| 114 |
+
};
|
| 115 |
+
|
| 116 |
+
template <int kSpatialDim = 2>
|
| 117 |
+
struct PackedConvWeightsQnnp : public ConvPackedParamsBase<kSpatialDim> {
|
| 118 |
+
PackedConvWeightsQnnp(
|
| 119 |
+
std::unique_ptr<qnnpack::PrePackConvWeights> w,
|
| 120 |
+
at::Tensor orig_weight,
|
| 121 |
+
at::Tensor bias,
|
| 122 |
+
torch::List<int64_t> stride,
|
| 123 |
+
torch::List<int64_t> padding,
|
| 124 |
+
torch::List<int64_t> output_padding,
|
| 125 |
+
torch::List<int64_t> dilation,
|
| 126 |
+
int64_t groups,
|
| 127 |
+
bool transpose,
|
| 128 |
+
c10::optional<double> input_scale,
|
| 129 |
+
std::vector<int64_t> kernel,
|
| 130 |
+
at::Tensor w_scale,
|
| 131 |
+
std::vector<uint8_t>&& w_zps,
|
| 132 |
+
bool is_per_channel)
|
| 133 |
+
: w(std::move(w)),
|
| 134 |
+
orig_weight(std::move(orig_weight)),
|
| 135 |
+
bias(std::move(bias)),
|
| 136 |
+
stride_(std::move(stride)),
|
| 137 |
+
padding_(std::move(padding)),
|
| 138 |
+
output_padding_(std::move(output_padding)),
|
| 139 |
+
dilation_(std::move(dilation)),
|
| 140 |
+
groups_(groups),
|
| 141 |
+
transpose_(transpose),
|
| 142 |
+
is_per_channel_(is_per_channel),
|
| 143 |
+
input_scale(input_scale),
|
| 144 |
+
kernel_(std::move(kernel)),
|
| 145 |
+
w_scales(std::move(w_scale)),
|
| 146 |
+
w_zero_points(std::move(w_zps)) {
|
| 147 |
+
const bool any_padding = std::any_of(
|
| 148 |
+
padding_.begin(), padding_.end(), [](const auto& e) { return e != 0; });
|
| 149 |
+
const size_t kernel_size =
|
| 150 |
+
std::accumulate(kernel_.begin(), kernel_.end(), 1, std::multiplies<>());
|
| 151 |
+
|
| 152 |
+
const size_t group_input_channels = transpose
|
| 153 |
+
? this->orig_weight.size(0) / groups
|
| 154 |
+
: this->orig_weight.size(1);
|
| 155 |
+
const size_t group_output_channels = transpose
|
| 156 |
+
? this->orig_weight.size(1)
|
| 157 |
+
: this->orig_weight.size(0) / groups;
|
| 158 |
+
|
| 159 |
+
const size_t kernel_depth = kSpatialDim == 3 ? kernel_[0] : 1;
|
| 160 |
+
const size_t kernel_height = kernel_[kSpatialDim - 2];
|
| 161 |
+
const size_t kernel_width = kernel_[kSpatialDim - 1];
|
| 162 |
+
|
| 163 |
+
pytorch_qnnp_ukernel_type ukernel_type;
|
| 164 |
+
if (transpose_) {
|
| 165 |
+
ukernel_type = pytorch_qnnp_ukernel_type_conv;
|
| 166 |
+
} else {
|
| 167 |
+
ukernel_type = pytorch_qnnp_ukernel_type_none;
|
| 168 |
+
|
| 169 |
+
const bool has_depthwise_dimensions =
|
| 170 |
+
(kSpatialDim == 2 &&
|
| 171 |
+
((kernel_height == 3 && kernel_width == 3) ||
|
| 172 |
+
(kernel_height == 5 && kernel_width == 5))) ||
|
| 173 |
+
(kSpatialDim == 3 && kernel_height == 3 && kernel_width == 3 &&
|
| 174 |
+
kernel_depth == 3);
|
| 175 |
+
const bool has_depthwise_grouping =
|
| 176 |
+
group_input_channels == 1 && group_output_channels == 1 && groups > 1;
|
| 177 |
+
|
| 178 |
+
if (has_depthwise_dimensions && has_depthwise_grouping) {
|
| 179 |
+
ukernel_type = pytorch_qnnp_ukernel_type_dwconv;
|
| 180 |
+
} else if (
|
| 181 |
+
kernel_size == 1 &&
|
| 182 |
+
std::all_of(
|
| 183 |
+
stride_.begin(),
|
| 184 |
+
stride_.end(),
|
| 185 |
+
[](const auto& e) { return e == 1; }) &&
|
| 186 |
+
!any_padding) {
|
| 187 |
+
ukernel_type = group_input_channels >= SIZE_MAX
|
| 188 |
+
? pytorch_qnnp_ukernel_type_xzp_gemm
|
| 189 |
+
: pytorch_qnnp_ukernel_type_gemm;
|
| 190 |
+
} else {
|
| 191 |
+
ukernel_type = pytorch_qnnp_ukernel_type_conv;
|
| 192 |
+
}
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
if (is_per_channel && ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) {
|
| 196 |
+
TORCH_INTERNAL_ASSERT(
|
| 197 |
+
false, "Per channel quantized weights are not supported for XZP kernels");
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
pytorch_qnnp_operator_t convolution{nullptr};
|
| 201 |
+
// Initially all the params are set to zero.
|
| 202 |
+
convolution = static_cast<pytorch_qnnp_operator_t>(
|
| 203 |
+
calloc(1, sizeof(struct pytorch_qnnp_operator)));
|
| 204 |
+
if (convolution == nullptr) {
|
| 205 |
+
TORCH_INTERNAL_ASSERT(
|
| 206 |
+
false, "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
|
| 207 |
+
sizeof(struct pytorch_qnnp_operator));
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
convolution_op =
|
| 211 |
+
std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter>(
|
| 212 |
+
convolution);
|
| 213 |
+
|
| 214 |
+
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
|
| 215 |
+
convolution->ukernel_type = ukernel_type;
|
| 216 |
+
convolution->groups = groups;
|
| 217 |
+
convolution->group_input_channels = group_input_channels;
|
| 218 |
+
convolution->group_output_channels = group_output_channels;
|
| 219 |
+
convolution->kernel_depth = kernel_depth;
|
| 220 |
+
convolution->kernel_height = kernel_height;
|
| 221 |
+
convolution->kernel_width = kernel_width;
|
| 222 |
+
convolution->stride_depth = kSpatialDim == 3 ? stride_[0] : 1;
|
| 223 |
+
convolution->stride_height = stride_[kSpatialDim - 2];
|
| 224 |
+
convolution->stride_width = stride_[kSpatialDim - 1];
|
| 225 |
+
convolution->dilation_depth = kSpatialDim == 3 ? dilation_[0] : 1;
|
| 226 |
+
convolution->dilation_height = dilation_[kSpatialDim - 2];
|
| 227 |
+
convolution->dilation_width = dilation_[kSpatialDim - 1];
|
| 228 |
+
convolution->input_padding_height = padding_[kSpatialDim - 2];
|
| 229 |
+
convolution->input_padding_width = padding_[kSpatialDim - 1];
|
| 230 |
+
convolution->input_padding_depth = kSpatialDim == 3 ? padding_[0] : 0;
|
| 231 |
+
convolution->per_channel = is_per_channel_;
|
| 232 |
+
convolution->transpose = transpose_;
|
| 233 |
+
|
| 234 |
+
const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
|
| 235 |
+
const size_t k_stride = (group_input_channels + (kr - 1)) & -kr;
|
| 236 |
+
|
| 237 |
+
size_t zero_size = sizeof(uint8_t) * k_stride;
|
| 238 |
+
size_t zero_offset = 0;
|
| 239 |
+
|
| 240 |
+
if (transpose_) {
|
| 241 |
+
convolution->adjustment_width = output_padding_[1];
|
| 242 |
+
convolution->adjustment_height = output_padding_[0];
|
| 243 |
+
if (group_input_channels < 8) {
|
| 244 |
+
zero_size += 8;
|
| 245 |
+
zero_offset = 8;
|
| 246 |
+
}
|
| 247 |
+
} else {
|
| 248 |
+
zero_buffer_size = 0;
|
| 249 |
+
if (any_padding) {
|
| 250 |
+
zero_size = 0;
|
| 251 |
+
zero_offset = 0;
|
| 252 |
+
if (ukernel_type == pytorch_qnnp_ukernel_type_dwconv) {
|
| 253 |
+
const uint32_t cr = pytorch_qnnp_params.q8dw9.cr;
|
| 254 |
+
const size_t group_stride = (groups + (cr - 1)) & -cr;
|
| 255 |
+
if (groups >= 8) {
|
| 256 |
+
zero_size = sizeof(uint8_t) * group_stride;
|
| 257 |
+
zero_offset = 0;
|
| 258 |
+
} else {
|
| 259 |
+
zero_size = sizeof(uint8_t) * group_stride + 8;
|
| 260 |
+
zero_offset = sizeof(uint8_t) * 8;
|
| 261 |
+
}
|
| 262 |
+
} else if (
|
| 263 |
+
ukernel_type == pytorch_qnnp_ukernel_type_conv ||
|
| 264 |
+
ukernel_type == pytorch_qnnp_ukernel_type_gemm) {
|
| 265 |
+
if (group_input_channels >= 8) {
|
| 266 |
+
zero_size = sizeof(uint8_t) * k_stride;
|
| 267 |
+
zero_offset = 0;
|
| 268 |
+
} else {
|
| 269 |
+
zero_size = sizeof(uint8_t) * k_stride + 8;
|
| 270 |
+
zero_offset = 8;
|
| 271 |
+
}
|
| 272 |
+
}
|
| 273 |
+
}
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
// NOLINTNEXTLINE(clang-analyzer-optin.portability.UnixAPI)
|
| 277 |
+
void* zero_buffer = malloc(zero_size);
|
| 278 |
+
if (zero_buffer == nullptr) {
|
| 279 |
+
pytorch_qnnp_delete_operator(convolution);
|
| 280 |
+
TORCH_INTERNAL_ASSERT(
|
| 281 |
+
false, "failed to allocate %zu bytes for zero padding",
|
| 282 |
+
zero_size);
|
| 283 |
+
}
|
| 284 |
+
// Need to set to input zero point
|
| 285 |
+
// memset(zero_buffer, input_zero_point, zero_size);
|
| 286 |
+
zero_buffer_size = zero_size;
|
| 287 |
+
convolution->zero_buffer = zero_buffer;
|
| 288 |
+
convolution->zero_pointer = (void*)((uintptr_t)zero_buffer + zero_offset);
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter> convolution_op;
|
| 292 |
+
#ifdef USE_XNNPACK
|
| 293 |
+
xnnpack_operator xnnp_convolution_op;
|
| 294 |
+
#endif // USE_XNNPACK
|
| 295 |
+
std::unique_ptr<qnnpack::PrePackConvWeights> w;
|
| 296 |
+
at::Tensor orig_weight;
|
| 297 |
+
at::Tensor bias;
|
| 298 |
+
torch::List<int64_t> stride_;
|
| 299 |
+
torch::List<int64_t> padding_;
|
| 300 |
+
torch::List<int64_t> output_padding_;
|
| 301 |
+
torch::List<int64_t> dilation_;
|
| 302 |
+
int64_t groups_;
|
| 303 |
+
bool transpose_;
|
| 304 |
+
bool is_per_channel_;
|
| 305 |
+
c10::optional<double> input_scale;
|
| 306 |
+
std::vector<int64_t> kernel_;
|
| 307 |
+
at::Tensor w_scales;
|
| 308 |
+
std::vector<uint8_t> w_zero_points;
|
| 309 |
+
std::vector<float> requantization_scales;
|
| 310 |
+
size_t zero_buffer_size;
|
| 311 |
+
|
| 312 |
+
at::Tensor apply(
|
| 313 |
+
const at::Tensor& input,
|
| 314 |
+
double output_scale,
|
| 315 |
+
int64_t output_zero_point) override;
|
| 316 |
+
|
| 317 |
+
at::Tensor apply_relu(
|
| 318 |
+
const at::Tensor& input,
|
| 319 |
+
double output_scale,
|
| 320 |
+
int64_t output_zero_point) override;
|
| 321 |
+
|
| 322 |
+
at::Tensor apply_dynamic(
|
| 323 |
+
const at::Tensor& input,
|
| 324 |
+
bool reduce_range=false) override;
|
| 325 |
+
|
| 326 |
+
std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;
|
| 327 |
+
|
| 328 |
+
static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
|
| 329 |
+
at::Tensor weight,
|
| 330 |
+
c10::optional<at::Tensor> bias,
|
| 331 |
+
torch::List<int64_t> stride,
|
| 332 |
+
torch::List<int64_t> padding,
|
| 333 |
+
torch::List<int64_t> output_padding,
|
| 334 |
+
torch::List<int64_t> dilation,
|
| 335 |
+
int64_t groups,
|
| 336 |
+
bool transpose);
|
| 337 |
+
|
| 338 |
+
torch::List<int64_t> stride() const override {
|
| 339 |
+
return stride_;
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
torch::List<int64_t> padding() const override {
|
| 343 |
+
return padding_;
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
torch::List<int64_t> output_padding() const override {
|
| 347 |
+
return output_padding_;
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
torch::List<int64_t> dilation() const override {
|
| 351 |
+
return dilation_;
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
int64_t groups() const override {
|
| 355 |
+
return groups_;
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
bool transpose() const override {
|
| 359 |
+
return transpose_;
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
bool per_channel() const {
|
| 363 |
+
return is_per_channel_;
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
private:
|
| 367 |
+
std::mutex qnnp_mutex_;
|
| 368 |
+
template <bool ReluFused>
|
| 369 |
+
at::Tensor apply_impl(
|
| 370 |
+
const at::Tensor& input,
|
| 371 |
+
double output_scale,
|
| 372 |
+
int64_t output_zero_point);
|
| 373 |
+
|
| 374 |
+
#ifdef USE_XNNPACK
|
| 375 |
+
template <typename scalar_t, bool ReluFused>
|
| 376 |
+
at::Tensor apply_impl_xnnp(
|
| 377 |
+
const at::Tensor& input,
|
| 378 |
+
double output_scale,
|
| 379 |
+
int64_t output_zero_point);
|
| 380 |
+
#endif // USE_XNNPACK
|
| 381 |
+
};
|
| 382 |
+
|
| 383 |
+
// Activation fused into a quantized kernel (none, or clamp-at-zero ReLU).
enum class Activation : uint8_t { NONE = 0, RELU = 1 };

#if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
// Old Android toolchains lack std::nearbyint; fall back to the C functions.
template <class T>
inline float Round(const float x) {
  return ::nearbyintf(x);
}
inline double Round(const double x) {
  return ::nearbyint(x);
}
#else
// Round to nearest representable integer using the current FP rounding
// mode (ties-to-even under the default environment).
template <class T>
inline T Round(const T x) {
  return std::nearbyint(x);
}
#endif

// Quantizes a real `value` with the given affine parameters and saturates
// the result to T's representable range.
template <typename T>
inline T QuantizeValue(float scale, int32_t zero_point, float value) {
  constexpr int32_t kQMin = std::numeric_limits<T>::min();
  constexpr int32_t kQMax = std::numeric_limits<T>::max();
  const int32_t quantized =
      zero_point + static_cast<int32_t>(Round(value / scale));
  return static_cast<T>(std::min(std::max(quantized, kQMin), kQMax));
}

// Returns the [min, max] clamp bounds, in the quantized domain, that
// implement the requested activation for outputs quantized with
// (scale, zero_point).
template <typename T>
inline std::pair<T, T> activationLimits(
    float scale,
    int32_t zero_point,
    Activation Ac) {
  if (Ac == Activation::NONE) {
    return {std::numeric_limits<T>::min(), std::numeric_limits<T>::max()};
  }
  if (Ac == Activation::RELU) {
    // ReLU clamps everything below real 0, i.e. below the zero point.
    return {QuantizeValue<T>(scale, zero_point, 0.0),
            std::numeric_limits<T>::max()};
  }
#ifdef _MSC_VER
  __assume(0);
#else
  __builtin_unreachable();
#endif
}
|
| 430 |
+
|
| 431 |
+
namespace at {
namespace native {
namespace qnnp_avgpool_helper {
// QNNPACK-backed 2-D average pooling over a quantized input tensor.
// Declaration only; the implementation lives in the corresponding .cpp.
Tensor qnnpack_avg_pool2d(
    Tensor input,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad,
    c10::optional<int64_t> divisor_override);
} // qnnp_avgpool_helper
} // namespace native
} // namespace at
|
| 445 |
+
|
| 446 |
+
namespace {
|
| 447 |
+
// Fills `requant_scales` with the per-channel requantization multipliers
// (weight_scale * input_scale / output_scale) QNNPACK applies to each
// output channel, growing the vector if needed, and returns it.
// Throws if any resulting scale is non-positive or not a normal float.
C10_UNUSED std::vector<float> generate_requantization_scales(
    const at::Tensor& weight_scales,
    const float input_scale,
    const float output_scale,
    std::vector<float>& requant_scales) {
  // Since weight scale is allocated with padding,
  // weight_scales.numel() gives us the padded element count.
  const auto num_output_channels_padded = weight_scales.numel();
  float *const weight_scales_data = weight_scales.data_ptr<float>();
  if (static_cast<int64_t>(requant_scales.size()) < num_output_channels_padded) {
    requant_scales.resize(num_output_channels_padded);
  }
  // Loop-invariant; hoisted out of the per-channel loop.
  const auto inverse_output_scale = 1.f / output_scale;
  for (const auto i : c10::irange(num_output_channels_padded)) {
    requant_scales[i] = (weight_scales_data[i] * input_scale) * inverse_output_scale;
    TORCH_CHECK(
        (requant_scales[i] > 0.0f && std::isnormal(requant_scales[i])),
        "failed to create op with requantization scale: ",
        requant_scales[i],
        ": requantization scale must be finite and positive");
  }
  return requant_scales;
}
|
| 470 |
+
|
| 471 |
+
// Builds the (zero_points, scales) pair QNNPACK expects for a packed weight.
// Handles both per-tensor and per-channel affine quantization schemes;
// `transpose`/`groups` select how the output-channel dimension is read from
// the weight layout. Both outputs are padded 8 entries past the real number
// of output channels, as required by QNNPACK's buffering, and zero points
// are shifted by 128 (QNNPACK consumes uint8 weights).
C10_UNUSED std::pair<std::vector<uint8_t>, at::Tensor> make_zero_points_and_scales_tensor(
    const at::Tensor& weight_contig,
    bool transpose = false,
    uint32_t groups = 1
  ) {
  // For transposed convs the output-channel dim is dim 1 and is per-group.
  const int out_ch_idx = transpose ? 1 : 0;
  const auto num_output_channels = weight_contig.size(out_ch_idx) * (transpose ? groups : 1);
  // Add 8 to account for buffering needed by QNNPACK.
  const auto num_output_channels_padded = num_output_channels + 8;
  const auto qtype = weight_contig.qscheme();
  // Padding entries stay 0.
  std::vector<uint8_t> weight_zp(num_output_channels_padded, 0);
  // Adjust weight zero point(s), similar to the weight data: shift the
  // (possibly signed) zero points by 128 into the uint8 domain.
  if (qtype == at::kPerTensorAffine) {
    for (const auto i : c10::irange(num_output_channels)) {
      weight_zp[i] = (uint8_t)(weight_contig.q_zero_point() + 128);
    }
  } else if (qtype == at::kPerChannelAffine) {
    TORCH_CHECK(
        weight_contig.q_per_channel_zero_points().scalar_type() == at::kLong,
        "Per channel zero points dtype must be long int.");
    const int64_t* per_channel_zero_points =
        weight_contig.q_per_channel_zero_points().data_ptr<int64_t>();
    for (const auto i : c10::irange(num_output_channels)) {
      weight_zp[i] = (uint8_t)(per_channel_zero_points[i] + 128);
    }
  } else {
    TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme.");
  }
  at::Tensor weight_scales =
      at::empty(
          {num_output_channels_padded},
          at::device(at::kCPU).dtype(at::kFloat));
  float *const weight_scales_data = weight_scales.data_ptr<float>();
  // Fill the real channels with the weight scale(s), narrowed to float.
  if (qtype == at::kPerTensorAffine) {
    for (const auto i : c10::irange(num_output_channels)) {
      weight_scales_data[i] = weight_contig.q_scale();
    }
  } else if (qtype == at::kPerChannelAffine) {
    TORCH_CHECK(
        weight_contig.q_per_channel_scales().scalar_type() == at::kDouble,
        "Per channel scales dtype must be double.");
    const double *const per_channel_scales =
        weight_contig.q_per_channel_scales().data_ptr<double>();
    for (const auto i : c10::irange(num_output_channels)) {
      weight_scales_data[i] = static_cast<float>(per_channel_scales[i]);
    }
  } else {
    TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme.");
  }
  // Neutral scale of 1 for the 8 padding entries.
  for (const auto i : c10::irange(num_output_channels, num_output_channels_padded)) {
    weight_scales_data[i] = 1.f;
  }
  return {weight_zp, weight_scales};
}
|
| 525 |
+
} // namespace
|
| 526 |
+
|
| 527 |
+
#endif
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantizedOps.h
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/core/Tensor.h>
|
| 3 |
+
#include <ATen/core/IListRef.h>
|
| 4 |
+
#include <ATen/Dispatch.h>
|
| 5 |
+
#include <ATen/TensorIterator.h>
|
| 6 |
+
#include <ATen/native/Activation.h>
|
| 7 |
+
#include <ATen/native/DispatchStub.h>
|
| 8 |
+
|
| 9 |
+
namespace at {
|
| 10 |
+
namespace native {
|
| 11 |
+
|
| 12 |
+
using qrelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
|
| 13 |
+
using qrelu_leaky_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
|
| 14 |
+
const Scalar& /*negval_*/);
|
| 15 |
+
using qgelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, GeluType /* approximate */);
|
| 16 |
+
using qsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, double output_scale, int64_t output_zero_point);
|
| 17 |
+
using qhardsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
|
| 18 |
+
using qclamp_fn = void (*)(
|
| 19 |
+
const at::Tensor& /*qx*/,
|
| 20 |
+
const Scalar& min,
|
| 21 |
+
const Scalar& max,
|
| 22 |
+
at::Tensor& /*qy*/);
|
| 23 |
+
using qclamp_minmax_fn = void (*)(
|
| 24 |
+
const at::Tensor& /*qx*/,
|
| 25 |
+
const Scalar& /*min or max*/,
|
| 26 |
+
at::Tensor& /*qy*/);
|
| 27 |
+
using qthreshold_fn = void (*)(
|
| 28 |
+
const at::Tensor& /*qx*/,
|
| 29 |
+
const Scalar& threshold,
|
| 30 |
+
const Scalar& value,
|
| 31 |
+
at::Tensor& /*qy*/);
|
| 32 |
+
using qtanh_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
|
| 33 |
+
using qelu_fn = void(*)(
|
| 34 |
+
const at::Tensor& /*qx*/,
|
| 35 |
+
const Scalar& /*alpha*/,
|
| 36 |
+
const Scalar& /*scale*/,
|
| 37 |
+
const Scalar& /*input_scale*/,
|
| 38 |
+
at::Tensor& /*qy*/);
|
| 39 |
+
using qbinary_fn =
|
| 40 |
+
void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Tensor& /*other*/);
|
| 41 |
+
using qadd_scalar_fn =
|
| 42 |
+
void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Scalar& other /*other*/);
|
| 43 |
+
using qhardswish_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
|
| 44 |
+
using qdropout_fn = void(*)(
|
| 45 |
+
const at::Tensor& /*qx*/,
|
| 46 |
+
const Scalar& /*p*/,
|
| 47 |
+
bool training /*training*/,
|
| 48 |
+
at::Tensor& /*qy*/);
|
| 49 |
+
using qmaxpool_2d_fn = void (*)(
|
| 50 |
+
const Tensor& qx,
|
| 51 |
+
int64_t iC, // input/output channels
|
| 52 |
+
int64_t iH,
|
| 53 |
+
int64_t iW, // input sizes
|
| 54 |
+
int64_t oH,
|
| 55 |
+
int64_t oW, // output sizes
|
| 56 |
+
int64_t kH,
|
| 57 |
+
int64_t kW, // kernel size
|
| 58 |
+
int64_t sH,
|
| 59 |
+
int64_t sW, // strides
|
| 60 |
+
int64_t pH,
|
| 61 |
+
int64_t pW, // padding
|
| 62 |
+
int64_t dH,
|
| 63 |
+
int64_t dW, // dilation
|
| 64 |
+
Tensor& qy);
|
| 65 |
+
using qadaptive_avg_pool2d_fn = void (*)(
|
| 66 |
+
const Tensor& qx,
|
| 67 |
+
Tensor& qy,
|
| 68 |
+
int64_t sizeB,
|
| 69 |
+
int64_t sizeC,
|
| 70 |
+
int64_t isizeH,
|
| 71 |
+
int64_t isizeW,
|
| 72 |
+
int64_t osizeH,
|
| 73 |
+
int64_t osizeW,
|
| 74 |
+
int64_t istrideB,
|
| 75 |
+
int64_t istrideC,
|
| 76 |
+
int64_t istrideH,
|
| 77 |
+
int64_t istrideW);
|
| 78 |
+
using qadaptive_avg_pool3d_fn = void (*)(
|
| 79 |
+
const Tensor& qx,
|
| 80 |
+
Tensor& qy,
|
| 81 |
+
int64_t sizeB,
|
| 82 |
+
int64_t sizeC,
|
| 83 |
+
int64_t isizeD,
|
| 84 |
+
int64_t isizeH,
|
| 85 |
+
int64_t isizeW,
|
| 86 |
+
int64_t osizeD,
|
| 87 |
+
int64_t osizeH,
|
| 88 |
+
int64_t osizeW,
|
| 89 |
+
int64_t istrideB,
|
| 90 |
+
int64_t istrideC,
|
| 91 |
+
int64_t istrideD,
|
| 92 |
+
int64_t istrideH,
|
| 93 |
+
int64_t istrideW);
|
| 94 |
+
// Kernel signature for quantized 2-d average pooling over NHWC input.
using qavg_pool2d_fn = void (*)(
    const Tensor& qx,
    Tensor& qy,
    int64_t nBatch,
    int64_t nInputPlane,
    int64_t inputWidth,
    int64_t inputHeight,
    int64_t outputWidth,
    int64_t outputHeight,
    int kW,
    int kH,
    int dW,
    int dH,
    int padW,
    int padH,
    bool count_include_pad,
    c10::optional<int64_t> divisor_override);

// Kernel signature for quantized 3-d average pooling (adds a depth
// dimension plus the matching kernel/stride/padding parameters).
using qavg_pool3d_fn = void (*)(
    const Tensor& qx,
    Tensor& qy,
    int64_t nBatch,
    int64_t nInputPlane,
    int64_t inputWidth,
    int64_t inputHeight,
    int64_t inputDepth,
    int64_t outputWidth,
    int64_t outputHeight,
    int64_t outputDepth,
    int kW,
    int kH,
    int kD,
    int dW,
    int dH,
    int dD,
    int padW,
    int padH,
    int padD,
    bool count_include_pad,
    c10::optional<int64_t> divisor_override);

// Kernel signature for quantized bilinear 2-d upsampling.
using qupsample_bilinear2d_fn = void (*)(
    Tensor& output,
    const Tensor& input,
    int64_t input_height,
    int64_t input_width,
    int64_t output_height,
    int64_t output_width,
    int64_t nbatch,
    int64_t channels,
    bool align_corners,
    c10::optional<double> scales_h,
    c10::optional<double> scales_w);

// Concatenation of quantized NHWC tensors along `dim`; the result is
// produced with the given output scale/zero_point.
using qcat_nhwc_fn = Tensor (*)(
    const MaterializedITensorListRef& qxs,
    int64_t dim,
    double scale,
    int64_t zero_point);
// Quantized top-k kernel signature.
using qtopk_fn = void(*)(Tensor&, Tensor&, const Tensor&, int64_t, int64_t, bool, bool);

// Quantized batch-norm kernel signature.
using qbatch_norm_fn = void(*)(int64_t, int64_t, int64_t, int64_t, int64_t, const Tensor&, const Tensor&, const Tensor&, Tensor&);

// Layer/group normalization of quantized input (contiguous layout).
using qnormalize_fn = void (*)(
    const Tensor& /* X */,
    const Tensor& /* gamma */,
    const Tensor& /* beta */,
    bool /* affine_per_channel */,
    int /* num_channels */,
    int /* num_groups */,
    int64_t /* M */,
    int64_t /* N */,
    double /* eps */,
    Tensor* /* Y */);

// Mean reduction over the innermost dimensions of a quantized tensor.
using qmean_inner_dim_fn = void (*)(
    const Tensor& /* X */,
    OptionalIntArrayRef /* opt_dim */,
    bool /* keepdim */,
    c10::optional<ScalarType> /* opt_dtype */,
    Tensor& /* Y */);

// Standard-deviation reduction over the innermost dimensions of a
// quantized tensor.
using qstd_inner_dim_fn = void (*)(
    const Tensor& /* X */,
    OptionalIntArrayRef /* dim */,
    optional<int64_t> /* unbiased */,
    bool /* keepdim */,
    Tensor& /* Y */);

// Same contract as qnormalize_fn, but for channels-last (NHWC) input.
using qnormalize_nhwc_fn = void (*)(
    const Tensor& /* X */,
    const Tensor& /* gamma */,
    const Tensor& /* beta */,
    bool /* affine_per_channel */,
    int /* num_channels */,
    int /* num_groups */,
    int64_t /* M */,
    int64_t /* N */,
    double /* eps */,
    Tensor* /* Y */);

// Quantized PReLU kernel signature.
using qprelu_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
                           const Tensor& /*qw*/);

// Per-CPU-capability dispatch stubs for the quantized kernels declared in
// this header.
DECLARE_DISPATCH(qadaptive_avg_pool2d_fn, qadaptive_avg_pool2d_nhwc_stub);
DECLARE_DISPATCH(qadaptive_avg_pool3d_fn, qadaptive_avg_pool3d_ndhwc_stub);
DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_relu_stub);
DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_stub);
DECLARE_DISPATCH(qavg_pool2d_fn, qavg_pool2d_nhwc_stub);
DECLARE_DISPATCH(qavg_pool3d_fn, qavg_pool3d_nhwc_stub);
DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_relu_stub);
DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_stub);
DECLARE_DISPATCH(qbinary_fn, qadd_relu_stub);
DECLARE_DISPATCH(qbinary_fn, qadd_stub);
DECLARE_DISPATCH(qbinary_fn, qmul_relu_stub);
DECLARE_DISPATCH(qbinary_fn, qmul_stub);
DECLARE_DISPATCH(qcat_nhwc_fn, qcat_nhwc_stub);
DECLARE_DISPATCH(qcat_nhwc_fn, qcat_relu_nhwc_stub);
DECLARE_DISPATCH(qclamp_fn, qclamp_stub);
DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_min_stub);
DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_max_stub);
DECLARE_DISPATCH(qelu_fn, qelu_stub);
DECLARE_DISPATCH(qhardsigmoid_fn, qhardsigmoid_stub);
DECLARE_DISPATCH(qhardswish_fn, qhardswish_stub);
DECLARE_DISPATCH(qdropout_fn, qdropout_stub);
DECLARE_DISPATCH(qmaxpool_2d_fn, qmaxpool_2d_nhwc_stub);
DECLARE_DISPATCH(qnormalize_fn, quantized_normalize_stub);
DECLARE_DISPATCH(qnormalize_nhwc_fn, quantized_groupnorm_nhwc_stub);
DECLARE_DISPATCH(qrelu_fn, qrelu_stub);
DECLARE_DISPATCH(qrelu_leaky_fn, qrelu_leaky_stub);
DECLARE_DISPATCH(qgelu_fn, qgelu_stub);
DECLARE_DISPATCH(qsigmoid_fn, qsigmoid_stub);
DECLARE_DISPATCH(qtanh_fn, qtanh_stub);
DECLARE_DISPATCH(qthreshold_fn, qthreshold_stub);
DECLARE_DISPATCH(qtopk_fn, qtopk_stub);
DECLARE_DISPATCH(qupsample_bilinear2d_fn, qupsample_bilinear2d_nhwc_stub);
DECLARE_DISPATCH(qmean_inner_dim_fn, qmean_inner_dim_stub);
DECLARE_DISPATCH(qstd_inner_dim_fn, qstd_inner_dim_stub);
DECLARE_DISPATCH(qprelu_fn, qprelu_stub);

} // namespace native
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/RuyUtils.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#ifdef USE_RUY_QMATMUL

#include <ruy/ruy.h>

namespace at {
namespace native {
namespace ruy_utils {

// Returns the ruy context used for quantized matrix multiplications.
ruy::Context* get_ruy_context();

// Decomposes a floating-point requantization `scale` into a fixed-point
// integer multiplier and a power-of-two exponent (the representation ruy's
// quantized kernels consume).
void quantize_multiplier(double scale,
                         int* multiplier_fixedpoint,
                         int* multiplier_exponent);

} // namespace ruy_utils
} // namespace native
} // namespace at

#endif // USE_RUY_QMATMUL
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h
ADDED
|
@@ -0,0 +1,411 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <ATen/native/quantized/PackedParams.h>
|
| 5 |
+
#include <ATen/native/quantized/cpu/EmbeddingPackedParams.h>
|
| 6 |
+
#include <c10/core/QScheme.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
|
| 9 |
+
#ifdef USE_FBGEMM
|
| 10 |
+
#include <fbgemm/Fbgemm.h>
|
| 11 |
+
#include <fbgemm/FbgemmFP16.h>
|
| 12 |
+
#include <fbgemm/QuantUtils.h>
|
| 13 |
+
|
| 14 |
+
// The struct for the packed weight matrix (PackBMatrix) and the corresponding
|
| 15 |
+
// column offsets used for the fully connect layer, which are both prepared in
|
| 16 |
+
// the prepacking step to save the computations in the inference. Note the
|
| 17 |
+
// column offsets include the sum of the B columns as well as the scalar term
|
| 18 |
+
// B_zero_point * K, whereas the row offsets created by
|
| 19 |
+
// PackAWithQuantRowOffset/PackAWithIm2Col/PackAWithRowOffset are only the sum
|
| 20 |
+
// of the A rows. The column offsets are needed for the asymmetric quantization
|
| 21 |
+
// (affine quantization) of input matrix.
|
| 22 |
+
// Note that in JIT mode we can think of a way to fuse col_offsets with bias.
|
| 23 |
+
// Packed int8 linear (fully-connected) weights for the FBGEMM backend.
// Holds the pre-packed B matrix together with the quantization parameters
// and the column offsets computed at prepack time (see the comment above
// for why column offsets are needed).
struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase {
  PackedLinearWeight(
      std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w,
      c10::optional<at::Tensor> bias,
      std::vector<int32_t> col_offsets,
      std::vector<float> w_scale,
      std::vector<int32_t> w_zp,
      c10::QScheme q_scheme)
      : w(std::move(w)),
        bias_(std::move(bias)),
        col_offsets(std::move(col_offsets)),
        w_scale(std::move(w_scale)),
        w_zp(std::move(w_zp)),
        q_scheme(std::move(q_scheme)) {}
  std::unique_ptr<fbgemm::PackBMatrix<int8_t>> w;  // pre-packed weight matrix
  c10::optional<at::Tensor> bias_;
  std::vector<int32_t> col_offsets;  // column sums incl. B_zero_point * K term
  std::vector<int32_t> w_zp;         // weight zero point(s)
  std::vector<float> w_scale;        // weight scale(s)
  c10::QScheme q_scheme;             // per-tensor or per-channel scheme

  // Quantized linear; the output is requantized to the given scale/zero point.
  at::Tensor apply(
      at::Tensor input,
      double output_scale,
      int64_t output_zero_point) override;

  // Same as apply() with a fused ReLU.
  at::Tensor apply_relu(
      at::Tensor input,
      double output_scale,
      int64_t output_zero_point) override;

  at::Tensor& apply_out(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point,
      at::Tensor& output) override;

  at::Tensor& apply_relu_out(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point,
      at::Tensor& output) override;

  // fp32-in/fp32-out variants: the input is quantized with the supplied
  // parameters and the int8 result is dequantized before returning.
  at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32(
      at::Tensor input,
      double input_scale,
      int64_t input_zero_point) override;

  at::Tensor apply_with_input_q_dq_qweight_dq_relu_output_fp32(
      at::Tensor input,
      double input_scale,
      int64_t input_zero_point) override;

  // Dynamic quantization: input quantization parameters chosen at run time.
  at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false)
      override;

  at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false)
      override;

  // Recovers the original (weight, bias) pair from the packed format.
  std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;

  c10::optional<at::Tensor> bias() override {
    return bias_;
  }

  static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
      at::Tensor weight,
      c10::optional<at::Tensor> bias);

 private:
  template <bool ReluFused>
  at::Tensor& apply_impl(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point,
      at::Tensor& output);

  template <bool ReluFused>
  at::Tensor apply_with_input_q_dq_qweight_dq_output_fp32_impl(
      const at::Tensor& input,
      double input_scale,
      int64_t input_zero_point);

  template <bool ReluFused>
  at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range = false);
};
|
| 109 |
+
|
| 110 |
+
// Packed fp16 weights for dynamically-quantized linear (FBGEMM FP16 path).
// Only the apply_dynamic* entry points are implemented; the statically
// quantized apply()/apply_relu() paths assert.
struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase {
  PackedLinearWeightFp16(
      std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w,
      c10::optional<at::Tensor> bias)
      : w(std::move(w)), bias_(std::move(bias)) {}

  std::unique_ptr<fbgemm::PackedGemmMatrixFP16> w;  // pre-packed fp16 matrix
  c10::optional<at::Tensor> bias_;

  // Not supported for fp16 packed weights; always asserts.
  at::Tensor apply(
      at::Tensor /*input*/,
      double /*output_scale*/,
      int64_t /*output_zero_point*/) override {
    TORCH_INTERNAL_ASSERT(false);
  }
  // Not supported for fp16 packed weights; always asserts.
  at::Tensor apply_relu(
      at::Tensor /*input*/,
      double /*output_scale*/,
      int64_t /*output_zero_point*/) override {
    TORCH_INTERNAL_ASSERT(false);
  }

  at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false)
      override;
  at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false)
      override;

  at::Tensor& apply_dynamic_out(
      const at::Tensor& input,
      at::Tensor& output,
      bool reduce_range = false) override;
  at::Tensor& apply_dynamic_relu_out(
      const at::Tensor& input,
      at::Tensor& output,
      bool reduce_range = false) override;

  // Recovers the original (weight, bias) pair from the packed format.
  std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;

  c10::optional<at::Tensor> bias() override {
    return bias_;
  }

  static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
      at::Tensor weight,
      c10::optional<at::Tensor> bias);

  void set_bias(c10::optional<at::Tensor> bias) override;

 private:
  template <bool ReluFused>
  at::Tensor& apply_dynamic_impl(const at::Tensor& input, at::Tensor& output);
};
|
| 162 |
+
|
| 163 |
+
// Packed int8 convolution weights for the FBGEMM backend, templated on the
// number of spatial dimensions (2 = conv2d, 3 = conv3d).
template <int kSpatialDim = 2>
struct TORCH_API PackedConvWeight : public ConvPackedParamsBase<kSpatialDim> {
  PackedConvWeight(
      std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w,
      c10::optional<at::Tensor> bias,
      torch::List<int64_t> stride,
      torch::List<int64_t> padding,
      torch::List<int64_t> output_padding,
      torch::List<int64_t> dilation,
      int64_t groups,
      uint8_t transpose,
      std::vector<int32_t> col_offsets,
      std::vector<int64_t> kernel,
      std::vector<float> w_scale,
      std::vector<int32_t> w_zp,
      c10::QScheme q_scheme)
      : w(std::move(w)),
        bias(std::move(bias)),
        stride_(std::move(stride)),
        padding_(std::move(padding)),
        output_padding_(std::move(output_padding)),
        dilation_(std::move(dilation)),
        groups_(groups),
        transpose_(transpose),
        col_offsets(std::move(col_offsets)),
        kernel(std::move(kernel)),
        w_scale(std::move(w_scale)),
        w_zp(std::move(w_zp)),
        q_scheme(q_scheme) {}

  std::unique_ptr<fbgemm::PackWeightsForConv<kSpatialDim>> w;
  c10::optional<at::Tensor> bias;
  torch::List<int64_t> stride_;
  torch::List<int64_t> padding_;
  torch::List<int64_t> output_padding_;
  torch::List<int64_t> dilation_;
  int64_t groups_;
  uint8_t transpose_;               // nonzero for transposed convolution
  std::vector<int32_t> col_offsets;
  std::vector<int64_t> kernel;      // spatial kernel sizes
  std::vector<float> w_scale;       // weight scale(s)
  std::vector<int32_t> w_zp;        // weight zero point(s)
  c10::QScheme q_scheme;

  // Quantized convolution; output requantized to the given parameters.
  at::Tensor apply(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point) override;

  // Same as apply() with a fused ReLU.
  at::Tensor apply_relu(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point) override;

  at::Tensor apply_dynamic(
      const at::Tensor& input,
      bool reduce_range) override;

  // Recovers the original (weight, bias) pair from the packed format.
  std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() override;

  static c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> prepack(
      at::Tensor weight,
      c10::optional<at::Tensor> bias,
      torch::List<int64_t> stride,
      torch::List<int64_t> padding,
      torch::List<int64_t> output_padding,
      torch::List<int64_t> dilation,
      int64_t groups,
      bool transpose);

  const float* GetBiasData(at::Tensor* bias);

  void GetQuantizationParams(
      float act_scale,
      float out_scale,
      std::vector<float>* output_multiplier_float,
      std::vector<float>* act_times_w_scale);

  torch::List<int64_t> stride() const override {
    return stride_;
  }

  torch::List<int64_t> padding() const override {
    return padding_;
  }

  torch::List<int64_t> output_padding() const override {
    return output_padding_;
  }

  torch::List<int64_t> dilation() const override {
    return dilation_;
  }

  int64_t groups() const override {
    return groups_;
  }

  bool transpose() const override {
    return (bool)transpose_;
  }

 private:
  template <bool ReluFused>
  at::Tensor apply_impl(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point);
};
|
| 272 |
+
|
| 273 |
+
// PackWeight: shift unsigned 8-bit weights into the signed 8-bit range by
// subtracting 128 from every element (element-wise, in input order).
inline void convert_uint8_int8(
    int len,
    const uint8_t* src_uint8,
    int8_t* dst_int8) {
  for (int idx = 0; idx < len; ++idx) {
    const int32_t shifted = static_cast<int32_t>(src_uint8[idx]) - 128;
    dst_int8[idx] = static_cast<int8_t>(shifted);
  }
}
|
| 282 |
+
|
| 283 |
+
// UnpackWeight: shift signed 8-bit weights back into the unsigned 8-bit
// range by adding 128 to every element (inverse of convert_uint8_int8).
inline void convert_int8_uint8(
    int len,
    const int8_t* src_int8,
    uint8_t* dst_uint8) {
  for (int idx = 0; idx < len; ++idx) {
    const int32_t shifted = static_cast<int32_t>(src_int8[idx]) + 128;
    dst_uint8[idx] = static_cast<uint8_t>(shifted);
  }
}
|
| 293 |
+
|
| 294 |
+
namespace at {
namespace native {
namespace fbgemm_utils {

// Builds an fbgemm conv_param_t describing one (de)convolution.
// N = batch size, C = input channels, M = output channels.
template <int kSpatialDim = 2>
fbgemm::conv_param_t<kSpatialDim> MakeFbgemmConvParam(
    int N,
    int C,
    int M,
    const std::vector<int>& image_shape,
    int groups,
    const std::vector<int>& kernels,
    const std::vector<int>& strides,
    const std::vector<int>& pads,
    const std::vector<int>& dilations,
    const std::vector<int>& output_padding = std::vector<int>(kSpatialDim, 0),
    bool transposed = false);

// TODO: Remove functions below when ChannelsLast3d is ready.

// Creates a quantized CPU tensor with explicit sizes/strides and quantizer.
Tensor MakeStridedQTensorCPU(
    const IntArrayRef& sizes,
    const IntArrayRef& strides,
    const TensorOptions& options,
    QuantizerPtr quantizer);

// Allocates an empty N x C x D x H x W affine-quantized tensor laid out as
// channels-last 3d.
Tensor MakeEmptyAffineQuantizedChannelsLast3dTensor(
    int64_t N,
    int64_t C,
    int64_t D,
    int64_t H,
    int64_t W,
    const TensorOptions& options,
    double scale,
    int64_t zero_point);

// Per-channel-affine variant of the allocator above.
Tensor MakeEmptyPerChannelAffineQuantizedChannelsLast3dTensor(
    int64_t N,
    int64_t C,
    int64_t D,
    int64_t H,
    int64_t W,
    const TensorOptions& options,
    const Tensor& scales,
    const Tensor& zero_points);

Tensor ConvertToChannelsLast3dTensor(const Tensor& src);

template <int kSpatialDim = 2>
Tensor TransposeConvTensorUnpackConversion(const Tensor& src, int groups);

template <int kSpatialDim>
Tensor ConvertConvWeightsToChannelLastTensor(
    const at::Tensor& src,
    int groups,
    bool transpose);
} // namespace fbgemm_utils
} // namespace native
} // namespace at

#endif // USE_FBGEMM
|
| 354 |
+
|
| 355 |
+
// Packed quantized embedding-bag weights; serves both the byte (8-bit) and
// the 4-bit row-wise quantized lookup paths below.
struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
  PackedEmbeddingBagWeight(
      at::Tensor packed_w,
      std::vector<float> w_scale,
      std::vector<float> w_zp,
      int64_t bit_rate,
      c10::QScheme q_scheme,
      int64_t version)
      : packed_w(std::move(packed_w)),
        w_scale(std::move(w_scale)),
        w_zp(std::move(w_zp)),
        bit_rate_(bit_rate),
        q_scheme(q_scheme),
        version_(version) {
    // NOLINTNEXTLINE(clang-analyzer-cplusplus.Move)
    if (!packed_w.is_contiguous()) {
      packed_w = packed_w.contiguous();
    }
  }

  at::Tensor packed_w;         // packed weight payload
  std::vector<float> w_scale;  // weight scale(s)
  std::vector<float> w_zp;     // weight zero point(s)
  int64_t bit_rate_;           // bits per packed weight element
  c10::QScheme q_scheme;
  int64_t version_;            // packing-format version

  // Recovers the original floating-point weight from the packed format.
  at::Tensor unpack() override;
  static c10::intrusive_ptr<EmbeddingPackedParamsBase> prepack(
      at::Tensor weight);

  int64_t bit_rate() const override {
    return bit_rate_;
  }

  int64_t version() const override {
    return version_;
  }

  // Embedding-bag lookup over byte (8-bit) packed rows.
  at::Tensor embeddingbag_byte(
      const at::Tensor& indices,
      const c10::optional<at::Tensor>& offsets,
      bool pruned_weights,
      const c10::optional<at::Tensor>& per_sample_weights_,
      const c10::optional<at::Tensor>& compressed_indices_mapping,
      bool include_last_offset,
      bool is_embedding_op) override;

  // Embedding-bag lookup over 4-bit packed rows.
  at::Tensor embeddingbag_4bit(
      const at::Tensor& indices,
      const c10::optional<at::Tensor>& offsets,
      bool pruned_weights,
      const c10::optional<at::Tensor>& per_sample_weights_,
      const c10::optional<at::Tensor>& compressed_indices_mapping,
      bool include_last_offset,
      bool is_embedding_op) override;
};
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/init_qnnpack.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#ifdef USE_PYTORCH_QNNPACK

namespace at {
namespace native {

// Initializes QNNPACK; callers are expected to invoke this before using any
// QNNPACK-backed quantized kernel.
void initQNNPACK();

} // namespace native
} // namespace at

#endif
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag.h
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <ATen/core/Tensor.h>
#include <cstdint>

namespace at {
namespace native {
// Byte (8-bit) row-wise quantized embedding-bag lookup; writes the result
// into `output` and returns it. The unnamed scale_grad_by_freq/mode
// parameters are accepted for signature compatibility and not used.
Tensor& embedding_bag_byte_rowwise_offsets_out(
    Tensor& output,
    const Tensor& weight,
    const Tensor& indices,
    const c10::optional<Tensor>& offsets_in,
    const bool /* scale_grad_by_freq */,
    const int64_t /* mode */,
    bool pruned_weights,
    const c10::optional<Tensor>& per_sample_weights_,
    const c10::optional<Tensor>& compressed_indices_mapping,
    bool include_last_offset);

// 4-bit row-wise quantized variant of the lookup above.
Tensor& embedding_bag_4bit_rowwise_offsets_out(
    Tensor& output,
    const Tensor& weight,
    const Tensor& indices,
    const c10::optional<Tensor>& offsets_in,
    const bool /* scale_grad_by_freq */,
    const int64_t /* mode */,
    bool pruned_weights,
    const c10::optional<Tensor>& per_sample_weights_,
    const c10::optional<Tensor>& compressed_indices_mapping,
    bool include_last_offset);

// Unpacks a byte-packed embedding weight tensor into `output`; returns it.
Tensor& qembeddingbag_byte_unpack_out(Tensor& output, const Tensor& packed_weight);

} // namespace native
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <ATen/core/Tensor.h>

namespace at { namespace native {

// Packs `weight` into the byte (8-bit row-wise) embedding-bag format,
// writing the result into `output`; returns `output`.
Tensor& qembeddingbag_byte_prepack_out(Tensor& output, const Tensor& weight);

// Convenience overload that allocates and returns the packed tensor.
Tensor qembeddingbag_byte_prepack(const Tensor& weight);

} // namespace native
} // namespace at
|
openflamingo/lib/python3.10/site-packages/torch/include/ATen/native/vol2col.h
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstring>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
namespace native {
|
| 7 |
+
|
| 8 |
+
// Unfolds a 3-d volume (channels x depth x height x width) into column
// form for convolution: each output row corresponds to one
// (channel, kt, kh, kw) kernel tap, each output column to one output
// position. Taps that fall outside the padded input are written as zero.
template <typename T>
static void vol2col(
    const T* data_vol,
    const int64_t channels,
    const int64_t depth,
    const int64_t height,
    const int64_t width,
    const int64_t depth_col,
    const int64_t height_col,
    const int64_t width_col,
    const int64_t kT,
    const int64_t kernel_height,
    const int64_t kernel_width,
    const int64_t pT,
    const int64_t pH,
    const int64_t pW,
    const int64_t dT,
    const int64_t dH,
    const int64_t dW,
    const int64_t dilationT,
    const int64_t dilationH,
    const int64_t dilationW,
    T* data_col) {
  const int64_t channels_col = channels * kT * kernel_height * kernel_width;
  for (int64_t row = 0; row < channels_col; ++row) {
    // Decode the kernel tap and the source channel packed into this row.
    const int64_t kw = row % kernel_width;
    const int64_t kh = (row / kernel_width) % kernel_height;
    const int64_t kt = (row / kernel_width / kernel_height) % kT;
    const int64_t c_in = row / kT / kernel_height / kernel_width;
    for (int64_t t = 0; t < depth_col; ++t) {
      const int64_t t_src = t * dT - pT + kt * dilationT;
      for (int64_t h = 0; h < height_col; ++h) {
        const int64_t h_src = h * dH - pH + kh * dilationH;
        for (int64_t w = 0; w < width_col; ++w) {
          const int64_t w_src = w * dW - pW + kw * dilationW;
          const bool inside = t_src >= 0 && t_src < depth &&
              h_src >= 0 && h_src < height &&
              w_src >= 0 && w_src < width;
          const int64_t out_idx =
              ((row * depth_col + t) * height_col + h) * width_col + w;
          data_col[out_idx] = inside
              ? data_vol[((c_in * depth + t_src) * height + h_src) * width +
                         w_src]
              : static_cast<T>(0);
        }
      }
    }
  }
}
|
| 58 |
+
|
| 59 |
+
// Inverse of vol2col: folds column-form data back into a 3-d volume. The
// destination is zeroed first, then every in-bounds tap is accumulated, so
// positions covered by overlapping kernel taps receive the sum.
template <typename T>
static void col2vol(
    const T* data_col,
    const int64_t channels,
    const int64_t depth,
    const int64_t height,
    const int64_t width,
    const int64_t out_depth,
    const int64_t out_height,
    const int64_t out_width,
    const int64_t kT,
    const int64_t kernel_height,
    const int64_t kernel_width,
    const int64_t pT,
    const int64_t pH,
    const int64_t pW,
    const int64_t dT,
    const int64_t dH,
    const int64_t dW,
    const int64_t dilationT,
    const int64_t dilationH,
    const int64_t dilationW,
    T* data_vol) {
  std::memset(data_vol, 0, sizeof(T) * depth * height * width * channels);
  const int64_t depth_col = out_depth;
  const int64_t height_col = out_height;
  const int64_t width_col = out_width;
  const int64_t channels_col = channels * kT * kernel_height * kernel_width;
  for (int64_t row = 0; row < channels_col; ++row) {
    // Decode the kernel tap and the destination channel for this row.
    const int64_t kw = row % kernel_width;
    const int64_t kh = (row / kernel_width) % kernel_height;
    const int64_t kt = (row / kernel_width / kernel_height) % kT;
    const int64_t c_out = row / kT / kernel_height / kernel_width;
    for (int64_t t = 0; t < depth_col; ++t) {
      const int64_t t_dst = t * dT - pT + kt * dilationT;
      for (int64_t h = 0; h < height_col; ++h) {
        const int64_t h_dst = h * dH - pH + kh * dilationH;
        for (int64_t w = 0; w < width_col; ++w) {
          const int64_t w_dst = w * dW - pW + kw * dilationW;
          const bool inside = t_dst >= 0 && t_dst < depth &&
              h_dst >= 0 && h_dst < height &&
              w_dst >= 0 && w_dst < width;
          if (inside) {
            data_vol[((c_out * depth + t_dst) * height + h_dst) * width +
                     w_dst] +=
                data_col[((row * depth_col + t) * height_col + h) * width_col +
                         w];
          }
        }
      }
    }
  }
}
|
| 110 |
+
|
| 111 |
+
} // namespace native
|
| 112 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__init__.py
ADDED
|
File without changes
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (179 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/autoheuristic.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/autoheuristic_utils.cpython-310.pyc
ADDED
|
Binary file (12 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/learned_heuristic_controller.cpython-310.pyc
ADDED
|
Binary file (3.61 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/__pycache__/learnedheuristic_interface.cpython-310.pyc
ADDED
|
Binary file (4.41 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_MMRankingA100.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa: B950
|
| 2 |
+
# fmt: off
|
| 3 |
+
# This file was generated by AutoHeuristic. Do not modify it manually!
|
| 4 |
+
# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mm/
|
| 5 |
+
from typing import List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import (
|
| 8 |
+
AHContext,
|
| 9 |
+
AHMetadata,
|
| 10 |
+
Choice,
|
| 11 |
+
)
|
| 12 |
+
from torch._inductor.autoheuristic.learnedheuristic_interface import (
|
| 13 |
+
LearnedHeuristicDecision,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class MMRankingA100(LearnedHeuristicDecision):
|
| 18 |
+
|
| 19 |
+
def __init__(self) -> None:
|
| 20 |
+
self.choices: List[Choice] = []
|
| 21 |
+
self.fill_choices()
|
| 22 |
+
|
| 23 |
+
def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
|
| 24 |
+
return (
|
| 25 |
+
metadata.name == self.get_name()
|
| 26 |
+
and metadata.shared_memory == 166912
|
| 27 |
+
and str(metadata.device_capa) == "(8, 0)"
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
def get_confidence_threshold(self) -> float:
|
| 31 |
+
return 0.0
|
| 32 |
+
|
| 33 |
+
def get_choice(self, idx: int) -> Optional[str]:
|
| 34 |
+
if idx < len(self.choices):
|
| 35 |
+
return self.choices[idx]
|
| 36 |
+
return None
|
| 37 |
+
|
| 38 |
+
def fill_choices(self) -> None:
|
| 39 |
+
self.choices.append('extern_mm')
|
| 40 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=16_numstages=4_numwarps=8')
|
| 41 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=32_numstages=4_numwarps=8')
|
| 42 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
|
| 43 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=2_numwarps=8')
|
| 44 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
|
| 45 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=8')
|
| 46 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
|
| 47 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=4')
|
| 48 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=8')
|
| 49 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=2')
|
| 50 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=8')
|
| 51 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=4')
|
| 52 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=8')
|
| 53 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=4')
|
| 54 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=8')
|
| 55 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=4')
|
| 56 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=8')
|
| 57 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=2')
|
| 58 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=8')
|
| 59 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=4')
|
| 60 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=8')
|
| 61 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=4')
|
| 62 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=8')
|
| 63 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
|
| 64 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=8')
|
| 65 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=2')
|
| 66 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=8')
|
| 67 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
|
| 68 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=8')
|
| 69 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
|
| 70 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=8')
|
| 71 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
|
| 72 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=8')
|
| 73 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=2_numwarps=8')
|
| 74 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 75 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=8')
|
| 76 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
|
| 77 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=5_numwarps=4')
|
| 78 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=5_numwarps=8')
|
| 79 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=8')
|
| 80 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=4')
|
| 81 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=8')
|
| 82 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=4')
|
| 83 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=8')
|
| 84 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=4')
|
| 85 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=8')
|
| 86 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=2')
|
| 87 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=8')
|
| 88 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=4')
|
| 89 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=8')
|
| 90 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=4')
|
| 91 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=8')
|
| 92 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
|
| 93 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=8')
|
| 94 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=2')
|
| 95 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=8')
|
| 96 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
|
| 97 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=8')
|
| 98 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
|
| 99 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=8')
|
| 100 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
|
| 101 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8')
|
| 102 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 103 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=8')
|
| 104 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
|
| 105 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
|
| 106 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=4')
|
| 107 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=8')
|
| 108 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=4_numwarps=8')
|
| 109 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=4')
|
| 110 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=8')
|
| 111 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=4')
|
| 112 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=8')
|
| 113 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=4_numwarps=8')
|
| 114 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=4')
|
| 115 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=8')
|
| 116 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 117 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8')
|
| 118 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=8')
|
| 119 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
|
| 120 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=8')
|
| 121 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 122 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=2')
|
| 123 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
|
| 124 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=4')
|
| 125 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 126 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
|
| 127 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=8')
|
| 128 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
|
| 129 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=8')
|
| 130 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=8')
|
| 131 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=1')
|
| 132 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=1_numwarps=2')
|
| 133 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=2')
|
| 134 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=2')
|
| 135 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=2')
|
| 136 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=2')
|
| 137 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=4')
|
| 138 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
|
| 139 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
|
| 140 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 141 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
|
| 142 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=1')
|
| 143 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=2')
|
| 144 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
|
| 145 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
|
| 146 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
|
| 147 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
|
| 148 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
|
| 149 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=32_numstages=2_numwarps=2')
|
| 150 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 151 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=4')
|
| 152 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
|
| 153 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 154 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=16_numstages=2_numwarps=2')
|
| 155 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=4')
|
| 156 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
|
| 157 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
|
| 158 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
|
| 159 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 160 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=1_numwarps=2')
|
| 161 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=2')
|
| 162 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=2')
|
| 163 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=1_numwarps=2')
|
| 164 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=4')
|
| 165 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
|
| 166 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=8')
|
| 167 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=2')
|
| 168 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=2')
|
| 169 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8')
|
| 170 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 171 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
|
| 172 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=32_numstages=2_numwarps=4')
|
| 173 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 174 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=3_numwarps=4')
|
| 175 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=4_numwarps=4')
|
| 176 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=5_numwarps=4')
|
| 177 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=3_numwarps=4')
|
| 178 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=4_numwarps=4')
|
| 179 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
|
| 180 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
|
| 181 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=4')
|
| 182 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
|
| 183 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 184 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
|
| 185 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
|
| 186 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=8')
|
| 187 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=4')
|
| 188 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=4')
|
| 189 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=4')
|
| 190 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=4')
|
| 191 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=4')
|
| 192 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=4')
|
| 193 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=8')
|
| 194 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=4')
|
| 195 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=8')
|
| 196 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
|
| 197 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=8')
|
| 198 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=4')
|
| 199 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
|
| 200 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=8')
|
| 201 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
|
| 202 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=8')
|
| 203 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
|
| 204 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 205 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
|
| 206 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=8')
|
| 207 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=4')
|
| 208 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=4')
|
| 209 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=4')
|
| 210 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=4')
|
| 211 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=4')
|
| 212 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=4')
|
| 213 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=8')
|
| 214 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=4')
|
| 215 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=8')
|
| 216 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
|
| 217 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=8')
|
| 218 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=4')
|
| 219 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
|
| 220 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=8')
|
| 221 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
|
| 222 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=8')
|
| 223 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
|
| 224 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 225 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=4_numwarps=4')
|
| 226 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=4')
|
| 227 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=4_numwarps=4')
|
| 228 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=4')
|
| 229 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=4')
|
| 230 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=8')
|
| 231 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=4_numwarps=4')
|
| 232 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=4')
|
| 233 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 234 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8')
|
| 235 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=4')
|
| 236 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
|
| 237 |
+
|
| 238 |
+
def get_name(self) -> str:
|
| 239 |
+
return 'mm'
|
| 240 |
+
|
| 241 |
+
def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
|
| 242 |
+
if context.get_value('arith_intensity') <= 52.6245059967041:
|
| 243 |
+
if context.get_value('n') <= 34.0:
|
| 244 |
+
if context.get_value('n') <= 18.0:
|
| 245 |
+
if context.get_value('k*n') <= 312.0:
|
| 246 |
+
return [(0.093, 12), (0.081, 16), (0.081, 148), (0.070, 10), (0.070, 17), (0.070, 149), (0.070, 151), (0.070, 150), (0.070, 14), (0.058, 11), (0.058, 15), (0.058, 13), (0.058, 122), (0.047, 121), (0.035, 123), (0.012, 92)]
|
| 247 |
+
else:
|
| 248 |
+
if context.get_value('k') <= 40.0:
|
| 249 |
+
return [(0.083, 42), (0.083, 46), (0.083, 44), (0.083, 40), (0.083, 128), (0.067, 45), (0.067, 43), (0.067, 41), (0.067, 169), (0.067, 171), (0.067, 168), (0.067, 129), (0.067, 170), (0.033, 103), (0.017, 121)]
|
| 250 |
+
else:
|
| 251 |
+
return [(0.112, 137), (0.104, 136), (0.101, 0), (0.081, 1), (0.073, 135), (0.069, 67), (0.066, 187), (0.058, 41), (0.050, 71), (0.046, 68), (0.046, 70), (0.031, 44), (0.027, 43), (0.027, 170), (0.019, 189), (0.019, 188), (0.015, 169), (0.015, 171), (0.012, 115), (0.012, 168), (0.012, 69), (0.004, 103)]
|
| 252 |
+
else:
|
| 253 |
+
if context.get_value('mat1_stride_0') <= 20.0:
|
| 254 |
+
return [(0.069, 0), (0.059, 157), (0.059, 22), (0.059, 153), (0.059, 155), (0.059, 25), (0.059, 23), (0.059, 19), (0.044, 21), (0.044, 18), (0.044, 152), (0.044, 158), (0.044, 154), (0.044, 156), (0.044, 20), (0.044, 124), (0.044, 24), (0.030, 125), (0.029, 126), (0.015, 97), (0.015, 95), (0.015, 96), (0.010, 2), (0.010, 75)]
|
| 255 |
+
else:
|
| 256 |
+
if context.get_value('k') <= 68.0:
|
| 257 |
+
return [(0.087, 72), (0.087, 74), (0.087, 73), (0.086, 76), (0.077, 75), (0.067, 192), (0.058, 190), (0.048, 47), (0.048, 193), (0.048, 49), (0.048, 51), (0.048, 191), (0.038, 53), (0.019, 133), (0.019, 50), (0.019, 175), (0.019, 172), (0.019, 48), (0.019, 174), (0.010, 173), (0.010, 177), (0.010, 52), (0.010, 54), (0.010, 178), (0.010, 176)]
|
| 258 |
+
else:
|
| 259 |
+
return [(0.154, 52), (0.154, 72), (0.102, 75), (0.087, 49), (0.087, 73), (0.086, 51), (0.057, 176), (0.045, 2), (0.038, 191), (0.038, 178), (0.038, 190), (0.029, 173), (0.029, 76), (0.026, 138), (0.013, 139), (0.013, 140), (0.003, 0)]
|
| 260 |
+
else:
|
| 261 |
+
if context.get_value('k') <= 35.0:
|
| 262 |
+
if context.get_value('k') <= 18.0:
|
| 263 |
+
if context.get_value('m*n') <= 19505152.0:
|
| 264 |
+
return [(0.151, 159), (0.140, 160), (0.129, 164), (0.055, 127), (0.051, 29), (0.044, 161), (0.044, 147), (0.040, 146), (0.040, 31), (0.037, 145), (0.026, 28), (0.022, 90), (0.022, 93), (0.022, 94), (0.022, 100), (0.022, 125), (0.022, 158), (0.022, 157), (0.011, 87), (0.011, 88), (0.011, 89), (0.011, 91), (0.011, 95), (0.011, 96), (0.011, 98), (0.011, 99)]
|
| 265 |
+
else:
|
| 266 |
+
return [(0.069, 7), (0.069, 5), (0.067, 147), (0.066, 8), (0.061, 145), (0.058, 146), (0.052, 124), (0.049, 29), (0.049, 159), (0.046, 31), (0.043, 157), (0.041, 9), (0.041, 4), (0.040, 6), (0.035, 164), (0.035, 160), (0.026, 158), (0.017, 125), (0.017, 28), (0.017, 32), (0.017, 162), (0.017, 27), (0.017, 30), (0.017, 161), (0.009, 33), (0.009, 26), (0.009, 163), (0.006, 0)]
|
| 267 |
+
else:
|
| 268 |
+
if context.get_value('n') <= 68.0:
|
| 269 |
+
return [(0.101, 182), (0.101, 59), (0.088, 57), (0.076, 184), (0.076, 61), (0.076, 179), (0.076, 62), (0.076, 58), (0.063, 180), (0.063, 60), (0.051, 56), (0.050, 181), (0.025, 130), (0.025, 177), (0.025, 183), (0.013, 178), (0.013, 55)]
|
| 270 |
+
else:
|
| 271 |
+
return [(0.089, 180), (0.079, 60), (0.066, 35), (0.066, 181), (0.066, 38), (0.066, 58), (0.066, 179), (0.066, 57), (0.062, 184), (0.053, 37), (0.044, 166), (0.040, 55), (0.040, 39), (0.040, 36), (0.040, 165), (0.040, 167), (0.027, 177), (0.027, 34), (0.022, 159)]
|
| 272 |
+
else:
|
| 273 |
+
if context.get_value('m*n') <= 309760.0:
|
| 274 |
+
return [(0.298, 0), (0.097, 140), (0.080, 83), (0.072, 86), (0.044, 84), (0.036, 178), (0.036, 117), (0.036, 82), (0.032, 120), (0.032, 85), (0.028, 119), (0.024, 130), (0.024, 109), (0.020, 108), (0.020, 118), (0.012, 104), (0.012, 116), (0.012, 141), (0.012, 144), (0.008, 105), (0.008, 106), (0.008, 111), (0.008, 114), (0.008, 107), (0.008, 132), (0.004, 101), (0.004, 102), (0.004, 110), (0.004, 112), (0.004, 113), (0.004, 131)]
|
| 275 |
+
else:
|
| 276 |
+
if context.get_value('n') <= 72.0:
|
| 277 |
+
return [(0.227, 77), (0.118, 78), (0.102, 194), (0.086, 80), (0.059, 57), (0.054, 81), (0.049, 196), (0.048, 197), (0.048, 59), (0.043, 79), (0.032, 195), (0.027, 180), (0.022, 3), (0.021, 141), (0.016, 60), (0.016, 142), (0.011, 183), (0.011, 0), (0.011, 144)]
|
| 278 |
+
else:
|
| 279 |
+
return [(0.140, 186), (0.132, 185), (0.109, 63), (0.085, 65), (0.078, 37), (0.077, 35), (0.062, 197), (0.047, 194), (0.046, 165), (0.046, 57), (0.039, 78), (0.039, 79), (0.039, 66), (0.039, 64), (0.016, 195), (0.008, 159)]
|
| 280 |
+
else:
|
| 281 |
+
if str(context.get_value('using_tf32')) != 'False':
|
| 282 |
+
if context.get_value('m*n') <= 815360.0:
|
| 283 |
+
if context.get_value('k') <= 1184.0:
|
| 284 |
+
return [(0.218, 140), (0.205, 0), (0.154, 144), (0.115, 141), (0.051, 185), (0.051, 104), (0.039, 78), (0.038, 116), (0.026, 165), (0.026, 130), (0.026, 178), (0.013, 57), (0.013, 195), (0.013, 167), (0.013, 186)]
|
| 285 |
+
else:
|
| 286 |
+
return [(0.901, 0), (0.030, 144), (0.030, 134), (0.016, 3), (0.006, 78), (0.006, 77), (0.002, 57), (0.002, 194), (0.002, 59), (0.002, 60), (0.002, 143)]
|
| 287 |
+
else:
|
| 288 |
+
if context.get_value('arith_intensity') <= 187.23922729492188:
|
| 289 |
+
if context.get_value('mat1_stride_0') <= 198.0:
|
| 290 |
+
return [(0.273, 63), (0.158, 37), (0.152, 35), (0.127, 57), (0.097, 165), (0.053, 185), (0.031, 0), (0.028, 64), (0.014, 60), (0.014, 78), (0.009, 55), (0.008, 134), (0.005, 34), (0.005, 167), (0.005, 179), (0.005, 65), (0.005, 66), (0.005, 186), (0.005, 194), (0.002, 166)]
|
| 291 |
+
else:
|
| 292 |
+
return [(0.296, 63), (0.235, 0), (0.132, 64), (0.074, 37), (0.069, 78), (0.051, 185), (0.051, 35), (0.030, 57), (0.020, 77), (0.016, 194), (0.008, 66), (0.007, 65), (0.003, 3), (0.003, 165), (0.003, 141), (0.001, 134), (0.001, 166)]
|
| 293 |
+
else:
|
| 294 |
+
return [(0.405, 0), (0.246, 37), (0.177, 63), (0.145, 35), (0.005, 185), (0.005, 65), (0.005, 64), (0.004, 57), (0.003, 66), (0.002, 165), (0.001, 78), (0.001, 55)]
|
| 295 |
+
else:
|
| 296 |
+
return [(0.357, 0), (0.112, 165), (0.101, 57), (0.094, 179), (0.086, 64), (0.074, 167), (0.067, 60), (0.064, 159), (0.033, 35), (0.007, 195), (0.002, 180), (0.001, 34), (0.001, 166), (0.001, 78)]
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_MMRankingH100.py
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa: B950
|
| 2 |
+
# fmt: off
|
| 3 |
+
# This file was generated by AutoHeuristic. Do not modify it manually!
|
| 4 |
+
# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mm/
|
| 5 |
+
from typing import List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import (
|
| 8 |
+
AHContext,
|
| 9 |
+
AHMetadata,
|
| 10 |
+
Choice,
|
| 11 |
+
)
|
| 12 |
+
from torch._inductor.autoheuristic.learnedheuristic_interface import (
|
| 13 |
+
LearnedHeuristicDecision,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class MMRankingH100(LearnedHeuristicDecision):
|
| 18 |
+
|
| 19 |
+
def __init__(self) -> None:
|
| 20 |
+
self.choices: List[Choice] = []
|
| 21 |
+
self.fill_choices()
|
| 22 |
+
|
| 23 |
+
def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
|
| 24 |
+
return (
|
| 25 |
+
metadata.name == self.get_name()
|
| 26 |
+
and metadata.shared_memory == 232448
|
| 27 |
+
and str(metadata.device_capa) == "(9, 0)"
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
def get_confidence_threshold(self) -> float:
|
| 31 |
+
return 0.0
|
| 32 |
+
|
| 33 |
+
def get_choice(self, idx: int) -> Optional[str]:
|
| 34 |
+
if idx < len(self.choices):
|
| 35 |
+
return self.choices[idx]
|
| 36 |
+
return None
|
| 37 |
+
|
| 38 |
+
def fill_choices(self) -> None:
|
| 39 |
+
self.choices.append('extern_mm')
|
| 40 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=16_numstages=4_numwarps=8')
|
| 41 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=32_numstages=4_numwarps=8')
|
| 42 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
|
| 43 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=2_numwarps=8')
|
| 44 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
|
| 45 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=8')
|
| 46 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
|
| 47 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=4')
|
| 48 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=8')
|
| 49 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=2')
|
| 50 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=8')
|
| 51 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=4')
|
| 52 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=8')
|
| 53 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=4')
|
| 54 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=8')
|
| 55 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=4')
|
| 56 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=8')
|
| 57 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=2')
|
| 58 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=8')
|
| 59 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=4')
|
| 60 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=4')
|
| 61 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=8')
|
| 62 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
|
| 63 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=2')
|
| 64 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=8')
|
| 65 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
|
| 66 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=8')
|
| 67 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
|
| 68 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=8')
|
| 69 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
|
| 70 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=8')
|
| 71 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=2_numwarps=8')
|
| 72 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 73 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=8')
|
| 74 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
|
| 75 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=5_numwarps=4')
|
| 76 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=5_numwarps=8')
|
| 77 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=2')
|
| 78 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=8')
|
| 79 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=4')
|
| 80 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=8')
|
| 81 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=4')
|
| 82 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=8')
|
| 83 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=4')
|
| 84 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=8')
|
| 85 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=2')
|
| 86 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=8')
|
| 87 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=4')
|
| 88 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=8')
|
| 89 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=4')
|
| 90 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=8')
|
| 91 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
|
| 92 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=8')
|
| 93 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=2')
|
| 94 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=8')
|
| 95 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
|
| 96 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=8')
|
| 97 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
|
| 98 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=8')
|
| 99 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
|
| 100 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8')
|
| 101 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 102 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=8')
|
| 103 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
|
| 104 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
|
| 105 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=4')
|
| 106 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=8')
|
| 107 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=4_numwarps=8')
|
| 108 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=4')
|
| 109 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=8')
|
| 110 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=4')
|
| 111 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=8')
|
| 112 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=4_numwarps=8')
|
| 113 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=4')
|
| 114 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=8')
|
| 115 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 116 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8')
|
| 117 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=8')
|
| 118 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
|
| 119 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=8')
|
| 120 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 121 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=2')
|
| 122 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=2')
|
| 123 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
|
| 124 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=4')
|
| 125 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 126 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=2_numwarps=8')
|
| 127 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
|
| 128 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=8')
|
| 129 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
|
| 130 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=8')
|
| 131 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=4')
|
| 132 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=8')
|
| 133 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=1')
|
| 134 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=1')
|
| 135 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=1')
|
| 136 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=1_numwarps=2')
|
| 137 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=2')
|
| 138 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=2')
|
| 139 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=2')
|
| 140 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=2')
|
| 141 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=2')
|
| 142 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=4')
|
| 143 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
|
| 144 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
|
| 145 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
|
| 146 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=2_numwarps=8')
|
| 147 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 148 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
|
| 149 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=8')
|
| 150 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=1')
|
| 151 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=1')
|
| 152 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=2')
|
| 153 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=2')
|
| 154 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=4')
|
| 155 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
|
| 156 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
|
| 157 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 158 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=8')
|
| 159 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
|
| 160 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
|
| 161 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 162 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=16_numstages=2_numwarps=2')
|
| 163 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=4')
|
| 164 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
|
| 165 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 166 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=1_numwarps=2')
|
| 167 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=2')
|
| 168 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=2')
|
| 169 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=1_numwarps=2')
|
| 170 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=4')
|
| 171 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
|
| 172 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=8')
|
| 173 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=2')
|
| 174 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=2')
|
| 175 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=4')
|
| 176 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
|
| 177 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8')
|
| 178 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=16_numstages=2_numwarps=2')
|
| 179 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=32_numstages=2_numwarps=4')
|
| 180 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 181 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=3_numwarps=4')
|
| 182 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=4_numwarps=4')
|
| 183 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=5_numwarps=4')
|
| 184 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=3_numwarps=4')
|
| 185 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=4_numwarps=4')
|
| 186 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
|
| 187 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
|
| 188 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=4')
|
| 189 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 190 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
|
| 191 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
|
| 192 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=8')
|
| 193 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=4')
|
| 194 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=4')
|
| 195 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=4')
|
| 196 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=4')
|
| 197 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=4')
|
| 198 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=4')
|
| 199 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=4')
|
| 200 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
|
| 201 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=8')
|
| 202 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=4')
|
| 203 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
|
| 204 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=8')
|
| 205 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
|
| 206 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=8')
|
| 207 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
|
| 208 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 209 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
|
| 210 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=8')
|
| 211 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=4')
|
| 212 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=4')
|
| 213 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=4')
|
| 214 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=4')
|
| 215 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=4')
|
| 216 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=4')
|
| 217 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=8')
|
| 218 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=4')
|
| 219 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=8')
|
| 220 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
|
| 221 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=8')
|
| 222 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=4')
|
| 223 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
|
| 224 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=8')
|
| 225 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
|
| 226 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=8')
|
| 227 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
|
| 228 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 229 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=4_numwarps=4')
|
| 230 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=4')
|
| 231 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=4_numwarps=4')
|
| 232 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=4')
|
| 233 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=4')
|
| 234 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=8')
|
| 235 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=4_numwarps=4')
|
| 236 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=4')
|
| 237 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 238 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8')
|
| 239 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=4')
|
| 240 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
|
| 241 |
+
|
| 242 |
+
def get_name(self) -> str:
|
| 243 |
+
return 'mm'
|
| 244 |
+
|
| 245 |
+
def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
|
| 246 |
+
if context.get_value('arith_intensity') <= 29.89772129058838:
|
| 247 |
+
if context.get_value('n') <= 34.0:
|
| 248 |
+
if context.get_value('n') <= 18.0:
|
| 249 |
+
if context.get_value('k*n') <= 432.0:
|
| 250 |
+
if context.get_value('arith_intensity') <= 7.8700292110443115:
|
| 251 |
+
return [(0.098, 128), (0.098, 129), (0.098, 127), (0.073, 14), (0.073, 16), (0.073, 12), (0.073, 154), (0.073, 156), (0.073, 157), (0.073, 155), (0.049, 10), (0.049, 94), (0.049, 95), (0.048, 96)]
|
| 252 |
+
else:
|
| 253 |
+
return [(0.091, 154), (0.073, 10), (0.073, 15), (0.073, 13), (0.073, 11), (0.073, 17), (0.073, 16), (0.073, 14), (0.073, 12), (0.055, 127), (0.054, 157), (0.054, 156), (0.054, 155), (0.036, 129), (0.036, 128), (0.018, 41), (0.018, 43)]
|
| 254 |
+
else:
|
| 255 |
+
if context.get_value('k') <= 40.0:
|
| 256 |
+
return [(0.070, 39), (0.069, 45), (0.069, 41), (0.069, 43), (0.069, 111), (0.069, 112), (0.056, 38), (0.056, 40), (0.056, 42), (0.056, 44), (0.056, 174), (0.056, 173), (0.056, 175), (0.056, 134), (0.056, 172), (0.056, 135), (0.014, 154), (0.014, 127)]
|
| 257 |
+
else:
|
| 258 |
+
return [(0.147, 144), (0.119, 143), (0.087, 142), (0.083, 0), (0.073, 191), (0.059, 69), (0.050, 67), (0.046, 70), (0.041, 1), (0.036, 174), (0.032, 43), (0.032, 123), (0.028, 40), (0.027, 42), (0.027, 173), (0.023, 175), (0.018, 66), (0.014, 192), (0.014, 193), (0.014, 139), (0.014, 68), (0.014, 127)]
|
| 259 |
+
else:
|
| 260 |
+
if context.get_value('mat1_stride_0') <= 40.0:
|
| 261 |
+
if context.get_value('mat1_stride_0') <= 20.0:
|
| 262 |
+
return [(0.109, 23), (0.109, 21), (0.109, 20), (0.088, 0), (0.087, 131), (0.066, 18), (0.065, 130), (0.065, 132), (0.065, 159), (0.065, 160), (0.065, 161), (0.065, 158), (0.022, 22), (0.022, 19)]
|
| 263 |
+
else:
|
| 264 |
+
return [(0.065, 46), (0.064, 52), (0.064, 50), (0.064, 48), (0.064, 51), (0.064, 49), (0.064, 47), (0.064, 53), (0.064, 181), (0.064, 177), (0.064, 179), (0.064, 176), (0.038, 130), (0.038, 136), (0.026, 182), (0.026, 178), (0.026, 180), (0.026, 137), (0.025, 158), (0.013, 114), (0.013, 113)]
|
| 265 |
+
else:
|
| 266 |
+
if context.get_value('mat1_stride_0') <= 68.0:
|
| 267 |
+
return [(0.138, 140), (0.125, 195), (0.100, 71), (0.100, 74), (0.100, 196), (0.100, 194), (0.100, 197), (0.075, 75), (0.062, 72), (0.062, 73), (0.012, 180), (0.012, 51), (0.012, 182)]
|
| 268 |
+
else:
|
| 269 |
+
return [(0.124, 180), (0.124, 182), (0.114, 75), (0.103, 74), (0.093, 51), (0.093, 71), (0.072, 72), (0.062, 194), (0.052, 145), (0.052, 195), (0.021, 48), (0.021, 50), (0.021, 47), (0.020, 124), (0.010, 147), (0.010, 146), (0.010, 46)]
|
| 270 |
+
else:
|
| 271 |
+
if context.get_value('k') <= 18.0:
|
| 272 |
+
if context.get_value('m*k') <= 528.0:
|
| 273 |
+
return [(0.097, 88), (0.087, 92), (0.077, 90), (0.058, 105), (0.058, 103), (0.058, 104), (0.058, 99), (0.058, 100), (0.058, 106), (0.058, 93), (0.057, 91), (0.057, 97), (0.057, 98), (0.057, 101), (0.048, 102), (0.029, 87), (0.029, 89)]
|
| 274 |
+
else:
|
| 275 |
+
if context.get_value('n') <= 80.0:
|
| 276 |
+
return [(0.057, 161), (0.057, 130), (0.057, 24), (0.056, 164), (0.056, 163), (0.056, 166), (0.056, 168), (0.056, 30), (0.056, 28), (0.056, 26), (0.056, 25), (0.056, 27), (0.056, 29), (0.056, 31), (0.042, 131), (0.028, 99), (0.028, 101), (0.028, 100), (0.028, 167), (0.028, 165), (0.028, 133)]
|
| 277 |
+
else:
|
| 278 |
+
return [(0.110, 164), (0.108, 163), (0.106, 168), (0.069, 161), (0.066, 151), (0.060, 152), (0.055, 165), (0.050, 27), (0.050, 29), (0.048, 131), (0.043, 153), (0.037, 133), (0.037, 130), (0.028, 8), (0.028, 5), (0.027, 7), (0.026, 26), (0.016, 162), (0.012, 9), (0.007, 4), (0.005, 100), (0.005, 6), (0.005, 24)]
|
| 279 |
+
else:
|
| 280 |
+
if context.get_value('k') <= 36.0:
|
| 281 |
+
if context.get_value('n') <= 68.0:
|
| 282 |
+
return [(0.097, 184), (0.097, 56), (0.086, 186), (0.086, 183), (0.086, 188), (0.086, 58), (0.086, 60), (0.065, 54), (0.043, 187), (0.043, 185), (0.043, 57), (0.043, 61), (0.032, 55), (0.032, 130), (0.032, 59), (0.011, 181), (0.011, 163), (0.011, 136), (0.011, 138)]
|
| 283 |
+
else:
|
| 284 |
+
return [(0.117, 184), (0.117, 170), (0.117, 169), (0.107, 183), (0.106, 188), (0.075, 181), (0.064, 130), (0.064, 56), (0.053, 171), (0.032, 57), (0.032, 59), (0.032, 185), (0.011, 163), (0.011, 32), (0.011, 37), (0.011, 34), (0.011, 33), (0.011, 35), (0.011, 36), (0.011, 54)]
|
| 285 |
+
else:
|
| 286 |
+
if context.get_value('mat2_stride_0') <= 384.0:
|
| 287 |
+
return [(0.244, 0), (0.061, 76), (0.061, 79), (0.030, 3), (0.030, 183), (0.030, 189), (0.030, 187), (0.030, 64), (0.030, 190), (0.030, 62), (0.030, 198), (0.030, 201), (0.030, 77), (0.030, 200), (0.030, 80), (0.030, 199), (0.030, 78), (0.030, 184), (0.020, 86), (0.020, 84), (0.020, 120), (0.020, 81), (0.020, 121), (0.020, 85), (0.020, 122), (0.010, 83), (0.010, 118), (0.010, 119), (0.010, 82)]
|
| 288 |
+
else:
|
| 289 |
+
return [(0.274, 83), (0.171, 86), (0.152, 0), (0.071, 85), (0.061, 125), (0.050, 84), (0.020, 109), (0.020, 117), (0.020, 81), (0.020, 118), (0.020, 121), (0.020, 108), (0.020, 115), (0.020, 116), (0.010, 110), (0.010, 120), (0.010, 103), (0.010, 107), (0.010, 119), (0.010, 122)]
|
| 290 |
+
else:
|
| 291 |
+
if context.get_value('arith_intensity') <= 56.995582580566406:
|
| 292 |
+
if context.get_value('n') <= 68.0:
|
| 293 |
+
if context.get_value('k*n') <= 4448.0:
|
| 294 |
+
if context.get_value('m*n') <= 29626368.0:
|
| 295 |
+
return [(0.107, 198), (0.107, 200), (0.107, 201), (0.107, 199), (0.106, 76), (0.106, 79), (0.064, 197), (0.063, 56), (0.043, 184), (0.043, 187), (0.042, 80), (0.042, 77), (0.042, 183), (0.021, 78)]
|
| 296 |
+
else:
|
| 297 |
+
return [(0.073, 201), (0.073, 198), (0.073, 200), (0.073, 199), (0.073, 197), (0.073, 56), (0.073, 58), (0.073, 79), (0.073, 76), (0.072, 59), (0.072, 78), (0.072, 77), (0.072, 80), (0.018, 184), (0.018, 55), (0.018, 54)]
|
| 298 |
+
else:
|
| 299 |
+
if context.get_value('k') <= 348.0:
|
| 300 |
+
return [(0.206, 76), (0.183, 77), (0.169, 198), (0.160, 199), (0.053, 59), (0.046, 56), (0.038, 3), (0.030, 148), (0.030, 58), (0.030, 187), (0.023, 184), (0.015, 0), (0.008, 55), (0.008, 54)]
|
| 301 |
+
else:
|
| 302 |
+
return [(0.146, 198), (0.145, 199), (0.145, 148), (0.126, 0), (0.084, 76), (0.084, 77), (0.042, 80), (0.042, 79), (0.021, 149), (0.021, 150), (0.021, 3), (0.014, 46), (0.014, 74), (0.014, 75), (0.014, 124), (0.014, 194), (0.014, 195), (0.007, 145), (0.007, 146), (0.007, 2), (0.007, 72), (0.007, 147), (0.007, 71)]
|
| 303 |
+
else:
|
| 304 |
+
if context.get_value('m') <= 3264.0:
|
| 305 |
+
return [(0.247, 147), (0.115, 197), (0.066, 199), (0.066, 201), (0.066, 198), (0.049, 0), (0.049, 169), (0.049, 171), (0.033, 140), (0.033, 125), (0.033, 114), (0.016, 126), (0.016, 183), (0.016, 184), (0.016, 185), (0.016, 182), (0.016, 188), (0.016, 78), (0.016, 148), (0.016, 138), (0.016, 77), (0.016, 56), (0.016, 59)]
|
| 306 |
+
else:
|
| 307 |
+
if context.get_value('k') <= 62.5:
|
| 308 |
+
return [(0.226, 190), (0.226, 189), (0.122, 62), (0.122, 64), (0.055, 77), (0.055, 78), (0.037, 198), (0.036, 201), (0.036, 33), (0.024, 163), (0.018, 56), (0.018, 35), (0.018, 169), (0.006, 171)]
|
| 309 |
+
else:
|
| 310 |
+
return [(0.162, 35), (0.118, 33), (0.096, 189), (0.096, 190), (0.088, 169), (0.074, 62), (0.073, 56), (0.066, 171), (0.051, 198), (0.051, 201), (0.044, 59), (0.037, 64), (0.029, 63), (0.007, 0), (0.007, 77)]
|
| 311 |
+
else:
|
| 312 |
+
if context.get_value('m*n') <= 1097728.0:
|
| 313 |
+
return [(0.403, 0), (0.179, 141), (0.134, 150), (0.086, 147), (0.051, 148), (0.048, 3), (0.024, 189), (0.020, 199), (0.017, 64), (0.010, 65), (0.010, 77), (0.007, 114), (0.003, 138), (0.003, 59), (0.003, 182)]
|
| 314 |
+
else:
|
| 315 |
+
if context.get_value('m*n') <= 3244032.0:
|
| 316 |
+
return [(0.295, 189), (0.176, 64), (0.157, 65), (0.090, 0), (0.069, 62), (0.059, 63), (0.046, 77), (0.039, 169), (0.023, 199), (0.020, 35), (0.013, 33), (0.010, 171), (0.003, 141)]
|
| 317 |
+
else:
|
| 318 |
+
if context.get_value('n') <= 136.0:
|
| 319 |
+
return [(0.197, 189), (0.197, 63), (0.161, 77), (0.157, 62), (0.061, 33), (0.044, 65), (0.039, 35), (0.039, 64), (0.030, 169), (0.026, 0), (0.017, 199), (0.017, 148), (0.009, 56), (0.004, 3)]
|
| 320 |
+
else:
|
| 321 |
+
return [(0.460, 0), (0.145, 62), (0.138, 63), (0.081, 35), (0.047, 33), (0.043, 189), (0.023, 64), (0.018, 77), (0.013, 169), (0.009, 65), (0.009, 56), (0.005, 32), (0.005, 59), (0.002, 183), (0.002, 163)]
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_MixedMMA100.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa: B950
|
| 2 |
+
# fmt: off
|
| 3 |
+
# This file was generated by AutoHeuristic. Do not modify it manually!
|
| 4 |
+
# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mixed_mm/
|
| 5 |
+
from typing import List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import (
|
| 8 |
+
AHContext,
|
| 9 |
+
AHMetadata,
|
| 10 |
+
Choice,
|
| 11 |
+
)
|
| 12 |
+
from torch._inductor.autoheuristic.learnedheuristic_interface import (
|
| 13 |
+
LearnedHeuristicDecision,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class MixedMMA100(LearnedHeuristicDecision):
|
| 18 |
+
|
| 19 |
+
def __init__(self) -> None:
|
| 20 |
+
self.choices: List[Choice] = []
|
| 21 |
+
self.fill_choices()
|
| 22 |
+
|
| 23 |
+
def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
|
| 24 |
+
return (
|
| 25 |
+
metadata.name == self.get_name()
|
| 26 |
+
and metadata.shared_memory == 166912
|
| 27 |
+
and str(metadata.device_capa) == "(8, 0)"
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
def get_confidence_threshold(self) -> float:
|
| 31 |
+
return 0.0
|
| 32 |
+
|
| 33 |
+
def get_choice(self, idx: int) -> Optional[str]:
|
| 34 |
+
if idx < len(self.choices):
|
| 35 |
+
return self.choices[idx]
|
| 36 |
+
return None
|
| 37 |
+
|
| 38 |
+
def fill_choices(self) -> None:
|
| 39 |
+
self.choices.append('extern_fallback_mixed_mm')
|
| 40 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 41 |
+
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 42 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 43 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=2')
|
| 44 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=2')
|
| 45 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 46 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=256_BLOCK-N=128_numstages=3_numwarps=4')
|
| 47 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=256_BLOCK-N=128_numstages=5_numwarps=8')
|
| 48 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
|
| 49 |
+
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
|
| 50 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 51 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=4')
|
| 52 |
+
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
|
| 53 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
|
| 54 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
|
| 55 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
|
| 56 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
|
| 57 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=8')
|
| 58 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
|
| 59 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
|
| 60 |
+
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
|
| 61 |
+
|
| 62 |
+
def get_name(self) -> str:
|
| 63 |
+
return 'mixed_mm'
|
| 64 |
+
|
| 65 |
+
def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
|
| 66 |
+
if str(context.get_value('1LEQmLEQ16')) != 'True':
|
| 67 |
+
if context.get_value('m') <= 32.5:
|
| 68 |
+
if context.get_value('n') <= 6976.0:
|
| 69 |
+
if context.get_value('n') <= 3520.0:
|
| 70 |
+
if context.get_value('m*n') <= 37632.0:
|
| 71 |
+
return None
|
| 72 |
+
else:
|
| 73 |
+
return [(1.000, 13)]
|
| 74 |
+
else:
|
| 75 |
+
if context.get_value('m*k') <= 452352.0:
|
| 76 |
+
return [(0.590, 13), (0.256, 8), (0.103, 7), (0.051, 11)]
|
| 77 |
+
else:
|
| 78 |
+
return [(0.778, 8), (0.222, 13)]
|
| 79 |
+
else:
|
| 80 |
+
if context.get_value('k*n') <= 102776832.0:
|
| 81 |
+
if context.get_value('n') <= 14656.0:
|
| 82 |
+
return [(1.000, 11)]
|
| 83 |
+
else:
|
| 84 |
+
return [(0.889, 11), (0.111, 13)]
|
| 85 |
+
else:
|
| 86 |
+
return [(1.000, 11)]
|
| 87 |
+
else:
|
| 88 |
+
if context.get_value('m*n') <= 446464.0:
|
| 89 |
+
if context.get_value('m*n') <= 223424.0:
|
| 90 |
+
if context.get_value('mat1_stride_0') <= 3968.0:
|
| 91 |
+
return None
|
| 92 |
+
else:
|
| 93 |
+
return None
|
| 94 |
+
else:
|
| 95 |
+
if context.get_value('m*n') <= 346112.0:
|
| 96 |
+
return [(0.960, 16), (0.040, 7)]
|
| 97 |
+
else:
|
| 98 |
+
return [(0.750, 16), (0.136, 14), (0.114, 7)]
|
| 99 |
+
else:
|
| 100 |
+
if str(context.get_value('33LEQmLEQ64')) != 'True':
|
| 101 |
+
if context.get_value('n') <= 6976.0:
|
| 102 |
+
return [(1.000, 14)]
|
| 103 |
+
else:
|
| 104 |
+
return [(0.753, 2), (0.222, 1), (0.015, 7), (0.007, 16), (0.004, 12)]
|
| 105 |
+
else:
|
| 106 |
+
if context.get_value('n') <= 13888.0:
|
| 107 |
+
return [(0.710, 14), (0.275, 21), (0.014, 12)]
|
| 108 |
+
else:
|
| 109 |
+
return [(0.374, 19), (0.339, 20), (0.106, 21), (0.101, 16), (0.066, 17), (0.009, 14), (0.004, 18)]
|
| 110 |
+
else:
|
| 111 |
+
if context.get_value('n') <= 3520.0:
|
| 112 |
+
if context.get_value('arith_intensity') <= 3.994754433631897:
|
| 113 |
+
if str(context.get_value('mat2_dtype')) != 'torch.uint8':
|
| 114 |
+
if context.get_value('m*k') <= 18944.0:
|
| 115 |
+
return [(0.577, 5), (0.423, 6)]
|
| 116 |
+
else:
|
| 117 |
+
return [(0.988, 5), (0.012, 6)]
|
| 118 |
+
else:
|
| 119 |
+
if context.get_value('arith_intensity') <= 2.9899919033050537:
|
| 120 |
+
return None
|
| 121 |
+
else:
|
| 122 |
+
return None
|
| 123 |
+
else:
|
| 124 |
+
if context.get_value('arith_intensity') <= 7.956453561782837:
|
| 125 |
+
if context.get_value('k*n') <= 9244032.0:
|
| 126 |
+
return [(0.822, 5), (0.178, 6)]
|
| 127 |
+
else:
|
| 128 |
+
return [(0.977, 5), (0.023, 0)]
|
| 129 |
+
else:
|
| 130 |
+
if context.get_value('m*k') <= 978944.0:
|
| 131 |
+
return [(1.000, 5)]
|
| 132 |
+
else:
|
| 133 |
+
return [(0.971, 5), (0.029, 0)]
|
| 134 |
+
else:
|
| 135 |
+
if context.get_value('n') <= 13632.0:
|
| 136 |
+
if context.get_value('n') <= 6976.0:
|
| 137 |
+
return [(1.000, 6)]
|
| 138 |
+
else:
|
| 139 |
+
if context.get_value('k') <= 3968.0:
|
| 140 |
+
return [(0.617, 3), (0.111, 5), (0.099, 7), (0.086, 9), (0.062, 6), (0.025, 8)]
|
| 141 |
+
else:
|
| 142 |
+
return [(0.779, 8), (0.119, 5), (0.053, 7), (0.035, 6), (0.013, 3)]
|
| 143 |
+
else:
|
| 144 |
+
if context.get_value('k*n') <= 39518208.0:
|
| 145 |
+
return [(0.385, 4), (0.327, 3), (0.192, 6), (0.038, 7), (0.038, 10), (0.019, 5)]
|
| 146 |
+
else:
|
| 147 |
+
if context.get_value('n') <= 20800.0:
|
| 148 |
+
return [(0.821, 6), (0.121, 7), (0.029, 4), (0.014, 5), (0.007, 3), (0.007, 8)]
|
| 149 |
+
else:
|
| 150 |
+
return [(0.530, 7), (0.386, 6), (0.046, 8), (0.021, 3), (0.015, 4), (0.002, 5)]
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/_PadMMA100.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa: B950
|
| 2 |
+
# fmt: off
|
| 3 |
+
# This file was generated by AutoHeuristic. Do not modify it manually!
|
| 4 |
+
# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/pad_mm/
|
| 5 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import AHContext, AHMetadata, Choice, CHOICE_COL
|
| 6 |
+
from torch._inductor.autoheuristic.learnedheuristic_interface import (
|
| 7 |
+
LearnedHeuristicRegression,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class PadMMA100(LearnedHeuristicRegression):
|
| 12 |
+
|
| 13 |
+
def __init__(self) -> None:
|
| 14 |
+
pass
|
| 15 |
+
|
| 16 |
+
def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
|
| 17 |
+
return (
|
| 18 |
+
metadata.name == self.get_name()
|
| 19 |
+
and metadata.shared_memory == 166912
|
| 20 |
+
and str(metadata.device_capa) == "(8, 0)"
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
def get_feedback(self, context: AHContext, choice: Choice) -> float:
|
| 24 |
+
context.context_dict[CHOICE_COL] = choice
|
| 25 |
+
return self.predict(context)
|
| 26 |
+
|
| 27 |
+
def get_confidence_threshold(self) -> float:
|
| 28 |
+
return 1.7025303314066
|
| 29 |
+
|
| 30 |
+
def get_name(self) -> str:
|
| 31 |
+
return 'pad_mm'
|
| 32 |
+
|
| 33 |
+
def predict(self, context: AHContext) -> float:
|
| 34 |
+
if str(context.get_value('choice')) != 'pad':
|
| 35 |
+
if str(context.get_value('using_tf32')) != 'False':
|
| 36 |
+
if context.get_value('m*n') <= 4171264.0:
|
| 37 |
+
if context.get_value('m*k') <= 3999308.0:
|
| 38 |
+
return 1.8751469764071178
|
| 39 |
+
else:
|
| 40 |
+
if str(context.get_value('n_multiple_32')) != 'True':
|
| 41 |
+
return 0.9117231355626345
|
| 42 |
+
else:
|
| 43 |
+
return 1.1607689608873861
|
| 44 |
+
else:
|
| 45 |
+
if str(context.get_value('n_multiple_2')) != 'True':
|
| 46 |
+
if str(context.get_value('using_tf32')) != 'True':
|
| 47 |
+
return 0.7430382200435992
|
| 48 |
+
else:
|
| 49 |
+
return 0.8531269794448678
|
| 50 |
+
else:
|
| 51 |
+
if str(context.get_value('k_multiple_2')) != 'True':
|
| 52 |
+
return 0.7577181972719917
|
| 53 |
+
else:
|
| 54 |
+
return 0.8977349440424219
|
| 55 |
+
else:
|
| 56 |
+
if context.get_value('m*n') <= 1299712.0:
|
| 57 |
+
return 1.1669723418995592
|
| 58 |
+
else:
|
| 59 |
+
if context.get_value('mat2_stride_1') <= 45217.5:
|
| 60 |
+
if context.get_value('m*n') <= 55884158.0:
|
| 61 |
+
return 1.0262769936909601
|
| 62 |
+
else:
|
| 63 |
+
return 1.0022677428470845
|
| 64 |
+
else:
|
| 65 |
+
if context.get_value('m') <= 18478.0:
|
| 66 |
+
return 1.1127066261894312
|
| 67 |
+
else:
|
| 68 |
+
return 1.0337740659894263
|
| 69 |
+
else:
|
| 70 |
+
if str(context.get_value('mat1_dtype')) != 'torch.float32':
|
| 71 |
+
if str(context.get_value('n_multiple_2')) != 'False':
|
| 72 |
+
if str(context.get_value('k_multiple_2')) != 'True':
|
| 73 |
+
if context.get_value('mat1_stride_0') <= 561.0:
|
| 74 |
+
return 1.2900382135142956
|
| 75 |
+
else:
|
| 76 |
+
return 1.5761737616057887
|
| 77 |
+
else:
|
| 78 |
+
if context.get_value('num_dims_needs_padding') <= 1.5:
|
| 79 |
+
return 1.0472263310239422
|
| 80 |
+
else:
|
| 81 |
+
return 1.1727673465762514
|
| 82 |
+
else:
|
| 83 |
+
if context.get_value('k') <= 28238.5:
|
| 84 |
+
if context.get_value('k/(m*n)') <= 0.00026227018679492176:
|
| 85 |
+
return 1.6770542505397175
|
| 86 |
+
else:
|
| 87 |
+
return 1.3974785435105923
|
| 88 |
+
else:
|
| 89 |
+
if str(context.get_value('mat1_dtype')) != 'torch.bfloat16':
|
| 90 |
+
return 1.3952699800111992
|
| 91 |
+
else:
|
| 92 |
+
return 1.5759286511628336
|
| 93 |
+
else:
|
| 94 |
+
if str(context.get_value('using_tf32')) != 'False':
|
| 95 |
+
if context.get_value('m*n') <= 14119424.0:
|
| 96 |
+
return 0.8875772670422478
|
| 97 |
+
else:
|
| 98 |
+
if str(context.get_value('mat2_innermost_needs_padding')) != 'True':
|
| 99 |
+
return 1.1467728924377265
|
| 100 |
+
else:
|
| 101 |
+
return 1.215842963532998
|
| 102 |
+
else:
|
| 103 |
+
if context.get_value('arith_intensity') <= 396.8774871826172:
|
| 104 |
+
return 0.89940161869551
|
| 105 |
+
else:
|
| 106 |
+
if context.get_value('mat2_stride_1') <= 45217.5:
|
| 107 |
+
return 0.9964328169353532
|
| 108 |
+
else:
|
| 109 |
+
return 0.9493479238294826
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/__init__.py
ADDED
|
File without changes
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/artifacts/__pycache__/_MixedMMH100.cpython-310.pyc
ADDED
|
Binary file (6.04 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/autoheuristic.py
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from functools import partial
|
| 4 |
+
from typing import Any, Callable, Dict, List, Optional
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import (
|
| 8 |
+
AHContext,
|
| 9 |
+
AHMetadata,
|
| 10 |
+
AHOperation,
|
| 11 |
+
Choice,
|
| 12 |
+
CHOICE_COL,
|
| 13 |
+
Feedback,
|
| 14 |
+
FEEDBACK_COL,
|
| 15 |
+
get_metadata_str_from_log,
|
| 16 |
+
)
|
| 17 |
+
from torch._inductor.autoheuristic.learned_heuristic_controller import (
|
| 18 |
+
LearnedHeuristicController,
|
| 19 |
+
)
|
| 20 |
+
from torch._inductor.ir import ChoiceCaller
|
| 21 |
+
from torch._inductor.runtime.runtime_utils import cache_dir
|
| 22 |
+
from torch._inductor.utils import get_gpu_shared_memory
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class LocalFeedback:
|
| 26 |
+
"""
|
| 27 |
+
To be able to collect data for a choice, a function providing feedback given a choice has to be provided.
|
| 28 |
+
LocalFeedback can be used when AutoHeuristic should immediately run the function to collect feedback for each choice
|
| 29 |
+
(see pad_mm.py, where the autotuning happens locally, for an example).
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
def __init__(self, feedback_fn: Callable[[Choice], Feedback]) -> None:
|
| 33 |
+
self.feedback_fn = feedback_fn
|
| 34 |
+
|
| 35 |
+
def __call__(self, choice: Choice) -> Feedback:
|
| 36 |
+
return self.feedback_fn(choice)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class InconsistentMetadata(Exception):
|
| 40 |
+
"""
|
| 41 |
+
Exception that is thrown when AutoHeuristic tries to log data to a file where the metadata stored in the file does
|
| 42 |
+
not match the metadata it would store if the file didn't exist.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class AutoHeuristic:
|
| 47 |
+
"""
|
| 48 |
+
AutoHeuristic is a framework that allows one to collect data, learn a heuristic (i.e. a regression tree) and
|
| 49 |
+
generate the heuristic to code. This class allows one to collect data. The collected data can then be used to train
|
| 50 |
+
a heuristic (see torchgen/autoheuristic/).
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
collected_feedback: Dict[Choice, Feedback]
|
| 54 |
+
|
| 55 |
+
def __init__(
|
| 56 |
+
self,
|
| 57 |
+
fallback: Callable[[], Choice],
|
| 58 |
+
choices: List[Choice],
|
| 59 |
+
feedback: Optional[LocalFeedback],
|
| 60 |
+
context: AHContext,
|
| 61 |
+
name: str,
|
| 62 |
+
augment_context: Optional[List[AHOperation]] = None,
|
| 63 |
+
precondition: Optional[Callable[[AHMetadata, AHContext], bool]] = None,
|
| 64 |
+
) -> None:
|
| 65 |
+
"""
|
| 66 |
+
Initializes an instance of the AutoHeuristic class.
|
| 67 |
+
|
| 68 |
+
Args:
|
| 69 |
+
fallback: A callable that returns a Choice when the heuristic is unsure which choice to make, or
|
| 70 |
+
AutoHeuristic is in data collection mode.
|
| 71 |
+
choices: A list of possible choices the heuristic can make.
|
| 72 |
+
feedback: An instance of LocalFeedback that provides feedback for a given choice.
|
| 73 |
+
context: Context to store with each choice and feedback.
|
| 74 |
+
name: A string that identifies the heuristic.
|
| 75 |
+
augment_context: An optional list of AHOperation instances that augment the context.
|
| 76 |
+
precondition: A callable that returns a boolean indicating whether AutoHeuristic should run.
|
| 77 |
+
"""
|
| 78 |
+
self.fallback = fallback
|
| 79 |
+
self.choices = choices
|
| 80 |
+
self.feedback = feedback
|
| 81 |
+
self.context = context
|
| 82 |
+
self.name = name
|
| 83 |
+
self.collected_feedback = {}
|
| 84 |
+
self.augment_context = augment_context
|
| 85 |
+
self.metadata = AHMetadata(
|
| 86 |
+
get_gpu_shared_memory(),
|
| 87 |
+
torch.cuda.get_device_capability(),
|
| 88 |
+
self.choices,
|
| 89 |
+
self.name,
|
| 90 |
+
)
|
| 91 |
+
self.precondition = precondition
|
| 92 |
+
|
| 93 |
+
if not self.satisfies_precondition():
|
| 94 |
+
return
|
| 95 |
+
|
| 96 |
+
if torch._inductor.config.autoheuristic_log_path == "DEFAULT":
|
| 97 |
+
self.log_path = self.get_default_log_path()
|
| 98 |
+
else:
|
| 99 |
+
self.log_path = torch._inductor.config.autoheuristic_log_path
|
| 100 |
+
|
| 101 |
+
if torch._inductor.config.collect_autoheuristic(self.name):
|
| 102 |
+
if self.feedback is not None:
|
| 103 |
+
for choice in self.choices:
|
| 104 |
+
feedback_val = self.feedback(choice)
|
| 105 |
+
self.save_data(choice, feedback_val)
|
| 106 |
+
|
| 107 |
+
def satisfies_precondition(self) -> bool:
|
| 108 |
+
return self.precondition is None or self.precondition(
|
| 109 |
+
self.metadata, self.context
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
def get_choice(self) -> Choice:
|
| 113 |
+
"""
|
| 114 |
+
Returns the chosen option based on the value of autoheuristic_use.
|
| 115 |
+
If self.name is one of the comma separated strings in autoheuristic_use,
|
| 116 |
+
it queries a learned heuristic to make a decision. Otherwise, it returns the fallback option.
|
| 117 |
+
"""
|
| 118 |
+
|
| 119 |
+
if not self.satisfies_precondition():
|
| 120 |
+
return self.fallback()
|
| 121 |
+
|
| 122 |
+
if torch._inductor.config.use_autoheuristic(self.name):
|
| 123 |
+
if self.augment_context is not None:
|
| 124 |
+
self.context.apply_operations(self.augment_context)
|
| 125 |
+
controller = LearnedHeuristicController(
|
| 126 |
+
self.metadata,
|
| 127 |
+
self.context,
|
| 128 |
+
)
|
| 129 |
+
decision = controller.get_decision()
|
| 130 |
+
if decision not in self.choices:
|
| 131 |
+
# TODO(AlnisM): We might want to allow this in the future
|
| 132 |
+
return self.fallback()
|
| 133 |
+
if decision is not None:
|
| 134 |
+
return decision
|
| 135 |
+
return self.fallback()
|
| 136 |
+
|
| 137 |
+
def get_top_k_choices(
|
| 138 |
+
self, top_k: int, always_included: Optional[List[str]] = None
|
| 139 |
+
) -> Optional[List[Choice]]:
|
| 140 |
+
if not self.satisfies_precondition():
|
| 141 |
+
return None
|
| 142 |
+
if torch._inductor.config.use_autoheuristic(self.name):
|
| 143 |
+
if self.augment_context is not None:
|
| 144 |
+
self.context.apply_operations(self.augment_context)
|
| 145 |
+
controller = LearnedHeuristicController(
|
| 146 |
+
self.metadata,
|
| 147 |
+
self.context,
|
| 148 |
+
)
|
| 149 |
+
choices = controller.get_decisions_ranked(top_k)
|
| 150 |
+
if choices is None:
|
| 151 |
+
return None
|
| 152 |
+
if always_included is not None:
|
| 153 |
+
for choice in always_included:
|
| 154 |
+
if choice not in choices:
|
| 155 |
+
choices.append(choice)
|
| 156 |
+
return choices
|
| 157 |
+
return None
|
| 158 |
+
|
| 159 |
+
def get_collected_feedback(self, choice: Choice) -> Any:
|
| 160 |
+
return self.collected_feedback.get(choice, None)
|
| 161 |
+
|
| 162 |
+
@staticmethod
|
| 163 |
+
def get_device_identifier() -> str:
|
| 164 |
+
# a heuristic might work well for one GPU, but not for another
|
| 165 |
+
# we store the collected data per GPU model and learn a heuristic per GPU model
|
| 166 |
+
|
| 167 |
+
# TODO(AlnisM): just using the device name for now, but the same GPU model can have different names
|
| 168 |
+
device_name = torch.cuda.get_device_name().replace(" ", "_")
|
| 169 |
+
return device_name
|
| 170 |
+
|
| 171 |
+
def get_default_log_path(self) -> str:
|
| 172 |
+
device_name = self.get_device_identifier()
|
| 173 |
+
path = f"{cache_dir()}/autoheuristic/{device_name}/"
|
| 174 |
+
os.makedirs(path, exist_ok=True)
|
| 175 |
+
path += f"{self.name}.txt"
|
| 176 |
+
return path
|
| 177 |
+
|
| 178 |
+
def serialize_metadata(self) -> str:
|
| 179 |
+
metadata_dict = self.metadata.to_dict()
|
| 180 |
+
(
|
| 181 |
+
num_features,
|
| 182 |
+
cat_features,
|
| 183 |
+
) = self.context.get_numerical_and_categorical_features()
|
| 184 |
+
metadata_dict["numerical_features"] = num_features
|
| 185 |
+
metadata_dict["categorical_features"] = cat_features
|
| 186 |
+
return json.dumps(metadata_dict)
|
| 187 |
+
|
| 188 |
+
def save_data(self, choice: Choice, feedback_val: Feedback) -> None:
|
| 189 |
+
self.collected_feedback[choice] = feedback_val
|
| 190 |
+
log_path = self.log_path
|
| 191 |
+
|
| 192 |
+
lines = []
|
| 193 |
+
log_exists = os.path.exists(log_path)
|
| 194 |
+
if log_exists:
|
| 195 |
+
# if log already exists, make sure it is consistent
|
| 196 |
+
metadata = self.serialize_metadata()
|
| 197 |
+
existing_metadata = get_metadata_str_from_log(self.log_path)
|
| 198 |
+
if existing_metadata != metadata:
|
| 199 |
+
raise InconsistentMetadata(
|
| 200 |
+
"Given metadata does not match existing metadata"
|
| 201 |
+
)
|
| 202 |
+
else:
|
| 203 |
+
lines.append(self.serialize_metadata())
|
| 204 |
+
feature_header = self.context.get_feature_names_csv()
|
| 205 |
+
header = feature_header + "," + CHOICE_COL + "," + FEEDBACK_COL
|
| 206 |
+
lines.append(header)
|
| 207 |
+
|
| 208 |
+
line = ""
|
| 209 |
+
feature_values = self.context.get_feature_values_csv()
|
| 210 |
+
line += feature_values + "," + choice + "," + str(feedback_val)
|
| 211 |
+
lines.append(line)
|
| 212 |
+
|
| 213 |
+
with open(log_path, "a") as f:
|
| 214 |
+
f.write("\n".join(lines) + "\n")
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class AutoHeuristicSelectAlgorithm(AutoHeuristic):
|
| 218 |
+
"""
|
| 219 |
+
AutoHeuristicSelectAlgorithm is a subclass of AutoHeuristic that allows one to collect data and learn a heuristic
|
| 220 |
+
when one wants to use AutoHeuristic for kernel choice selection.
|
| 221 |
+
"""
|
| 222 |
+
|
| 223 |
+
def __init__(
|
| 224 |
+
self,
|
| 225 |
+
fallback: Callable[[], Optional[ChoiceCaller]],
|
| 226 |
+
choices: List[ChoiceCaller],
|
| 227 |
+
input_nodes: List[Any],
|
| 228 |
+
context: AHContext,
|
| 229 |
+
name: str,
|
| 230 |
+
augment_context: Optional[List[AHOperation]] = None,
|
| 231 |
+
precondition: Optional[Callable[[AHMetadata, AHContext], bool]] = None,
|
| 232 |
+
) -> None:
|
| 233 |
+
"""
|
| 234 |
+
The arguments choices, input_nodes and name have to match the ones used in the call to
|
| 235 |
+
autotune_select_algorithm(), e.g. if the following call is made
|
| 236 |
+
autotune_select_algorithm(name, choices, input_nodes, layout), the same name, choices and input_nodes
|
| 237 |
+
have to be used here.
|
| 238 |
+
"""
|
| 239 |
+
self.input_nodes = input_nodes
|
| 240 |
+
self.choicestr2choice: Dict[str, ChoiceCaller] = {}
|
| 241 |
+
for choice in choices:
|
| 242 |
+
self.choicestr2choice[choice.autoheuristic_id()] = choice
|
| 243 |
+
choices_str = list(self.choicestr2choice.keys())
|
| 244 |
+
|
| 245 |
+
def fallback_str() -> str:
|
| 246 |
+
fallback_choice = fallback()
|
| 247 |
+
if fallback_choice is None:
|
| 248 |
+
# TODO: Find a nicer way to handle this
|
| 249 |
+
return "unsure"
|
| 250 |
+
return fallback_choice.autoheuristic_id()
|
| 251 |
+
|
| 252 |
+
super().__init__(
|
| 253 |
+
fallback_str,
|
| 254 |
+
choices_str,
|
| 255 |
+
None,
|
| 256 |
+
context,
|
| 257 |
+
name,
|
| 258 |
+
augment_context,
|
| 259 |
+
precondition,
|
| 260 |
+
)
|
| 261 |
+
|
| 262 |
+
if (
|
| 263 |
+
torch._inductor.config.collect_autoheuristic(self.name)
|
| 264 |
+
and self.satisfies_precondition()
|
| 265 |
+
):
|
| 266 |
+
self.register_global_feedback(input_nodes, choices)
|
| 267 |
+
|
| 268 |
+
def register_global_feedback(
|
| 269 |
+
self, input_nodes: List[Any], choices: List[ChoiceCaller]
|
| 270 |
+
) -> None:
|
| 271 |
+
"""
|
| 272 |
+
Registers a callback in select_algorithm, which is called with the timing of each choice.
|
| 273 |
+
"""
|
| 274 |
+
|
| 275 |
+
from torch._inductor.select_algorithm import (
|
| 276 |
+
add_feedback_saver,
|
| 277 |
+
create_inputs_key,
|
| 278 |
+
create_precompile_key,
|
| 279 |
+
)
|
| 280 |
+
|
| 281 |
+
def store_global_feedback(
|
| 282 |
+
ah_inputs_key: str,
|
| 283 |
+
ah_precompile_key: str,
|
| 284 |
+
timings: Dict[ChoiceCaller, float],
|
| 285 |
+
name: str,
|
| 286 |
+
input_nodes: List[Any],
|
| 287 |
+
choices: List[ChoiceCaller],
|
| 288 |
+
) -> None:
|
| 289 |
+
current_inputs_key = create_inputs_key(input_nodes)
|
| 290 |
+
if current_inputs_key != ah_inputs_key:
|
| 291 |
+
return
|
| 292 |
+
current_precompile_key = create_precompile_key(
|
| 293 |
+
name, current_inputs_key, choices
|
| 294 |
+
)
|
| 295 |
+
if current_precompile_key != ah_precompile_key:
|
| 296 |
+
return
|
| 297 |
+
for choice, time in timings.items():
|
| 298 |
+
self.save_data(choice.autoheuristic_id(), time)
|
| 299 |
+
|
| 300 |
+
inputs_key = create_inputs_key(input_nodes)
|
| 301 |
+
precompile_key = create_precompile_key(self.name, inputs_key, choices)
|
| 302 |
+
feedback_saver = partial(store_global_feedback, inputs_key, precompile_key)
|
| 303 |
+
add_feedback_saver(feedback_saver)
|
| 304 |
+
|
| 305 |
+
def get_choice_caller(self) -> Optional[ChoiceCaller]:
|
| 306 |
+
choice = self.get_choice()
|
| 307 |
+
return self.choicestr2choice.get(choice, None)
|
| 308 |
+
|
| 309 |
+
def get_top_k_choices_caller(
|
| 310 |
+
self, top_k: int, always_included: Optional[List[str]] = None
|
| 311 |
+
) -> Optional[List[ChoiceCaller]]:
|
| 312 |
+
choices = self.get_top_k_choices(top_k, always_included)
|
| 313 |
+
if choices is None:
|
| 314 |
+
return None
|
| 315 |
+
return [self.choicestr2choice[choice] for choice in choices]
|
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/autoheuristic_utils.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from typing import Any, Callable, Dict, List, Tuple
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
Feedback = float
|
| 8 |
+
Choice = str
|
| 9 |
+
Value = Any
|
| 10 |
+
|
| 11 |
+
CHOICE_COL = "choice"
|
| 12 |
+
FEEDBACK_COL = "feedback"
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class AHFeature:
|
| 16 |
+
"""
|
| 17 |
+
The context, that AutoHeuristic stores, is a list of features. AutoHeuristic needs to know whether a feature is
|
| 18 |
+
categorical (i.e., not a continuous variable) to learn a machine learning model.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
def __init__(self, name: str, value: Value, is_categorical: bool = False) -> None:
|
| 22 |
+
self.name = name
|
| 23 |
+
self.value = value
|
| 24 |
+
self.is_categorical = is_categorical
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class AHOperation:
|
| 28 |
+
"""
|
| 29 |
+
AHOperation can be used to augment the data collected by AutoHeuristic.
|
| 30 |
+
One might for example store features like m, k, n, but also want to use
|
| 31 |
+
features like m*n, or k*n, to learn a heuristic. Instead of storing features
|
| 32 |
+
that can be created from the collected data, one can use AHOperation to
|
| 33 |
+
create new features from the collected data.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(
|
| 37 |
+
self, name: str, func: Callable[[Any], Value], is_categorical: bool = False
|
| 38 |
+
) -> None:
|
| 39 |
+
self.name = name
|
| 40 |
+
self.func = func
|
| 41 |
+
self.is_categorical = is_categorical
|
| 42 |
+
|
| 43 |
+
def apply_operation(self, data: Any) -> None:
|
| 44 |
+
data[self.name] = self.func(data)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class AHContext:
    """
    The set of features AutoHeuristic records for each choice (e.g. tensor
    shapes) together with the collected feedback; this information is what a
    heuristic is learned from.
    """

    features: List[AHFeature]
    context_dict: Dict[str, Value]

    def __init__(self) -> None:
        self.features = []
        self.context_dict = {}

    def add_feature(
        self, name: str, value: Value, is_categorical: bool = False
    ) -> None:
        self.features.append(AHFeature(name, value, is_categorical=is_categorical))
        self.context_dict[name] = value

    def get_numerical_and_categorical_features(self) -> Tuple[List[str], List[str]]:
        # Partition feature names by whether they are categorical.
        numerical: List[str] = []
        categorical: List[str] = []
        for feat in self.features:
            (categorical if feat.is_categorical else numerical).append(feat.name)
        return numerical, categorical

    def get_feature_names_csv(self) -> str:
        return ",".join(feat.name for feat in self.features)

    def get_feature_values_csv(self) -> str:
        return ",".join(str(feat.value) for feat in self.features)

    def get_value(self, name: str) -> Value:
        return self.context_dict[name]

    def apply_operations(self, operations: List[AHOperation]) -> None:
        # Each operation writes its derived feature into context_dict.
        for operation in operations:
            operation.apply_operation(self.context_dict)
class AHMetadata:
    """Metadata identifying the GPU, the optimization, and its choices."""

    def __init__(
        self,
        shared_memory: Any,
        device_capa: Tuple[int, int],
        choices: List[Choice],
        name: str,
    ) -> None:
        # The pair (shared_memory, device_capability) is used to identify the
        # GPU. TODO(AlnisM): there might be a better way to do this.
        self.shared_memory = shared_memory
        self.device_capa = device_capa
        self.choices = choices
        self.name = name

    def to_dict(self) -> Dict[str, Value]:
        # Note: `choices` is deliberately not part of the serialized form.
        return {
            "shared_memory": self.shared_memory,
            "device_capa": self.device_capa,
            "name": self.name,
        }
def get_metadata_str_from_log(log_path: str) -> str:
    """Return the first line of an AutoHeuristic log (the JSON metadata)."""
    with open(log_path, newline="") as fp:
        return fp.readline().strip()
def check_minsize(context: AHContext, minsize: int) -> bool:
    """True iff the m, k and n features are all at least `minsize`."""
    return all(context.get_value(dim) >= minsize for dim in ("m", "k", "n"))
def pad_mm_precondition(metadata: AHMetadata, context: AHContext) -> bool:
    """GPU-specific gate for the pad_mm heuristic: require minimum sizes on
    known devices, otherwise always allow."""
    gpu = (metadata.shared_memory, metadata.device_capa)
    if gpu == (166912, (8, 0)):
        # A100 precondition
        return check_minsize(context, 512)
    if gpu == (232448, (9, 0)):
        # H100 precondition
        return check_minsize(context, 768)
    return True
def get_mixedmm_precondition(metadata: AHMetadata, context: AHContext) -> bool:
    """Gate for the mixed-mm heuristic: small m, large k/n, and mat1
    contiguous while mat2 is not."""
    m_val = context.get_value("m")
    k_val = context.get_value("k")
    n_val = context.get_value("n")
    if m_val > 128 or k_val < 1024 or n_val < 1024:
        return False
    contig1 = context.get_value("mat1_iscontig")
    contig2 = context.get_value("mat2_iscontig")
    return contig1 and not contig2
def get_mult_dims_ops() -> List[AHOperation]:
    """Derived features that multiply pairs of the m/k/n dimensions."""
    ops = []
    for a, b in (("m", "k"), ("m", "n"), ("k", "n")):
        # Bind a/b as defaults so each lambda keeps its own pair.
        ops.append(AHOperation(f"{a}*{b}", lambda data, x=a, y=b: data[x] * data[y]))
    return ops
def get_arith_intensity(data: Any) -> float:
    """Arithmetic intensity proxy for a matmul: m*k*n over the summed sizes
    of the two inputs and the output (m*k + k*n + m*n)."""
    dims = [data[d] for d in ("m", "k", "n")]
    if 0 in dims:
        # Degenerate matmul; also avoids a zero denominator.
        return 0.0
    m_val, k_val, n_val = dims
    flops = m_val * k_val * n_val
    footprint = m_val * k_val + k_val * n_val + m_val * n_val
    return flops / footprint
def pad_mm_operations() -> List[AHOperation]:
    """All derived features used when learning the pad_mm heuristic."""
    k_div_m_times_n_op = AHOperation(
        "k/(m*n)", lambda data: data["k"] / (data["m"] * data["n"])
    )

    def bfloat_perf_hit(data: Any) -> bool:
        # bf16 matmuls where k dwarfs both m and n hit a known perf cliff.
        m_val = data["m"]
        k_val = data["k"]
        n_val = data["n"]
        is_bfloat = str(data["mat1_dtype"]) == "torch.bfloat16"
        return k_val > (m_val * 1024) and k_val > (n_val * 1024) and is_bfloat

    ops: List[AHOperation] = get_mult_dims_ops()
    ops.append(k_div_m_times_n_op)
    ops.append(AHOperation("bfloat_perf_hit", bfloat_perf_hit, is_categorical=True))
    ops.append(AHOperation("arith_intensity", get_arith_intensity))
    ops.extend(get_dims_need_padding_ops())
    ops.extend(get_dims_multiple_ops())
    ops.extend(get_is_contig_ops())
    return ops
def between_op(data: Any, dim: str, lower: int, upper: int) -> bool:
    """True iff the given dimension lies in [lower, upper] inclusive."""
    return lower <= data[dim] <= upper
def between_ops() -> List[AHOperation]:
    """Categorical features flagging which fixed range each of m/k/n falls in."""
    buckets = [(1, 16), (17, 32), (33, 64), (65, 128), (129, 256)]
    ops = []
    for dim in ["m", "k", "n"]:
        for lo, hi in buckets:
            range_check = functools.partial(between_op, dim=dim, lower=lo, upper=hi)
            # using 'LEQ' instead of '<=' because '<=' cannot be exported to dot
            ops.append(AHOperation(f"{lo}LEQ{dim}LEQ{hi}", range_check, is_categorical=True))
    return ops
def pow2_op(data: Any, dim: str, exponent: int) -> bool:
    """True iff the given dimension equals 2**exponent."""
    target = 2**exponent
    return data[dim] == target
def mm_operations() -> List[AHOperation]:
    """Derived features for plain mm: dim products plus arithmetic intensity."""
    ops = get_mult_dims_ops()
    ops.append(AHOperation("arith_intensity", get_arith_intensity))
    return ops
def mixed_mm_operations() -> List[AHOperation]:
    """mm features plus the range-bucket features used for mixed mm."""
    ops = mm_operations()
    ops.extend(between_ops())
    return ops
def is_multiple(data: Any, dim: str, mult: int) -> bool:
    """True iff the given dimension is evenly divisible by `mult`."""
    remainder = data[dim] % mult
    return remainder == 0
def get_dims_multiple_ops() -> List[AHOperation]:
    """Categorical features flagging divisibility of m/k/n by small powers of two."""
    return [
        AHOperation(
            f"{dim}_multiple_{mult}",
            functools.partial(is_multiple, dim=dim, mult=mult),
            is_categorical=True,
        )
        for dim in ["m", "k", "n"]
        for mult in [2, 4, 8, 16, 32]
    ]
def get_dims_need_padding_ops() -> List[AHOperation]:
    """Features describing which matmul operands/dims actually require padding."""

    def _innermost_needs_padding(data: Any, mat: str, pad0: str, pad1: str) -> bool:
        # An operand's innermost (stride-1) dim needs padding when the padded
        # length associated with that dim is non-zero.
        stride_0 = data[f"{mat}_stride_0"]
        stride_1 = data[f"{mat}_stride_1"]
        needs_padding = False
        if stride_0 == 1 and data[pad0] != 0:
            needs_padding = True
        if stride_1 == 1 and data[pad1] != 0:
            needs_padding = True
        return needs_padding

    # mat1 is (m, k): stride_0 == 1 pairs with m, stride_1 == 1 pairs with k.
    mat1_innermost_op = AHOperation(
        "mat1_innermost_needs_padding",
        lambda data: _innermost_needs_padding(
            data, "mat1", "m_padded_length", "k_padded_length"
        ),
        is_categorical=True,
    )

    # mat2 is (k, n): stride_0 == 1 pairs with k, stride_1 == 1 pairs with n.
    mat2_innermost_op = AHOperation(
        "mat2_innermost_needs_padding",
        lambda data: _innermost_needs_padding(
            data, "mat2", "k_padded_length", "n_padded_length"
        ),
        is_categorical=True,
    )

    def num_dims_needs_padding_fn(data: Any) -> int:
        # Count how many of m/k/n have a non-zero padded length.
        keys = ("m_padded_length", "k_padded_length", "n_padded_length")
        return sum(1 for key in keys if data[key] != 0)

    num_dims_op = AHOperation("num_dims_needs_padding", num_dims_needs_padding_fn)
    return [mat1_innermost_op, mat2_innermost_op, num_dims_op]
def get_is_contig_ops() -> List[AHOperation]:
    """Categorical features: is each matmul operand row-major contiguous?"""

    def _is_contig(data: Any, mat: str, inner_dim: str) -> bool:
        # Row-major contiguous iff stride_0 equals the row length and the
        # innermost stride is 1.
        stride_0 = data[f"{mat}_stride_0"]
        stride_1 = data[f"{mat}_stride_1"]
        return stride_0 == data[inner_dim] and stride_1 == 1

    mat1_is_contig_op = AHOperation(
        "mat1_iscontig",
        lambda data: _is_contig(data, "mat1", "k"),
        is_categorical=True,
    )
    mat2_is_contig_op = AHOperation(
        "mat2_iscontig",
        lambda data: _is_contig(data, "mat2", "n"),
        is_categorical=True,
    )
    return [mat1_is_contig_op, mat2_is_contig_op]
def context_add_strides(context: AHContext, name: str, stride: Tuple[int, ...]) -> None:
    """Add one feature per stride entry, named '{name}_stride_{i}'."""
    for idx, stride_val in enumerate(stride):
        context.add_feature(f"{name}_stride_{idx}", stride_val)
def context_add_using_tf32(context: AHContext, dtype: torch.dtype) -> None:
    """Record whether TF32 matmul is enabled; only meaningful for float32."""
    if dtype == torch.float32:
        using_tf32 = torch.backends.cuda.matmul.allow_tf32
    else:
        using_tf32 = "not_float_32"
    context.add_feature("using_tf32", using_tf32, is_categorical=True)
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/learned_heuristic_controller.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
import inspect
|
| 3 |
+
import pkgutil
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
from typing import Any, Dict, List, Optional
|
| 6 |
+
|
| 7 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import (
|
| 8 |
+
AHContext,
|
| 9 |
+
AHMetadata,
|
| 10 |
+
Choice,
|
| 11 |
+
)
|
| 12 |
+
from torch._inductor.autoheuristic.learnedheuristic_interface import LearnedHeuristic
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def find_and_instantiate_subclasses(
    package_name: str, base_class: Any
) -> List[LearnedHeuristic]:
    """Walk `package_name`, import every underscore-prefixed module, and
    return one instance of each strict subclass of `base_class` found.

    Modules that fail to import or instantiate are reported and skipped.
    """
    instances: List[LearnedHeuristic] = []

    package = importlib.import_module(package_name)
    for _, module_name, _ in pkgutil.walk_packages(
        package.__path__, package.__name__ + "."
    ):
        try:
            # learned heuristics start with an underscore; skip everything else
            if not module_name.split(".")[-1].startswith("_"):
                continue
            module = importlib.import_module(module_name)

            # look for classes that are subclasses of base_class
            for _name, obj in inspect.getmembers(module, inspect.isclass):
                if issubclass(obj, base_class) and obj is not base_class:
                    instances.append(obj())
        except Exception as e:
            print(f"Error processing module {module_name}: {e}")

    return instances
class LearnedHeuristicController:
    """
    Lazily discovers all generated learned heuristics and exposes their
    decisions for a given (metadata, context) pair.
    """

    # Maps an optimization name to every learned heuristic registered for it.
    # Shared across instances so discovery happens only once per process.
    existing_heuristics: Dict[str, List[LearnedHeuristic]] = defaultdict(list)

    # Flipped to True the first time get_heuristics() scans the artifacts
    # package.
    heuristics_initialized: bool = False

    def __init__(
        self,
        metadata: AHMetadata,
        context: AHContext,
    ) -> None:
        self.metadata = metadata
        self.context = context

    def get_heuristics(self, name: str) -> List[LearnedHeuristic]:
        """Return every learned heuristic registered for optimization `name`."""
        cls = LearnedHeuristicController
        if not cls.heuristics_initialized:
            # Learned heuristics are generated into this package and must be
            # subclasses of LearnedHeuristic.
            found = find_and_instantiate_subclasses(
                "torch._inductor.autoheuristic.artifacts", LearnedHeuristic
            )
            for heuristic in found:
                cls.existing_heuristics[heuristic.get_name()].append(heuristic)
            cls.heuristics_initialized = True

        return cls.existing_heuristics[name]

    def get_decision(self) -> Optional[Choice]:
        """
        Decision of the first heuristic whose precondition holds, or None when
        no heuristic applies or the applicable one is unsure.
        """
        for heuristic in self.get_heuristics(self.metadata.name):
            if heuristic.check_precondition(self.metadata, self.context):
                return heuristic.get_decision(self.context, self.metadata.choices)
        return None

    def get_decisions_ranked(self, top_k: int) -> Optional[List[Choice]]:
        """Top-k ranked available choices from the first applicable heuristic."""
        for heuristic in self.get_heuristics(self.metadata.name):
            if not heuristic.check_precondition(self.metadata, self.context):
                continue
            ranked = heuristic.get_decisions_ranked(self.context)
            if ranked is None:
                return None
            # Drop predictions that are not among the currently available
            # choices before truncating to top_k.
            available = [choice for choice in ranked if choice in self.metadata.choices]
            return available[:top_k]
        return None
phi4/lib/python3.10/site-packages/torch/_inductor/autoheuristic/learnedheuristic_interface.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional, Tuple
|
| 2 |
+
|
| 3 |
+
from torch._inductor.autoheuristic.autoheuristic_utils import (
|
| 4 |
+
AHContext,
|
| 5 |
+
AHMetadata,
|
| 6 |
+
Choice,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class LearnedHeuristic:
    """
    Base class for all learned heuristics. Generated heuristics override the
    hooks below; the base implementations are inert defaults.
    """

    def __init__(self) -> None:
        pass

    def check_precondition(
        self,
        metadata: AHMetadata,
        context: AHContext,
    ) -> bool:
        # By default a heuristic is applicable everywhere.
        return True

    def get_decision(
        self, context: AHContext, choices: List[Choice]
    ) -> Optional[Choice]:
        # None means "no decision" / unsure.
        return None

    def get_confidence_threshold(self) -> float:
        return 1.0

    def get_name(self) -> str:
        # The optimization this heuristic belongs to; overridden by subclasses.
        return ""

    def get_decisions_ranked(self, context: AHContext) -> Optional[List[str]]:
        return None
class LearnedHeuristicRegression(LearnedHeuristic):
    """
    Learned heuristic backed by a regression model: it predicts a feedback
    score per choice and picks the best one only when it beats the runner-up
    by more than the confidence threshold.
    """

    def __init__(self) -> None:
        super().__init__()

    def get_feedback(self, context: AHContext, choice: Choice) -> float:
        """Predicted feedback for `choice`; overridden by generated heuristics."""
        return 1.0

    def get_decision(
        self, context: AHContext, choices: List[Choice]
    ) -> Optional[Choice]:
        """
        Return the choice with the highest predicted feedback, or None when
        there are no choices or the margin over the second-best choice does
        not exceed the confidence threshold.
        """
        # Guard against an empty candidate list (original code raised
        # IndexError here).
        if not choices:
            return None
        choice2feedback = {
            choice: self.get_feedback(context, choice) for choice in choices
        }
        sorted_choices_feedback = sorted(choice2feedback.items(), key=lambda t: t[1])
        best_choice, highest_feedback = sorted_choices_feedback[-1]
        if len(sorted_choices_feedback) == 1:
            # Only one candidate: nothing to compare against, so take it.
            return best_choice
        second_highest_feedback = sorted_choices_feedback[-2][1]
        if second_highest_feedback == 0:
            # Avoid ZeroDivisionError: a strictly positive best trivially
            # dominates a zero runner-up; otherwise we cannot tell.
            return best_choice if highest_feedback > 0 else None
        if highest_feedback / second_highest_feedback > self.get_confidence_threshold():
            return best_choice
        # We are not sure which choice is the best one
        return None
class LearnedHeuristicDecision(LearnedHeuristic):
    """
    Learned heuristic backed by a classifier: choices are ranked by predicted
    probability, and an index-to-choice mapping translates class indices back
    into choice names.
    """

    def __init__(self) -> None:
        super().__init__()

    def get_choice(self, idx: int) -> Optional[str]:
        # Maps a class index to a choice name; overridden by subclasses.
        return None

    def get_decision(
        self, context: AHContext, choices: List[Choice]
    ) -> Optional[Choice]:
        """Return the most probable choice, or None when confidence is too low."""
        ranked = self.get_best_choices(context)
        if not ranked:
            return None
        (top_proba, top_idx) = ranked[0]
        if top_proba <= self.get_confidence_threshold():
            return None
        return self.get_choice(top_idx)

    def get_decisions_ranked(self, context: AHContext) -> Optional[List[str]]:
        """All predicted choices, best first, with unknown indices dropped."""
        ranked = self.get_best_choices(context)
        if ranked is None:
            return None
        result: List[str] = []
        for _proba, idx in ranked:
            choice = self.get_choice(idx)
            if choice is not None:
                result.append(choice)
        return result

    def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
        # (probability, choice-index) pairs, best first; overridden by
        # subclasses.
        return []
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/aoti_hipify_utils.cpython-310.pyc
ADDED
|
Binary file (819 Bytes). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/block_analysis.cpython-310.pyc
ADDED
|
Binary file (4.37 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (73.8 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_bmm_template.cpython-310.pyc
ADDED
|
Binary file (8.49 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_flex_attention_template.cpython-310.pyc
ADDED
|
Binary file (39.4 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_gemm_template.cpython-310.pyc
ADDED
|
Binary file (41.3 kB). View file
|
|
|
phi4/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp_micro_gemm.cpython-310.pyc
ADDED
|
Binary file (26.8 kB). View file
|
|
|