Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h +246 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h +281 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h +291 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h +1732 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h +513 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h +1019 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_convert.h +262 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h +708 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_int.h +1459 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_mask.h +393 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h +1409 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_native.h +21 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_native.h +25 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_ops.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true.h +30 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h +20 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h +25 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h +28 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_cuda_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h +24 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_ops.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h +28 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/angle_ops.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_ops.h +61 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_compositeexplicitautograd_dispatch.h +24 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_meta_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h +22 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_pack_gemm_matrix_fp16_ops.h +28 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft.h +91 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h +26 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h +26 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/gt_cuda_dispatch.h +30 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h +38 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta.h +39 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h +27 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h +25 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_norm_ops.h +61 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h +24 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_ops.h +105 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h +24 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h +26 -0
- videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/moveaxis_ops.h +39 -0
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
| 6 |
+
|
| 7 |
+
// Note: header order is important here
|
| 8 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h>
|
| 10 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h>
|
| 11 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h>
|
| 12 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h>
|
| 13 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h>
|
| 14 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h>
|
| 15 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h>
|
| 16 |
+
|
| 17 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h>
|
| 18 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h>
|
| 19 |
+
|
| 20 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
namespace vec {
|
| 24 |
+
|
| 25 |
+
inline namespace CPU_CAPABILITY {
|
| 26 |
+
|
| 27 |
+
DEFINE_CLAMP_FUNCS(c10::quint8)
|
| 28 |
+
DEFINE_CLAMP_FUNCS(c10::qint8)
|
| 29 |
+
DEFINE_CLAMP_FUNCS(c10::qint32)
|
| 30 |
+
DEFINE_CLAMP_FUNCS(int16_t)
|
| 31 |
+
DEFINE_CLAMP_FUNCS(int32_t)
|
| 32 |
+
DEFINE_CLAMP_FUNCS(int64_t)
|
| 33 |
+
DEFINE_CLAMP_FUNCS(float)
|
| 34 |
+
DEFINE_CLAMP_FUNCS(double)
|
| 35 |
+
|
| 36 |
+
template <>
|
| 37 |
+
Vectorized<double> C10_ALWAYS_INLINE fmadd(
|
| 38 |
+
const Vectorized<double>& a,
|
| 39 |
+
const Vectorized<double>& b,
|
| 40 |
+
const Vectorized<double>& c) {
|
| 41 |
+
return Vectorized<double>{
|
| 42 |
+
vec_madd(a.vec0(), b.vec0(), c.vec0()),
|
| 43 |
+
vec_madd(a.vec1(), b.vec1(), c.vec1())};
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
template <>
|
| 47 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
|
| 48 |
+
const Vectorized<int64_t>& a,
|
| 49 |
+
const Vectorized<int64_t>& b,
|
| 50 |
+
const Vectorized<int64_t>& c) {
|
| 51 |
+
return Vectorized<int64_t>{
|
| 52 |
+
a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
|
| 53 |
+
}
|
| 54 |
+
template <>
|
| 55 |
+
Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
|
| 56 |
+
const Vectorized<int32_t>& a,
|
| 57 |
+
const Vectorized<int32_t>& b,
|
| 58 |
+
const Vectorized<int32_t>& c) {
|
| 59 |
+
return Vectorized<int32_t>{
|
| 60 |
+
a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
|
| 61 |
+
}
|
| 62 |
+
template <>
|
| 63 |
+
Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
|
| 64 |
+
const Vectorized<int16_t>& a,
|
| 65 |
+
const Vectorized<int16_t>& b,
|
| 66 |
+
const Vectorized<int16_t>& c) {
|
| 67 |
+
return Vectorized<int16_t>{
|
| 68 |
+
a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
|
| 72 |
+
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
|
| 73 |
+
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
|
| 74 |
+
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
|
| 75 |
+
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)
|
| 76 |
+
|
| 77 |
+
template <>
|
| 78 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE
|
| 79 |
+
convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
|
| 80 |
+
return Vectorized<int64_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
template <>
|
| 84 |
+
Vectorized<int32_t> C10_ALWAYS_INLINE
|
| 85 |
+
convert_to_int_of_same_size<float>(
|
| 86 |
+
const Vectorized<float>& src) {
|
| 87 |
+
return Vectorized<int32_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
template <>
|
| 91 |
+
inline void convert(const int32_t* src, float* dst, int64_t n) {
|
| 92 |
+
// int32_t and float have same size
|
| 93 |
+
int64_t i;
|
| 94 |
+
for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
|
| 95 |
+
const int32_t* src_a = src + i;
|
| 96 |
+
float* dst_a = dst + i;
|
| 97 |
+
vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint32*>(src_a));
|
| 98 |
+
vint32 input_vec1 =
|
| 99 |
+
vec_vsx_ld(offset16, reinterpret_cast<const vint32*>(src_a));
|
| 100 |
+
vfloat32 c0 = vec_float(input_vec0);
|
| 101 |
+
vfloat32 c1 = vec_float(input_vec1);
|
| 102 |
+
vec_vsx_st(c0, offset0, dst_a);
|
| 103 |
+
vec_vsx_st(c1, offset16, dst_a);
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
for (; i < n; i++) {
|
| 107 |
+
dst[i] = static_cast<float>(src[i]);
|
| 108 |
+
}
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
template <>
|
| 112 |
+
inline void convert(const int64_t* src, double* dst, int64_t n) {
|
| 113 |
+
int64_t i;
|
| 114 |
+
for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
|
| 115 |
+
const int64_t* src_a = src + i;
|
| 116 |
+
double* dst_a = dst + i;
|
| 117 |
+
vint64 input_vec0 =
|
| 118 |
+
vec_vsx_ld(offset0, reinterpret_cast<const vint64*>(src_a));
|
| 119 |
+
vint64 input_vec1 =
|
| 120 |
+
vec_vsx_ld(offset16, reinterpret_cast<const vint64*>(src_a));
|
| 121 |
+
vfloat64 c0 = vec_double(input_vec0);
|
| 122 |
+
vfloat64 c1 = vec_double(input_vec1);
|
| 123 |
+
vec_vsx_st(c0, offset0, reinterpret_cast<double*>(dst_a));
|
| 124 |
+
vec_vsx_st(c1, offset16, reinterpret_cast<double*>(dst_a));
|
| 125 |
+
}
|
| 126 |
+
for (; i < n; i++) {
|
| 127 |
+
dst[i] = static_cast<double>(src[i]);
|
| 128 |
+
}
|
| 129 |
+
}
|
| 130 |
+
//Generic implementation to fix compiler error
|
| 131 |
+
//TO-DO : Add optimized version for ppc64
|
| 132 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(
|
| 133 |
+
const Vectorized<Half>& a) {
|
| 134 |
+
constexpr int64_t K = Vectorized<Half>::size();
|
| 135 |
+
__at_align__ float arr[K];
|
| 136 |
+
__at_align__ Half arr2[K];
|
| 137 |
+
a.store(arr2);
|
| 138 |
+
convert(arr2, arr, K);
|
| 139 |
+
return std::make_tuple(
|
| 140 |
+
Vectorized<float>::loadu(arr),
|
| 141 |
+
Vectorized<float>::loadu(arr + Vectorized<float>::size()));
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
inline Vectorized<Half> convert_float_half(
|
| 145 |
+
const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 146 |
+
constexpr int64_t K = Vectorized<Half>::size();
|
| 147 |
+
__at_align__ float arr[K];
|
| 148 |
+
__at_align__ Half arr2[K];
|
| 149 |
+
a.store(arr);
|
| 150 |
+
b.store(arr + Vectorized<float>::size());
|
| 151 |
+
convert(arr, arr2, K);
|
| 152 |
+
return Vectorized<Half>::loadu(arr2);
|
| 153 |
+
};
|
| 154 |
+
|
| 155 |
+
template <>
|
| 156 |
+
std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
|
| 157 |
+
const Vectorized<double>& a,
|
| 158 |
+
const Vectorized<double>& b) {
|
| 159 |
+
// inputs:
|
| 160 |
+
// a = {a0, a1, a2, a3}
|
| 161 |
+
// b = {b0, b1, b2, b3}
|
| 162 |
+
|
| 163 |
+
vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0);
|
| 164 |
+
vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3);
|
| 165 |
+
vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0);
|
| 166 |
+
vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3);
|
| 167 |
+
// return {a0, b0, a1, b1}
|
| 168 |
+
// {a2, b2, a3, b3}
|
| 169 |
+
return std::make_pair(
|
| 170 |
+
Vectorized<double>{ab00, ab11}, Vectorized<double>{ab2_00, ab2_11});
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
template <>
|
| 174 |
+
std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
|
| 175 |
+
const Vectorized<double>& a,
|
| 176 |
+
const Vectorized<double>& b) {
|
| 177 |
+
// inputs:
|
| 178 |
+
// a = {a0, b0, a1, b1}
|
| 179 |
+
// b = {a2, b2, a3, b3}
|
| 180 |
+
vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0);
|
| 181 |
+
vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0);
|
| 182 |
+
|
| 183 |
+
vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3);
|
| 184 |
+
vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3);
|
| 185 |
+
|
| 186 |
+
// swap lanes:
|
| 187 |
+
// return {a0, a1, a2, a3}
|
| 188 |
+
// {b0, b1, b2, b3}
|
| 189 |
+
return std::make_pair(
|
| 190 |
+
Vectorized<double>{aa01, aa23}, Vectorized<double>{bb_01, bb_23});
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
template <>
|
| 194 |
+
std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
|
| 195 |
+
const Vectorized<float>& a,
|
| 196 |
+
const Vectorized<float>& b) {
|
| 197 |
+
// inputs:
|
| 198 |
+
// a = {a0, a1, a2, a3,, a4, a5, a6, a7}
|
| 199 |
+
// b = {b0, b1, b2, b3,, b4, b5, b6, b7}
|
| 200 |
+
|
| 201 |
+
vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0());
|
| 202 |
+
vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0());
|
| 203 |
+
|
| 204 |
+
vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
|
| 205 |
+
vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1());
|
| 206 |
+
// group cols crossing lanes:
|
| 207 |
+
// return {a0, b0, a1, b1,, a2, b2, a3, b3}
|
| 208 |
+
// {a4, b4, a5, b5,, a6, b6, a7, b7}
|
| 209 |
+
|
| 210 |
+
return std::make_pair(
|
| 211 |
+
Vectorized<float>{ab0011, ab2233}, Vectorized<float>{ab2_0011, ab2_2233});
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
template <>
|
| 215 |
+
std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
|
| 216 |
+
const Vectorized<float>& a,
|
| 217 |
+
const Vectorized<float>& b) {
|
| 218 |
+
// inputs:
|
| 219 |
+
// a = {a0, b0, a1, b1,, a2, b2, a3, b3}
|
| 220 |
+
// b = {a4, b4, a5, b5,, a6, b6, a7, b7}
|
| 221 |
+
|
| 222 |
+
// {a0,a2,b0,b2} {a1,a3,b1,b3}
|
| 223 |
+
vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
|
| 224 |
+
vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());
|
| 225 |
+
|
| 226 |
+
vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
|
| 227 |
+
vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);
|
| 228 |
+
|
| 229 |
+
vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
|
| 230 |
+
vfloat32 a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());
|
| 231 |
+
|
| 232 |
+
vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
|
| 233 |
+
vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);
|
| 234 |
+
|
| 235 |
+
// it could be done with vec_perm ,too
|
| 236 |
+
// swap lanes:
|
| 237 |
+
// return {a0, a1, a2, a3,, a4, a5, a6, a7}
|
| 238 |
+
// {b0, b1, b2, b3,, b4, b5, b6, b7}
|
| 239 |
+
|
| 240 |
+
return std::make_pair(
|
| 241 |
+
Vectorized<float>{aa0123, aa0123_2}, Vectorized<float>{bb0123, bb0123_2});
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
} // namespace
|
| 245 |
+
} // namespace vec
|
| 246 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
| 6 |
+
#include <c10/util/qint32.h>
|
| 7 |
+
#include <array>
|
| 8 |
+
|
| 9 |
+
// This file defines Vectorized<> for the quantized types.
|
| 10 |
+
//
|
| 11 |
+
//
|
| 12 |
+
// Currently, we simply use these classes as efficient converters between
|
| 13 |
+
// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
|
| 14 |
+
// where doing the arithmetic in full-precision is acceptable (e.g.
|
| 15 |
+
// elementwise operators).
|
| 16 |
+
//
|
| 17 |
+
//
|
| 18 |
+
// Conversions are as follows:
|
| 19 |
+
// Vectorized<qint32> -> 1x Vectorized<float>
|
| 20 |
+
//
|
| 21 |
+
// The size of the returned float vector is specified by the special
|
| 22 |
+
// constexpr function float_num_vecs. The type of the value returned
|
| 23 |
+
// from dequantize (and expected as an argument to quantize) is
|
| 24 |
+
// specified by float_vec_return_type.
|
| 25 |
+
//
|
| 26 |
+
// When writing kernels with these vectors, it is expected that floating-
|
| 27 |
+
// point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
|
| 28 |
+
// iterations.
|
| 29 |
+
|
| 30 |
+
namespace at {
|
| 31 |
+
namespace vec {
|
| 32 |
+
inline namespace CPU_CAPABILITY {
|
| 33 |
+
|
| 34 |
+
template <>
|
| 35 |
+
struct Vectorized<c10::qint32> {
|
| 36 |
+
private:
|
| 37 |
+
union {
|
| 38 |
+
struct {
|
| 39 |
+
vint32 _vec0;
|
| 40 |
+
vint32 _vec1;
|
| 41 |
+
};
|
| 42 |
+
struct {
|
| 43 |
+
vbool32 _vecb0;
|
| 44 |
+
vbool32 _vecb1;
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
} __attribute__((__may_alias__));
|
| 48 |
+
|
| 49 |
+
public:
|
| 50 |
+
Vectorized() {}
|
| 51 |
+
|
| 52 |
+
using size_type = int;
|
| 53 |
+
static constexpr size_type size() {
|
| 54 |
+
return 8;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
static constexpr size_t float_num_vecs() {
|
| 58 |
+
return 1;
|
| 59 |
+
}
|
| 60 |
+
static constexpr int int_num_vecs() {
|
| 61 |
+
return 1;
|
| 62 |
+
}
|
| 63 |
+
using float_vec_return_type = std::array<Vectorized<float>, 1>;
|
| 64 |
+
using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
|
| 65 |
+
using value_type = c10::qint32::underlying;
|
| 66 |
+
using vec_internal_type = vint32;
|
| 67 |
+
using vec_internal_mask_type = vbool32;
|
| 68 |
+
C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
|
| 69 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
| 70 |
+
C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
|
| 71 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
|
| 72 |
+
|
| 73 |
+
Vectorized(const c10::qint32& val)
|
| 74 |
+
: _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}
|
| 75 |
+
|
| 76 |
+
static Vectorized<c10::qint32> C10_ALWAYS_INLINE
|
| 77 |
+
loadu(const void* ptr, int count = size()) {
|
| 78 |
+
if (count == size()) {
|
| 79 |
+
return {
|
| 80 |
+
vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
|
| 81 |
+
vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
__at_align__ value_type tmp_values[size()] = {};
|
| 85 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
| 86 |
+
|
| 87 |
+
return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
|
| 88 |
+
}
|
| 89 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
| 90 |
+
if (count == size()) {
|
| 91 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
|
| 92 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
|
| 93 |
+
} else if (count > 0) {
|
| 94 |
+
__at_align__ value_type tmp_values[size()];
|
| 95 |
+
vec_vsx_st(_vec0, offset0, tmp_values);
|
| 96 |
+
vec_vsx_st(_vec1, offset16, tmp_values);
|
| 97 |
+
std::memcpy(
|
| 98 |
+
ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
|
| 99 |
+
}
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
|
| 103 |
+
return _vec0;
|
| 104 |
+
}
|
| 105 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
|
| 106 |
+
return _vec1;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
float_vec_return_type dequantize(
|
| 110 |
+
Vectorized<float> scale,
|
| 111 |
+
Vectorized<float> zero_point,
|
| 112 |
+
Vectorized<float> scale_zp_premul) const {
|
| 113 |
+
vfloat32 float_vals0 = vec_float(_vec0);
|
| 114 |
+
vfloat32 float_vals1 = vec_float(_vec1);
|
| 115 |
+
vfloat32 scale_vec0 = scale.vec0();
|
| 116 |
+
vfloat32 scale_vec1 = scale.vec1();
|
| 117 |
+
vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
|
| 118 |
+
vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
|
| 119 |
+
return {Vectorized<float>{
|
| 120 |
+
vec_madd(scale_vec0, float_vals0, scale_zp_premul0),
|
| 121 |
+
vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}};
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
float_vec_return_type dequantize(
|
| 125 |
+
Vectorized<float> scale,
|
| 126 |
+
Vectorized<float> zero_point) const {
|
| 127 |
+
vfloat32 float_vals0 = vec_float(_vec0);
|
| 128 |
+
vfloat32 float_vals1 = vec_float(_vec1);
|
| 129 |
+
vfloat32 scale_vec0 = scale.vec0();
|
| 130 |
+
vfloat32 scale_vec1 = scale.vec1();
|
| 131 |
+
vfloat32 zero_point0 = zero_point.vec0();
|
| 132 |
+
vfloat32 zero_point1 = zero_point.vec1();
|
| 133 |
+
return {Vectorized<float>{
|
| 134 |
+
(float_vals0 - zero_point0) * scale_vec0,
|
| 135 |
+
(float_vals1 - zero_point1) * scale_vec1}};
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
static Vectorized<c10::qint32> quantize(
|
| 139 |
+
const float_vec_return_type& rhs,
|
| 140 |
+
float scale,
|
| 141 |
+
int32_t zero_point,
|
| 142 |
+
float inverse_scale) {
|
| 143 |
+
Vectorized<c10::qint32> retval;
|
| 144 |
+
|
| 145 |
+
const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
|
| 146 |
+
const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
|
| 147 |
+
vfloat32 inverse_scale_v = vec_splats(inverse_scale);
|
| 148 |
+
vfloat32 vec_zero_point = vec_splats((float)(zero_point));
|
| 149 |
+
Vectorized<float> vf0 = rhs[0];
|
| 150 |
+
|
| 151 |
+
vfloat32 vecf0 = vf0.vec0();
|
| 152 |
+
vfloat32 vecf1 = vf0.vec1();
|
| 153 |
+
vecf0 = vec_mul(vecf0, inverse_scale_v);
|
| 154 |
+
vecf1 = vec_mul(vecf1, inverse_scale_v);
|
| 155 |
+
vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
|
| 156 |
+
vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
|
| 157 |
+
vint32 veci0 = vec_signed(vecf0);
|
| 158 |
+
vint32 veci1 = vec_signed(vecf1);
|
| 159 |
+
|
| 160 |
+
veci0 = vec_max(veci0, vmin);
|
| 161 |
+
veci1 = vec_max(veci1, vmin);
|
| 162 |
+
veci0 = vec_min(veci0, vmax);
|
| 163 |
+
veci1 = vec_min(veci1, vmax);
|
| 164 |
+
|
| 165 |
+
return {veci0, veci1};
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
|
| 169 |
+
return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
Vectorized<c10::qint32> relu6(
|
| 173 |
+
Vectorized<c10::qint32> zero_point,
|
| 174 |
+
Vectorized<c10::qint32> q_six) const {
|
| 175 |
+
vint32 max0 = vec_max(_vec0, zero_point._vec0);
|
| 176 |
+
vint32 max1 = vec_max(_vec1, zero_point._vec1);
|
| 177 |
+
return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
|
| 181 |
+
return {*this - b};
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
static Vectorized<c10::qint32> requantize_from_int(
|
| 185 |
+
const int_vec_return_type& inp,
|
| 186 |
+
float multiplier,
|
| 187 |
+
int32_t zero_point) {
|
| 188 |
+
const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
|
| 189 |
+
const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
|
| 190 |
+
vfloat32 vec_mult = vec_splats(multiplier);
|
| 191 |
+
vint32 vec_zero_point = vec_splats(zero_point);
|
| 192 |
+
Vectorized<c10::qint32> vi = inp[0];
|
| 193 |
+
vfloat32 vecf0 = vec_float(vi.vec0());
|
| 194 |
+
vfloat32 vecf1 = vec_float(vi.vec1());
|
| 195 |
+
|
| 196 |
+
vecf0 = vec_mul(vecf0, vec_mult);
|
| 197 |
+
vecf1 = vec_mul(vecf1, vec_mult);
|
| 198 |
+
|
| 199 |
+
vecf0 = vec_rint(vecf0);
|
| 200 |
+
vecf1 = vec_rint(vecf1);
|
| 201 |
+
|
| 202 |
+
vint32 veci0 = vec_add(vec_signed(vecf0),vec_zero_point);
|
| 203 |
+
vint32 veci1 = vec_add(vec_signed(vecf1),vec_zero_point);
|
| 204 |
+
|
| 205 |
+
veci0 = vec_max(veci0, vmin);
|
| 206 |
+
veci1 = vec_max(veci1, vmin);
|
| 207 |
+
veci0 = vec_min(veci0, vmax);
|
| 208 |
+
veci1 = vec_min(veci1, vmax);
|
| 209 |
+
|
| 210 |
+
return {veci0, veci1};
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq)
|
| 214 |
+
DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne)
|
| 215 |
+
DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt)
|
| 216 |
+
DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple)
|
| 217 |
+
DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt)
|
| 218 |
+
DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge)
|
| 219 |
+
DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add)
|
| 220 |
+
DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub)
|
| 221 |
+
DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul)
|
| 222 |
+
DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /)
|
| 223 |
+
DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max)
|
| 224 |
+
DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min)
|
| 225 |
+
DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and)
|
| 226 |
+
DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or)
|
| 227 |
+
DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor)
|
| 228 |
+
};
|
| 229 |
+
|
| 230 |
+
template <>
|
| 231 |
+
Vectorized<c10::qint32> inline maximum(
|
| 232 |
+
const Vectorized<c10::qint32>& a,
|
| 233 |
+
const Vectorized<c10::qint32>& b) {
|
| 234 |
+
return a.maximum(b);
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
template <>
|
| 238 |
+
Vectorized<c10::qint32> inline minimum(
|
| 239 |
+
const Vectorized<c10::qint32>& a,
|
| 240 |
+
const Vectorized<c10::qint32>& b) {
|
| 241 |
+
return a.minimum(b);
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
template <>
|
| 245 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator+(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 246 |
+
return Vectorized<c10::qint32>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
template <>
|
| 250 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator-(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 251 |
+
return Vectorized<c10::qint32>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
template <>
|
| 255 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator*(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 256 |
+
return Vectorized<c10::qint32>{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())};
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
template <>
|
| 260 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator/(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 261 |
+
return Vectorized<c10::qint32>{a.vec0()/b.vec0(), a.vec1()/b.vec1()};
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
template <>
|
| 265 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator&(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 266 |
+
return Vectorized<c10::qint32>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
template <>
|
| 270 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator|(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 271 |
+
return Vectorized<c10::qint32>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
template <>
|
| 275 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator^(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 276 |
+
return Vectorized<c10::qint32>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
} // namespace
|
| 280 |
+
} // namespace vec
|
| 281 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
|
| 8 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec512/vec512_float.h>
|
| 10 |
+
#include <ATen/cpu/vec/vec512/vec512_bfloat16.h>
|
| 11 |
+
#include <ATen/cpu/vec/vec512/vec512_double.h>
|
| 12 |
+
#include <ATen/cpu/vec/vec512/vec512_int.h>
|
| 13 |
+
#include <ATen/cpu/vec/vec512/vec512_qint.h>
|
| 14 |
+
#include <ATen/cpu/vec/vec512/vec512_complex_float.h>
|
| 15 |
+
#include <ATen/cpu/vec/vec512/vec512_complex_double.h>
|
| 16 |
+
#include <ATen/cpu/vec/vec512/vec512_convert.h>
|
| 17 |
+
#include <ATen/cpu/vec/vec512/vec512_mask.h>
|
| 18 |
+
|
| 19 |
+
#include <algorithm>
|
| 20 |
+
#include <cstddef>
|
| 21 |
+
#include <cstdint>
|
| 22 |
+
#include <cstring>
|
| 23 |
+
#include <ostream>
|
| 24 |
+
|
| 25 |
+
namespace at {
|
| 26 |
+
namespace vec {
|
| 27 |
+
|
| 28 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 29 |
+
inline namespace CPU_CAPABILITY {
|
| 30 |
+
|
| 31 |
+
inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
|
| 32 |
+
stream << val.val_;
|
| 33 |
+
return stream;
|
| 34 |
+
}
|
| 35 |
+
inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
|
| 36 |
+
stream << static_cast<int>(val.val_);
|
| 37 |
+
return stream;
|
| 38 |
+
}
|
| 39 |
+
inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
|
| 40 |
+
stream << static_cast<unsigned int>(val.val_);
|
| 41 |
+
return stream;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
template <typename T>
|
| 45 |
+
std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
|
| 46 |
+
T buf[Vectorized<T>::size()];
|
| 47 |
+
vec.store(buf);
|
| 48 |
+
stream << "vec[";
|
| 49 |
+
for (int i = 0; i != Vectorized<T>::size(); i++) {
|
| 50 |
+
if (i != 0) {
|
| 51 |
+
stream << ", ";
|
| 52 |
+
}
|
| 53 |
+
stream << buf[i];
|
| 54 |
+
}
|
| 55 |
+
stream << "]";
|
| 56 |
+
return stream;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 61 |
+
|
| 62 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 63 |
+
|
| 64 |
+
template<>
|
| 65 |
+
inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
|
| 66 |
+
return _mm512_castpd_ps(src);
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
template<>
|
| 70 |
+
inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
|
| 71 |
+
return _mm512_castps_pd(src);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
template<>
|
| 75 |
+
inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
|
| 76 |
+
return _mm512_castsi512_ps(src);
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
template<>
|
| 80 |
+
inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
|
| 81 |
+
return _mm512_castsi512_pd(src);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 85 |
+
#ifndef _MSC_VER
|
| 86 |
+
// MSVC is not working well on complex function overload.
|
| 87 |
+
template<int64_t scale = 1>
|
| 88 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
|
| 89 |
+
inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
|
| 90 |
+
return _mm512_i64gather_pd(vindex, base_addr, scale);
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template<int64_t scale = 1>
|
| 94 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
|
| 95 |
+
inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
|
| 96 |
+
return _mm512_i32gather_ps(vindex, base_addr, scale);
|
| 97 |
+
}
|
| 98 |
+
#endif
|
| 99 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 100 |
+
#ifndef _MSC_VER
|
| 101 |
+
// MSVC is not working well on complex function overload.
|
| 102 |
+
template<int64_t scale = 1>
|
| 103 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
|
| 104 |
+
inline mask_gather(const Vectorized<double>& src, const double* base_addr,
|
| 105 |
+
const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
|
| 106 |
+
auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF));
|
| 107 |
+
auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ);
|
| 108 |
+
return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale);
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
template<int64_t scale = 1>
|
| 112 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
|
| 113 |
+
inline mask_gather(const Vectorized<float>& src, const float* base_addr,
|
| 114 |
+
const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
|
| 115 |
+
auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF));
|
| 116 |
+
auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ);
|
| 117 |
+
return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale);
|
| 118 |
+
}
|
| 119 |
+
#endif
|
| 120 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 121 |
+
|
| 122 |
+
template<>
|
| 123 |
+
Vectorized<int64_t>
|
| 124 |
+
inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
|
| 125 |
+
return _mm512_cvtpd_epi64(src);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
template<>
|
| 129 |
+
Vectorized<int32_t>
|
| 130 |
+
inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
|
| 131 |
+
return _mm512_cvttps_epi32(src);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
template<>
|
| 135 |
+
Vectorized<double>
|
| 136 |
+
inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
|
| 137 |
+
return _mm512_cvtepi64_pd(src);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template<>
|
| 141 |
+
Vectorized<float>
|
| 142 |
+
inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
|
| 143 |
+
return _mm512_cvtepi32_ps(src);
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 147 |
+
|
| 148 |
+
template <>
|
| 149 |
+
std::pair<Vectorized<double>, Vectorized<double>>
|
| 150 |
+
inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 151 |
+
// inputs:
|
| 152 |
+
// a = {a0, a1, a3, a3, a4, a5, a6, a7}
|
| 153 |
+
// b = {b0, b1, b2, b3, b4, b5, b6, b7}
|
| 154 |
+
// group cols crossing lanes:
|
| 155 |
+
// return {a0, b0, a1, b1, a2, b2, a3, b3}
|
| 156 |
+
// {a4, b4, a5, b5, a6, b6, a7, b7}
|
| 157 |
+
__m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0);
|
| 158 |
+
__m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4);
|
| 159 |
+
return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
|
| 160 |
+
_mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
template <>
|
| 164 |
+
std::pair<Vectorized<float>, Vectorized<float>>
|
| 165 |
+
inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 166 |
+
// inputs:
|
| 167 |
+
// a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
|
| 168 |
+
// b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
|
| 169 |
+
//
|
| 170 |
+
// return:
|
| 171 |
+
// {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
|
| 172 |
+
// {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
|
| 173 |
+
__m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
|
| 174 |
+
19, 3, 18, 2, 17, 1, 16, 0);
|
| 175 |
+
__m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12,
|
| 176 |
+
27, 11, 26, 10, 25, 9, 24, 8);
|
| 177 |
+
return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
|
| 178 |
+
_mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 182 |
+
|
| 183 |
+
template <>
|
| 184 |
+
std::pair<Vectorized<double>, Vectorized<double>>
|
| 185 |
+
inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 186 |
+
// inputs:
|
| 187 |
+
// a = {a0, b0, a1, b1, a2, b2, a3, b3}
|
| 188 |
+
// b = {a4, b4, a5, b5, a6, b6, a7, b7}
|
| 189 |
+
// output:
|
| 190 |
+
// return {a0, a1, a2, a3, a4, a5, a6, a7}
|
| 191 |
+
// {b0, b1, b2, b3, b4, b5, b6, b7}
|
| 192 |
+
// The members of indices have been written in binary format for better understandability
|
| 193 |
+
__m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0);
|
| 194 |
+
__m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1);
|
| 195 |
+
|
| 196 |
+
return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
|
| 197 |
+
_mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
template <>
|
| 201 |
+
std::pair<Vectorized<float>, Vectorized<float>>
|
| 202 |
+
inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 203 |
+
// inputs:
|
| 204 |
+
// a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
|
| 205 |
+
// b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
|
| 206 |
+
// output:
|
| 207 |
+
// return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
|
| 208 |
+
// {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
|
| 209 |
+
__m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16,
|
| 210 |
+
14, 12, 10, 8, 6, 4, 2, 0);
|
| 211 |
+
__m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17,
|
| 212 |
+
15, 13, 11, 9, 7, 5, 3, 1);
|
| 213 |
+
|
| 214 |
+
return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
|
| 215 |
+
_mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 219 |
+
|
| 220 |
+
template<>
|
| 221 |
+
inline Vectorized<float> flip(const Vectorized<float> & v) {
|
| 222 |
+
const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
|
| 223 |
+
8, 9, 10, 11, 12, 13, 14, 15);
|
| 224 |
+
return _mm512_permutexvar_ps(mask, v);
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
template<>
|
| 228 |
+
inline Vectorized<double> flip(const Vectorized<double> & v) {
|
| 229 |
+
const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
|
| 230 |
+
return _mm512_permutexvar_pd(mask, v);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
template<>
|
| 234 |
+
inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
|
| 235 |
+
const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
|
| 236 |
+
return _mm512_permutexvar_epi64(mask, v);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
template<>
|
| 240 |
+
inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
|
| 241 |
+
const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
|
| 242 |
+
8, 9, 10, 11, 12, 13, 14, 15);
|
| 243 |
+
return _mm512_permutexvar_epi32(mask, v);
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
template<>
|
| 247 |
+
inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
|
| 248 |
+
const __m512i mask = _mm512_set_epi16(
|
| 249 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
| 250 |
+
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
|
| 251 |
+
);
|
| 252 |
+
return _mm512_permutexvar_epi16(mask, v);
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
inline __m512i flip8(const __m512i & v) {
|
| 256 |
+
const __m512i mask1 = _mm512_set_epi8(
|
| 257 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
| 258 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
| 259 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
| 260 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
|
| 261 |
+
);
|
| 262 |
+
const __m512i mask2 = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6);
|
| 263 |
+
auto reversed_vec = _mm512_shuffle_epi8(v, mask1);
|
| 264 |
+
return _mm512_permutexvar_epi64(mask2, reversed_vec);
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
template<>
|
| 268 |
+
inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
|
| 269 |
+
return flip8(v);
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
template<>
|
| 273 |
+
inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
|
| 274 |
+
return flip8(v);
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
inline Vectorized<bool> operator&&(
|
| 278 |
+
const Vectorized<bool>& self,
|
| 279 |
+
const Vectorized<bool>& other) {
|
| 280 |
+
const __m512i* self_ = reinterpret_cast<const __m512i*>(self.as_bytes());
|
| 281 |
+
const __m512i* other_ = reinterpret_cast<const __m512i*>(other.as_bytes());
|
| 282 |
+
__m512i out = _mm512_and_si512(*self_, *other_);
|
| 283 |
+
Vectorized<bool> ret;
|
| 284 |
+
// We do not have a constructer that takes __m512i, so we need to memcpy
|
| 285 |
+
std::memcpy(ret, &out, ret.size() * sizeof(bool));
|
| 286 |
+
return ret;
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
#endif // defined(CPU_CAPABILITY_AVX512)
|
| 290 |
+
|
| 291 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h
ADDED
|
@@ -0,0 +1,1732 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at {
|
| 16 |
+
namespace vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 21 |
+
|
| 22 |
+
#ifndef SLEEF_CONST
|
| 23 |
+
#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
|
| 24 |
+
#define SLEEF_CONST const
|
| 25 |
+
#else
|
| 26 |
+
#define SLEEF_CONST
|
| 27 |
+
#endif
|
| 28 |
+
#define SLEEF_CONST_OLD SLEEF_CONST
|
| 29 |
+
#else
|
| 30 |
+
#define SLEEF_CONST_OLD
|
| 31 |
+
#endif
|
| 32 |
+
|
| 33 |
+
// bfloat16 conversion
|
| 34 |
+
static inline void cvtbf16_fp32(const __m256i& a, __m512& o) {
|
| 35 |
+
o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
|
| 39 |
+
__m256i lo = _mm512_extracti32x8_epi32(a, 0);
|
| 40 |
+
__m256i hi = _mm512_extracti32x8_epi32(a, 1);
|
| 41 |
+
cvtbf16_fp32(lo, o1);
|
| 42 |
+
cvtbf16_fp32(hi, o2);
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
static inline __m256i cvtfp32_bf16(const __m512& src) {
|
| 46 |
+
__m512i value = _mm512_castps_si512(src);
|
| 47 |
+
__m512i nan = _mm512_set1_epi32(0xffff);
|
| 48 |
+
auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q);
|
| 49 |
+
__m512i ones = _mm512_set1_epi32(0x1);
|
| 50 |
+
__m512i vec_bias = _mm512_set1_epi32(0x7fff);
|
| 51 |
+
// uint32_t lsb = (input >> 16) & 1;
|
| 52 |
+
auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones);
|
| 53 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
| 54 |
+
t_value = _mm512_add_epi32(t_value, vec_bias);
|
| 55 |
+
// input += rounding_bias;
|
| 56 |
+
t_value = _mm512_add_epi32(t_value, value);
|
| 57 |
+
// input = input >> 16;
|
| 58 |
+
t_value = _mm512_srli_epi32(t_value, 16);
|
| 59 |
+
// Check NaN before converting back to bf16
|
| 60 |
+
t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value);
|
| 61 |
+
return _mm512_cvtusepi32_epi16(t_value);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) {
|
| 65 |
+
__m512i lo = _mm512_castps_si512(a);
|
| 66 |
+
__m512i hi = _mm512_castps_si512(b);
|
| 67 |
+
__m512i nan = _mm512_set1_epi32(0xffff);
|
| 68 |
+
auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
|
| 69 |
+
auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q);
|
| 70 |
+
__m512i ones = _mm512_set1_epi32(0x1);
|
| 71 |
+
__m512i vec_bias = _mm512_set1_epi32(0x7fff);
|
| 72 |
+
// uint32_t lsb = (input >> 16) & 1;
|
| 73 |
+
auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones);
|
| 74 |
+
auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones);
|
| 75 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
| 76 |
+
t_lo = _mm512_add_epi32(t_lo, vec_bias);
|
| 77 |
+
t_hi = _mm512_add_epi32(t_hi, vec_bias);
|
| 78 |
+
// input += rounding_bias;
|
| 79 |
+
t_lo = _mm512_add_epi32(t_lo, lo);
|
| 80 |
+
t_hi = _mm512_add_epi32(t_hi, hi);
|
| 81 |
+
// input = input >> 16;
|
| 82 |
+
t_lo = _mm512_srli_epi32(t_lo, 16);
|
| 83 |
+
t_hi = _mm512_srli_epi32(t_hi, 16);
|
| 84 |
+
// Check NaN before converting back to bf16
|
| 85 |
+
t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo);
|
| 86 |
+
t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi);
|
| 87 |
+
|
| 88 |
+
t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
|
| 89 |
+
__m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
|
| 90 |
+
return _mm512_permutexvar_epi64(idx, t_lo);
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
static inline __m512i merge_compare_result(const __m512& a, const __m512& b) {
|
| 94 |
+
__m512i lo = _mm512_castps_si512(a);
|
| 95 |
+
__m512i hi = _mm512_castps_si512(b);
|
| 96 |
+
lo = _mm512_srli_epi32(lo, 16);
|
| 97 |
+
hi = _mm512_srli_epi32(hi, 16);
|
| 98 |
+
auto out = _mm512_packus_epi32(lo, hi);
|
| 99 |
+
__m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
|
| 100 |
+
return _mm512_permutexvar_epi64(idx, out);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
// float16 conversion
|
| 104 |
+
static inline void cvtfp16_fp32(const __m256i& a, __m512& o) {
|
| 105 |
+
o = _mm512_cvtph_ps(a);
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
static inline void cvtfp16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
|
| 109 |
+
__m256i lo = _mm512_extracti32x8_epi32(a, 0);
|
| 110 |
+
__m256i hi = _mm512_extracti32x8_epi32(a, 1);
|
| 111 |
+
cvtfp16_fp32(lo, o1);
|
| 112 |
+
cvtfp16_fp32(hi, o2);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
static inline __m256i cvtfp32_fp16(const __m512& src) {
|
| 116 |
+
return _mm512_cvtps_ph(
|
| 117 |
+
src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
static inline __m512i cvtfp32_fp16(const __m512& a, const __m512& b) {
|
| 121 |
+
__m256i lo = _mm512_cvtps_ph(
|
| 122 |
+
a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 123 |
+
__m256i hi = _mm512_cvtps_ph(
|
| 124 |
+
b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 125 |
+
__m512 t_lo = _mm512_castsi512_ps(_mm512_castsi256_si512(lo));
|
| 126 |
+
__m256 t_hi = _mm256_castsi256_ps(hi);
|
| 127 |
+
return _mm512_castps_si512(_mm512_insertf32x8(t_lo, t_hi, 1));
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
// dtype conversion between float16/bfloat16 and float32
|
| 131 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 132 |
+
inline void cvt_to_fp32(const __m256i& a, __m512& o);
|
| 133 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m512& o) {
|
| 134 |
+
cvtbf16_fp32(a, o);
|
| 135 |
+
}
|
| 136 |
+
template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m512& o) {
|
| 137 |
+
cvtfp16_fp32(a, o);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
// Dispatch: widen 32 reduced-precision elements (one __m512i) into two fp32
// registers, selecting the bf16 or fp16 decoder by element type.
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2);
template <> inline void cvt_to_fp32<BFloat16>(const __m512i& a, __m512& o1, __m512& o2) {
  cvtbf16_fp32(a, o1, o2);
}
template <> inline void cvt_to_fp32<Half>(const __m512i& a, __m512& o1, __m512& o2) {
  cvtfp16_fp32(a, o1, o2);
}
|
| 148 |
+
|
| 149 |
+
// Dispatch: narrow two fp32 registers back to 32 reduced-precision elements.
// `is_compare_op = true` is used for comparison results, whose all-ones /
// all-zeros lane patterns must be merged bitwise rather than rounded
// (rounding an all-ones NaN pattern would not preserve the mask for bf16).
template <typename T, bool is_compare_op = false,
          typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline __m512i cvt_from_fp32(const __m512& a, const __m512& b);
template <> inline __m512i cvt_from_fp32<BFloat16, false>(const __m512& a, const __m512& b) {
  return cvtfp32_bf16(a, b);
}
template <> inline __m512i cvt_from_fp32<BFloat16, true>(const __m512& a, const __m512& b) {
  return merge_compare_result(a, b);
}
// fp16 round-trips comparison masks exactly, so both variants share one path.
template <> inline __m512i cvt_from_fp32<Half, false>(const __m512& a, const __m512& b) {
  return cvtfp32_fp16(a, b);
}
template <> inline __m512i cvt_from_fp32<Half, true>(const __m512& a, const __m512& b) {
  return cvtfp32_fp16(a, b);
}
|
| 164 |
+
|
| 165 |
+
template <typename T>
|
| 166 |
+
class Vectorized16 {
|
| 167 |
+
static_assert(
|
| 168 |
+
is_reduced_floating_point_v<T>,
|
| 169 |
+
"Support only float16 and bfloat16.");
|
| 170 |
+
private:
|
| 171 |
+
__m512i values;
|
| 172 |
+
public:
|
| 173 |
+
using value_type = uint16_t;
|
| 174 |
+
using size_type = int;
|
| 175 |
+
static constexpr size_type size() {
|
| 176 |
+
return 32;
|
| 177 |
+
}
|
| 178 |
+
Vectorized16() {}
|
| 179 |
+
Vectorized16(__m512i v) : values(v) {}
|
| 180 |
+
Vectorized16(T val) {
|
| 181 |
+
value_type uw = val.x;
|
| 182 |
+
values = _mm512_set1_epi16(uw);
|
| 183 |
+
}
|
| 184 |
+
Vectorized16(T val1, T val2, T val3, T val4,
|
| 185 |
+
T val5, T val6, T val7, T val8,
|
| 186 |
+
T val9, T val10, T val11, T val12,
|
| 187 |
+
T val13, T val14, T val15, T val16,
|
| 188 |
+
T val17, T val18, T val19, T val20,
|
| 189 |
+
T val21, T val22, T val23, T val24,
|
| 190 |
+
T val25, T val26, T val27, T val28,
|
| 191 |
+
T val29, T val30, T val31, T val32) {
|
| 192 |
+
values = _mm512_set_epi16(
|
| 193 |
+
val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x,
|
| 194 |
+
val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x,
|
| 195 |
+
val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x,
|
| 196 |
+
val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x);
|
| 197 |
+
}
|
| 198 |
+
operator __m512i() const {
|
| 199 |
+
return values;
|
| 200 |
+
}
|
| 201 |
+
T& operator[](int idx) = delete;
|
| 202 |
+
const T& operator[](int idx) const = delete;
|
| 203 |
+
int zero_mask() const {
|
| 204 |
+
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
|
| 205 |
+
return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0));
|
| 206 |
+
}
|
| 207 |
+
static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
|
| 208 |
+
if (count == size())
|
| 209 |
+
return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
|
| 210 |
+
|
| 211 |
+
__mmask32 mask = (1ULL << count) - 1;
|
| 212 |
+
return _mm512_maskz_loadu_epi16(mask, ptr);
|
| 213 |
+
}
|
| 214 |
+
void store(void* ptr, int count = size()) const {
|
| 215 |
+
if (count == size()) {
|
| 216 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
|
| 217 |
+
} else if (count > 0) {
|
| 218 |
+
__mmask32 mask = (1ULL << count) - 1;
|
| 219 |
+
_mm512_mask_storeu_epi16(ptr, mask, values);
|
| 220 |
+
}
|
| 221 |
+
}
|
| 222 |
+
template <int64_t mask>
|
| 223 |
+
static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 224 |
+
__at_align__ int16_t tmp_values[size()];
|
| 225 |
+
a.store(tmp_values);
|
| 226 |
+
if (mask & 0x01)
|
| 227 |
+
tmp_values[0] = b.values[31];
|
| 228 |
+
if (mask & 0x02)
|
| 229 |
+
tmp_values[1] = b.values[30];
|
| 230 |
+
if (mask & 0x04)
|
| 231 |
+
tmp_values[2] = b.values[29];
|
| 232 |
+
if (mask & 0x08)
|
| 233 |
+
tmp_values[3] = b.values[28];
|
| 234 |
+
if (mask & 0x10)
|
| 235 |
+
tmp_values[4] = b.values[27];
|
| 236 |
+
if (mask & 0x20)
|
| 237 |
+
tmp_values[5] = b.values[26];
|
| 238 |
+
if (mask & 0x40)
|
| 239 |
+
tmp_values[6] = b.values[25];
|
| 240 |
+
if (mask & 0x80)
|
| 241 |
+
tmp_values[7] = b.values[24];
|
| 242 |
+
if (mask & 0x100)
|
| 243 |
+
tmp_values[8] = b.values[23];
|
| 244 |
+
if (mask & 0x200)
|
| 245 |
+
tmp_values[9] = b.values[22];
|
| 246 |
+
if (mask & 0x400)
|
| 247 |
+
tmp_values[10] = b.values[21];
|
| 248 |
+
if (mask & 0x800)
|
| 249 |
+
tmp_values[11] = b.values[20];
|
| 250 |
+
if (mask & 0x1000)
|
| 251 |
+
tmp_values[12] = b.values[19];
|
| 252 |
+
if (mask & 0x2000)
|
| 253 |
+
tmp_values[13] = b.values[18];
|
| 254 |
+
if (mask & 0x4000)
|
| 255 |
+
tmp_values[14] = b.values[17];
|
| 256 |
+
if (mask & 0x8000)
|
| 257 |
+
tmp_values[15] = b.values[16];
|
| 258 |
+
if (mask & 0x10000)
|
| 259 |
+
tmp_values[16] = b.values[15];
|
| 260 |
+
if (mask & 0x20000)
|
| 261 |
+
tmp_values[17] = b.values[14];
|
| 262 |
+
if (mask & 0x40000)
|
| 263 |
+
tmp_values[18] = b.values[13];
|
| 264 |
+
if (mask & 0x80000)
|
| 265 |
+
tmp_values[19] = b.values[12];
|
| 266 |
+
if (mask & 0x100000)
|
| 267 |
+
tmp_values[20] = b.values[11];
|
| 268 |
+
if (mask & 0x200000)
|
| 269 |
+
tmp_values[21] = b.values[10];
|
| 270 |
+
if (mask & 0x400000)
|
| 271 |
+
tmp_values[22] = b.values[9];
|
| 272 |
+
if (mask & 0x800000)
|
| 273 |
+
tmp_values[23] = b.values[8];
|
| 274 |
+
if (mask & 0x1000000)
|
| 275 |
+
tmp_values[24] = b.values[7];
|
| 276 |
+
if (mask & 0x2000000)
|
| 277 |
+
tmp_values[25] = b.values[6];
|
| 278 |
+
if (mask & 0x4000000)
|
| 279 |
+
tmp_values[26] = b.values[5];
|
| 280 |
+
if (mask & 0x8000000)
|
| 281 |
+
tmp_values[27] = b.values[4];
|
| 282 |
+
if (mask & 0x10000000)
|
| 283 |
+
tmp_values[28] = b.values[3];
|
| 284 |
+
if (mask & 0x20000000)
|
| 285 |
+
tmp_values[29] = b.values[2];
|
| 286 |
+
if (mask & 0x40000000)
|
| 287 |
+
tmp_values[30] = b.values[1];
|
| 288 |
+
if (mask & 0x80000000)
|
| 289 |
+
tmp_values[31] = b.values[0];
|
| 290 |
+
return loadu(tmp_values);
|
| 291 |
+
}
|
| 292 |
+
static Vectorized<T> blendv(const Vectorized<T>& a,
|
| 293 |
+
const Vectorized<T>& b, const Vectorized<T>& mask) {
|
| 294 |
+
auto all_ones = _mm512_set1_epi16(0xFFFF);
|
| 295 |
+
auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ);
|
| 296 |
+
return _mm512_mask_blend_epi16(mask_, a.values, b.values);
|
| 297 |
+
}
|
| 298 |
+
template<typename step_t>
|
| 299 |
+
static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
|
| 300 |
+
return Vectorized<T>(
|
| 301 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
| 302 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
| 303 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
| 304 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
|
| 305 |
+
base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
|
| 306 |
+
base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
|
| 307 |
+
base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
|
| 308 |
+
base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
|
| 309 |
+
}
|
| 310 |
+
static Vectorized<T> set(const Vectorized<T>& a,
|
| 311 |
+
const Vectorized<T>& b, int64_t count = size()) {
|
| 312 |
+
switch (count) {
|
| 313 |
+
case 0:
|
| 314 |
+
return a;
|
| 315 |
+
case 1:
|
| 316 |
+
return blend<1>(a, b);
|
| 317 |
+
case 2:
|
| 318 |
+
return blend<3>(a, b);
|
| 319 |
+
case 3:
|
| 320 |
+
return blend<7>(a, b);
|
| 321 |
+
case 4:
|
| 322 |
+
return blend<15>(a, b);
|
| 323 |
+
case 5:
|
| 324 |
+
return blend<31>(a, b);
|
| 325 |
+
case 6:
|
| 326 |
+
return blend<63>(a, b);
|
| 327 |
+
case 7:
|
| 328 |
+
return blend<127>(a, b);
|
| 329 |
+
case 8:
|
| 330 |
+
return blend<255>(a, b);
|
| 331 |
+
case 9:
|
| 332 |
+
return blend<511>(a, b);
|
| 333 |
+
case 10:
|
| 334 |
+
return blend<1023>(a, b);
|
| 335 |
+
case 11:
|
| 336 |
+
return blend<2047>(a, b);
|
| 337 |
+
case 12:
|
| 338 |
+
return blend<4095>(a, b);
|
| 339 |
+
case 13:
|
| 340 |
+
return blend<8191>(a, b);
|
| 341 |
+
case 14:
|
| 342 |
+
return blend<16383>(a, b);
|
| 343 |
+
case 15:
|
| 344 |
+
return blend<32767>(a, b);
|
| 345 |
+
case 16:
|
| 346 |
+
return blend<65535>(a, b);
|
| 347 |
+
case 17:
|
| 348 |
+
return blend<131071>(a, b);
|
| 349 |
+
case 18:
|
| 350 |
+
return blend<262143>(a, b);
|
| 351 |
+
case 19:
|
| 352 |
+
return blend<524287>(a, b);
|
| 353 |
+
case 20:
|
| 354 |
+
return blend<1048575>(a, b);
|
| 355 |
+
case 21:
|
| 356 |
+
return blend<2097151>(a, b);
|
| 357 |
+
case 22:
|
| 358 |
+
return blend<4194303>(a, b);
|
| 359 |
+
case 23:
|
| 360 |
+
return blend<8388607>(a, b);
|
| 361 |
+
case 24:
|
| 362 |
+
return blend<16777215>(a, b);
|
| 363 |
+
case 25:
|
| 364 |
+
return blend<33554431>(a, b);
|
| 365 |
+
case 26:
|
| 366 |
+
return blend<67108863>(a, b);
|
| 367 |
+
case 27:
|
| 368 |
+
return blend<134217727>(a, b);
|
| 369 |
+
case 28:
|
| 370 |
+
return blend<268435455>(a, b);
|
| 371 |
+
case 29:
|
| 372 |
+
return blend<536870911>(a, b);
|
| 373 |
+
case 30:
|
| 374 |
+
return blend<1073741823>(a, b);
|
| 375 |
+
case 31:
|
| 376 |
+
return blend<2147483647>(a, b);
|
| 377 |
+
}
|
| 378 |
+
return b;
|
| 379 |
+
}
|
| 380 |
+
#pragma clang diagnostic push
|
| 381 |
+
#pragma clang diagnostic ignored "-Wignored-qualifiers"
|
| 382 |
+
|
| 383 |
+
Vectorized<T> map(SLEEF_CONST __m512 (*SLEEF_CONST_OLD vop)(__m512)) const {
|
| 384 |
+
__m512 lo, hi;
|
| 385 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 386 |
+
const auto o1 = vop(lo);
|
| 387 |
+
const auto o2 = vop(hi);
|
| 388 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 389 |
+
}
|
| 390 |
+
Vectorized<T> isnan() const {
|
| 391 |
+
__m512 lo, hi;
|
| 392 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 393 |
+
__mmask16 lo_mask, hi_mask;
|
| 394 |
+
__m512 zero = _mm512_set1_ps(0.0);
|
| 395 |
+
__m512i zeroi = _mm512_castps_si512(zero);
|
| 396 |
+
lo_mask = _mm512_cmp_ps_mask(lo, zero, _CMP_UNORD_Q);
|
| 397 |
+
lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, lo_mask, 0xFFFF'FFFF));
|
| 398 |
+
hi_mask = _mm512_cmp_ps_mask(hi, zero, _CMP_UNORD_Q);
|
| 399 |
+
hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, hi_mask, 0xFFFF'FFFF));
|
| 400 |
+
return merge_compare_result(lo, hi);
|
| 401 |
+
}
|
| 402 |
+
#pragma clang diagnostic pop
|
| 403 |
+
Vectorized<T> abs() const {
|
| 404 |
+
return _mm512_andnot_si512(_mm512_set1_epi16(0x8000), values);
|
| 405 |
+
}
|
| 406 |
+
Vectorized<T> angle() const {
|
| 407 |
+
__m512 lo, hi;
|
| 408 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 409 |
+
auto angle_lambda = [](__m512 values) {
|
| 410 |
+
const auto zero_vec = _mm512_set1_ps(0.f);
|
| 411 |
+
const auto nan_vec = _mm512_set1_ps(NAN);
|
| 412 |
+
const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
|
| 413 |
+
const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
|
| 414 |
+
not_nan_mask, 0xFFFFFFFF);
|
| 415 |
+
const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec),
|
| 416 |
+
zero_vec, _CMP_EQ_OQ);
|
| 417 |
+
const auto pi = _mm512_set1_ps(c10::pi<float>);
|
| 418 |
+
|
| 419 |
+
const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
|
| 420 |
+
auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
|
| 421 |
+
angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
|
| 422 |
+
return angle;
|
| 423 |
+
};
|
| 424 |
+
auto o1 = angle_lambda(lo);
|
| 425 |
+
auto o2 = angle_lambda(hi);
|
| 426 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 427 |
+
}
|
| 428 |
+
Vectorized<T> real() const {
|
| 429 |
+
return *this;
|
| 430 |
+
}
|
| 431 |
+
Vectorized<T> imag() const {
|
| 432 |
+
return _mm512_set1_epi16(0);
|
| 433 |
+
}
|
| 434 |
+
Vectorized<T> conj() const {
|
| 435 |
+
return *this;
|
| 436 |
+
}
|
| 437 |
+
Vectorized<T> acos() const {
|
| 438 |
+
return map(Sleef_acosf16_u10);
|
| 439 |
+
}
|
| 440 |
+
Vectorized<T> acosh() const {
|
| 441 |
+
return map(Sleef_acoshf16_u10);
|
| 442 |
+
}
|
| 443 |
+
Vectorized<T> asin() const {
|
| 444 |
+
return map(Sleef_asinf16_u10);
|
| 445 |
+
}
|
| 446 |
+
Vectorized<T> atan() const {
|
| 447 |
+
return map(Sleef_atanf16_u10);
|
| 448 |
+
}
|
| 449 |
+
Vectorized<T> atanh() const {
|
| 450 |
+
return map(Sleef_atanhf16_u10);
|
| 451 |
+
}
|
| 452 |
+
Vectorized<T> atan2(const Vectorized<T> &b) const {
|
| 453 |
+
__m512 lo, hi;
|
| 454 |
+
__m512 b1, b2;
|
| 455 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 456 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 457 |
+
auto o1 = Sleef_atan2f16_u10(lo, b1);
|
| 458 |
+
auto o2 = Sleef_atan2f16_u10(hi, b2);
|
| 459 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 460 |
+
}
|
| 461 |
+
Vectorized<T> copysign(const Vectorized<T> &sign) const {
|
| 462 |
+
// copy sign bit (0x8000) from sign and remaining bits from values
|
| 463 |
+
__m512i mask_value = _mm512_set1_epi32(~0x80008000);
|
| 464 |
+
__m512i mask_signbit = _mm512_set1_epi32(0x80008000);
|
| 465 |
+
return Vectorized<T>(
|
| 466 |
+
_mm512_or_si512(
|
| 467 |
+
_mm512_and_si512(values, mask_value),
|
| 468 |
+
_mm512_and_si512(sign, mask_signbit)));
|
| 469 |
+
}
|
| 470 |
+
Vectorized<T> erf() const {
|
| 471 |
+
return map(Sleef_erff16_u10);
|
| 472 |
+
}
|
| 473 |
+
Vectorized<T> erfc() const {
|
| 474 |
+
return map(Sleef_erfcf16_u15);
|
| 475 |
+
}
|
| 476 |
+
Vectorized<T> erfinv() const {
|
| 477 |
+
__m512 lo, hi;
|
| 478 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 479 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 480 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 481 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 482 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 483 |
+
tmp1[i] = calc_erfinv(tmp1[i]);
|
| 484 |
+
tmp2[i] = calc_erfinv(tmp2[i]);
|
| 485 |
+
}
|
| 486 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 487 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 488 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 489 |
+
}
|
| 490 |
+
Vectorized<T> exp() const {
|
| 491 |
+
return map(Sleef_expf16_u10);
|
| 492 |
+
}
|
| 493 |
+
Vectorized<T> exp2() const {
|
| 494 |
+
return map(Sleef_exp2f16_u10);
|
| 495 |
+
}
|
| 496 |
+
Vectorized<T> expm1() const {
|
| 497 |
+
return map(Sleef_expm1f16_u10);
|
| 498 |
+
}
|
| 499 |
+
Vectorized<T> exp_u20() const {
|
| 500 |
+
return exp();
|
| 501 |
+
}
|
| 502 |
+
Vectorized<T> fmod(const Vectorized<T> & q) const {
|
| 503 |
+
__m512 x_lo, x_hi;
|
| 504 |
+
cvt_to_fp32<T>(values, x_lo, x_hi);
|
| 505 |
+
__m512 q_lo, q_hi;
|
| 506 |
+
cvtbf16_fp32(q.values, q_lo, q_hi);
|
| 507 |
+
auto o1 = Sleef_fmodf16(x_lo, q_lo);
|
| 508 |
+
auto o2 = Sleef_fmodf16(x_hi, q_hi);
|
| 509 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 510 |
+
}
|
| 511 |
+
Vectorized<T> hypot(const Vectorized<T> &b) const {
|
| 512 |
+
__m512 lo, hi;
|
| 513 |
+
__m512 b1, b2;
|
| 514 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 515 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 516 |
+
auto o1 = Sleef_hypotf16_u05(lo, b1);
|
| 517 |
+
auto o2 = Sleef_hypotf16_u05(hi, b2);
|
| 518 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 519 |
+
}
|
| 520 |
+
Vectorized<T> i0() const {
|
| 521 |
+
__m512 lo, hi;
|
| 522 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 523 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 524 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 525 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 526 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 527 |
+
tmp1[i] = calc_i0(tmp1[i]);
|
| 528 |
+
tmp2[i] = calc_i0(tmp2[i]);
|
| 529 |
+
}
|
| 530 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 531 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 532 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 533 |
+
}
|
| 534 |
+
Vectorized<T> i0e() const {
|
| 535 |
+
__m512 lo, hi;
|
| 536 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 537 |
+
constexpr auto sz = size();
|
| 538 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 539 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 540 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 541 |
+
|
| 542 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 543 |
+
tmp1[i] = calc_i0e(tmp1[i]);
|
| 544 |
+
tmp2[i] = calc_i0e(tmp2[i]);
|
| 545 |
+
}
|
| 546 |
+
const auto o1 = _mm512_loadu_ps(tmp1);
|
| 547 |
+
const auto o2 = _mm512_loadu_ps(tmp2);
|
| 548 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 549 |
+
}
|
| 550 |
+
Vectorized<T> digamma() const {
|
| 551 |
+
__m512 lo, hi;
|
| 552 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 553 |
+
constexpr auto sz = size();
|
| 554 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 555 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 556 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 557 |
+
|
| 558 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 559 |
+
tmp1[i] = calc_digamma(tmp1[i]);
|
| 560 |
+
tmp2[i] = calc_digamma(tmp2[i]);
|
| 561 |
+
}
|
| 562 |
+
const auto o1 = _mm512_loadu_ps(tmp1);
|
| 563 |
+
const auto o2 = _mm512_loadu_ps(tmp2);
|
| 564 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 565 |
+
}
|
| 566 |
+
Vectorized<T> igamma(const Vectorized<T> &x) const {
|
| 567 |
+
__m512 lo, hi;
|
| 568 |
+
__m512 xlo, xhi;
|
| 569 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 570 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 571 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 572 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 573 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 574 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 575 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 576 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 577 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 578 |
+
tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
|
| 579 |
+
tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
|
| 580 |
+
}
|
| 581 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 582 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 583 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 584 |
+
}
|
| 585 |
+
|
| 586 |
+
Vectorized<T> igammac(const Vectorized<T> &x) const {
|
| 587 |
+
__m512 lo, hi;
|
| 588 |
+
__m512 xlo, xhi;
|
| 589 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 590 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 591 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 592 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 593 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 594 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 595 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 596 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 597 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 598 |
+
tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
|
| 599 |
+
tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
|
| 600 |
+
}
|
| 601 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 602 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 603 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 604 |
+
}
|
| 605 |
+
Vectorized<T> log() const {
|
| 606 |
+
return map(Sleef_logf16_u10);
|
| 607 |
+
}
|
| 608 |
+
Vectorized<T> log2() const {
|
| 609 |
+
return map(Sleef_log2f16_u10);
|
| 610 |
+
}
|
| 611 |
+
Vectorized<T> log10() const {
|
| 612 |
+
return map(Sleef_log10f16_u10);
|
| 613 |
+
}
|
| 614 |
+
Vectorized<T> log1p() const {
|
| 615 |
+
return map(Sleef_log1pf16_u10);
|
| 616 |
+
}
|
| 617 |
+
Vectorized<T> sin() const {
|
| 618 |
+
return map(Sleef_sinf16_u10);
|
| 619 |
+
}
|
| 620 |
+
Vectorized<T> sinh() const {
|
| 621 |
+
return map(Sleef_sinhf16_u10);
|
| 622 |
+
}
|
| 623 |
+
Vectorized<T> cos() const {
|
| 624 |
+
return map(Sleef_cosf16_u10);
|
| 625 |
+
}
|
| 626 |
+
Vectorized<T> cosh() const {
|
| 627 |
+
return map(Sleef_coshf16_u10);
|
| 628 |
+
}
|
| 629 |
+
Vectorized<T> ceil() const {
|
| 630 |
+
__m512 lo, hi;
|
| 631 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 632 |
+
auto o1 = _mm512_ceil_ps(lo);
|
| 633 |
+
auto o2 = _mm512_ceil_ps(hi);
|
| 634 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 635 |
+
}
|
| 636 |
+
Vectorized<T> floor() const {
|
| 637 |
+
__m512 lo, hi;
|
| 638 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 639 |
+
auto o1 = _mm512_floor_ps(lo);
|
| 640 |
+
auto o2 = _mm512_floor_ps(hi);
|
| 641 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 642 |
+
}
|
| 643 |
+
Vectorized<T> neg() const {
|
| 644 |
+
return _mm512_xor_si512(values, _mm512_set1_epi16(0x8000));
|
| 645 |
+
}
|
| 646 |
+
Vectorized<T> round() const {
|
| 647 |
+
__m512 lo, hi;
|
| 648 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 649 |
+
auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 650 |
+
auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 651 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 652 |
+
}
|
| 653 |
+
Vectorized<T> tan() const {
|
| 654 |
+
return map(Sleef_tanf16_u10);
|
| 655 |
+
}
|
| 656 |
+
Vectorized<T> tanh() const {
|
| 657 |
+
return map(Sleef_tanhf16_u10);
|
| 658 |
+
}
|
| 659 |
+
Vectorized<T> trunc() const {
|
| 660 |
+
__m512 lo, hi;
|
| 661 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 662 |
+
auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 663 |
+
auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 664 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 665 |
+
}
|
| 666 |
+
Vectorized<T> lgamma() const {
|
| 667 |
+
return map(Sleef_lgammaf16_u10);
|
| 668 |
+
}
|
| 669 |
+
Vectorized<T> sqrt() const {
|
| 670 |
+
__m512 lo, hi;
|
| 671 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 672 |
+
auto o1 = _mm512_sqrt_ps(lo);
|
| 673 |
+
auto o2 = _mm512_sqrt_ps(hi);
|
| 674 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 675 |
+
}
|
| 676 |
+
Vectorized<T> reciprocal() const {
|
| 677 |
+
__m512 lo, hi;
|
| 678 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 679 |
+
auto ones = _mm512_set1_ps(1);
|
| 680 |
+
auto o1 = _mm512_div_ps(ones, lo);
|
| 681 |
+
auto o2 = _mm512_div_ps(ones, hi);
|
| 682 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 683 |
+
}
|
| 684 |
+
Vectorized<T> rsqrt() const {
|
| 685 |
+
__m512 lo, hi;
|
| 686 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 687 |
+
auto ones = _mm512_set1_ps(1);
|
| 688 |
+
auto o1 = _mm512_div_ps(ones, _mm512_sqrt_ps(lo));
|
| 689 |
+
auto o2 = _mm512_div_ps(ones, _mm512_sqrt_ps(hi));
|
| 690 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 691 |
+
}
|
| 692 |
+
Vectorized<T> pow(const Vectorized<T> &b) const {
|
| 693 |
+
__m512 lo, hi;
|
| 694 |
+
__m512 b1, b2;
|
| 695 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 696 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 697 |
+
auto o1 = Sleef_powf16_u10(lo, b1);
|
| 698 |
+
auto o2 = Sleef_powf16_u10(hi, b2);
|
| 699 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 700 |
+
}
|
| 701 |
+
private:
|
| 702 |
+
template<typename Op>
|
| 703 |
+
Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
|
| 704 |
+
__m512 a_lo, a_hi;
|
| 705 |
+
__m512 b_lo, b_hi;
|
| 706 |
+
cvt_to_fp32<T>(values, a_lo, a_hi);
|
| 707 |
+
cvt_to_fp32<T>(b.values, b_lo, b_hi);
|
| 708 |
+
auto o1 = op(a_lo, b_lo);
|
| 709 |
+
auto o2 = op(a_hi, b_hi);
|
| 710 |
+
return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
|
| 711 |
+
}
|
| 712 |
+
|
| 713 |
+
public:
|
| 714 |
+
Vectorized<T> inline operator>(const Vectorized<T>& other) const {
|
| 715 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 716 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 717 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ);
|
| 718 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 719 |
+
});
|
| 720 |
+
}
|
| 721 |
+
Vectorized<T> inline operator<(const Vectorized<T>& other) const {
|
| 722 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 723 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 724 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ);
|
| 725 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 726 |
+
});
|
| 727 |
+
}
|
| 728 |
+
Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
|
| 729 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 730 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 731 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ);
|
| 732 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 733 |
+
});
|
| 734 |
+
}
|
| 735 |
+
Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
|
| 736 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 737 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 738 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ);
|
| 739 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 740 |
+
});
|
| 741 |
+
}
|
| 742 |
+
Vectorized<T> inline operator==(const Vectorized<T>& other) const {
|
| 743 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 744 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 745 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ);
|
| 746 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 747 |
+
});
|
| 748 |
+
}
|
| 749 |
+
Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
|
| 750 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 751 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 752 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_UQ);
|
| 753 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 754 |
+
});
|
| 755 |
+
}
|
| 756 |
+
};
|
| 757 |
+
|
| 758 |
+
template<typename T, typename Op>
|
| 759 |
+
static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
| 760 |
+
__m512 a_lo, a_hi;
|
| 761 |
+
__m512 b_lo, b_hi;
|
| 762 |
+
cvt_to_fp32<T>(__m512i(a), a_lo, a_hi);
|
| 763 |
+
cvt_to_fp32<T>(__m512i(b), b_lo, b_hi);
|
| 764 |
+
auto o1 = op(a_lo, b_lo);
|
| 765 |
+
auto o2 = op(a_hi, b_hi);
|
| 766 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 767 |
+
}
|
| 768 |
+
|
| 769 |
+
// BFloat16 specialization: inherits all SIMD machinery from Vectorized16.
// The comparison-to-1.0 helpers (eq/ne/...) and frac are defined out of line
// below because they rely on operators declared after this class.
template <>
class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
public:
  using Vectorized16::Vectorized16;

  Vectorized<BFloat16> frac() const;

  Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
};
|
| 783 |
+
|
| 784 |
+
// Addition computed in fp32 and narrowed back to bf16.
Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
}
|
| 787 |
+
// Subtraction computed in fp32 and narrowed back to bf16.
Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
}
|
| 790 |
+
// Multiplication computed in fp32 and narrowed back to bf16.
Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
}
|
| 793 |
+
// Division computed in fp32 and narrowed back to bf16.
Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
}
|
| 796 |
+
// Bitwise AND of the raw 16-bit lane representations.
Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_and_si512(a, b);
}
|
| 799 |
+
// Bitwise OR of the raw 16-bit lane representations.
Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_or_si512(a, b);
}
|
| 802 |
+
// Bitwise XOR of the raw 16-bit lane representations.
Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_xor_si512(a, b);
}
|
| 805 |
+
|
| 806 |
+
// Boolean-style equality: 1.0 where equal, 0.0 elsewhere (the all-ones
// comparison mask is ANDed with the bit pattern of bf16 1.0f).
inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
  return (*this == other) & Vectorized<BFloat16>(1.0f);
}
|
| 809 |
+
|
| 810 |
+
// Boolean-style inequality: 1.0 where not equal, 0.0 elsewhere.
inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
  return (*this != other) & Vectorized<BFloat16>(1.0f);
}
|
| 813 |
+
|
| 814 |
+
// Boolean-style greater-than: 1.0 where a > b, 0.0 elsewhere.
inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
  return (*this > other) & Vectorized<BFloat16>(1.0f);
}
|
| 817 |
+
|
| 818 |
+
// Boolean-style greater-or-equal: 1.0 where a >= b, 0.0 elsewhere.
inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
  return (*this >= other) & Vectorized<BFloat16>(1.0f);
}
|
| 821 |
+
|
| 822 |
+
// Boolean-style less-than: 1.0 where a < b, 0.0 elsewhere.
inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
  return (*this < other) & Vectorized<BFloat16>(1.0f);
}
|
| 825 |
+
|
| 826 |
+
// Boolean-style less-or-equal: 1.0 where a <= b, 0.0 elsewhere.
inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
  return (*this <= other) & Vectorized<BFloat16>(1.0f);
}
|
| 829 |
+
|
| 830 |
+
// frac. Implement this here so we can use subtraction
|
| 831 |
+
// Fractional part: x - trunc(x).  Defined out of line so operator- is visible.
inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
  return *this - this->trunc();
}
|
| 834 |
+
|
| 835 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 836 |
+
// either input is a NaN.
|
| 837 |
+
template <>
|
| 838 |
+
Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 839 |
+
__m512 a_lo, a_hi;
|
| 840 |
+
__m512 b_lo, b_hi;
|
| 841 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 842 |
+
cvtbf16_fp32(__m512i(b), b_lo, b_hi);
|
| 843 |
+
auto max_lo = _mm512_max_ps(a_lo, b_lo);
|
| 844 |
+
auto max_hi = _mm512_max_ps(a_hi, b_hi);
|
| 845 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 846 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 847 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
|
| 848 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
|
| 849 |
+
// Exploit the fact that all-ones is a NaN.
|
| 850 |
+
auto o1 = _mm512_or_ps(max_lo, nan_lo);
|
| 851 |
+
auto o2 = _mm512_or_ps(max_hi, nan_hi);
|
| 852 |
+
return cvtfp32_bf16(o1, o2);
|
| 853 |
+
}
|
| 854 |
+
|
| 855 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 856 |
+
// either input is a NaN.
|
| 857 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  __m512 a_lo, a_hi;
  __m512 b_lo, b_hi;
  __m512i zero_vec = _mm512_set1_epi32(0);
  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
  auto min_lo = _mm512_min_ps(a_lo, b_lo);
  auto min_hi = _mm512_min_ps(a_hi, b_hi);
  // _CMP_UNORD_Q marks lanes where either operand is NaN.
  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
  // Expand the mask to all-ones in NaN lanes, zero elsewhere.
  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
                                                           0xFFFFFFFF));
  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
                                                           0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  auto o1 = _mm512_or_ps(min_lo, nan_lo);
  auto o2 = _mm512_or_ps(min_hi, nan_hi);
  return cvtfp32_bf16(o1, o2);
}
|
| 877 |
+
|
| 878 |
+
template <>
|
| 879 |
+
Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
|
| 880 |
+
const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
|
| 881 |
+
__m512 a_lo, a_hi;
|
| 882 |
+
__m512 min_lo, min_hi;
|
| 883 |
+
__m512 max_lo, max_hi;
|
| 884 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 885 |
+
cvtbf16_fp32(__m512i(min), min_lo, min_hi);
|
| 886 |
+
cvtbf16_fp32(__m512i(max), max_lo, max_hi);
|
| 887 |
+
auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
|
| 888 |
+
auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
|
| 889 |
+
return cvtfp32_bf16(o1, o2);
|
| 890 |
+
}
|
| 891 |
+
|
| 892 |
+
template <>
|
| 893 |
+
Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
|
| 894 |
+
__m512 a_lo, a_hi;
|
| 895 |
+
__m512 max_lo, max_hi;
|
| 896 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 897 |
+
cvtbf16_fp32(__m512i(max), max_lo, max_hi);
|
| 898 |
+
auto o1 = _mm512_min_ps(max_lo, a_lo);
|
| 899 |
+
auto o2 = _mm512_min_ps(max_hi, a_hi);
|
| 900 |
+
return cvtfp32_bf16(o1, o2);
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
template <>
|
| 904 |
+
Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
|
| 905 |
+
__m512 a_lo, a_hi;
|
| 906 |
+
__m512 min_lo, min_hi;
|
| 907 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 908 |
+
cvtbf16_fp32(__m512i(min), min_lo, min_hi);
|
| 909 |
+
auto o1 = _mm512_max_ps(min_lo, a_lo);
|
| 910 |
+
auto o2 = _mm512_max_ps(min_hi, a_hi);
|
| 911 |
+
return cvtfp32_bf16(o1, o2);
|
| 912 |
+
}
|
| 913 |
+
|
| 914 |
+
template <>
|
| 915 |
+
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
| 916 |
+
int64_t i;
|
| 917 |
+
#ifndef __msvc_cl__
|
| 918 |
+
#pragma unroll
|
| 919 |
+
#endif
|
| 920 |
+
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
| 921 |
+
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
| 922 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
| 923 |
+
}
|
| 924 |
+
#ifndef __msvc_cl__
|
| 925 |
+
#pragma unroll
|
| 926 |
+
#endif
|
| 927 |
+
for (; i < n; i++) {
|
| 928 |
+
dst[i] = src[i];
|
| 929 |
+
}
|
| 930 |
+
}
|
| 931 |
+
|
| 932 |
+
template <>
|
| 933 |
+
inline void convert(const float* src, BFloat16* dst, int64_t n) {
|
| 934 |
+
int64_t i;
|
| 935 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
| 936 |
+
__m512 a = _mm512_loadu_ps(&src[i]);
|
| 937 |
+
__m512 b = _mm512_loadu_ps(&src[i + 16]);
|
| 938 |
+
|
| 939 |
+
__m512i bf = cvtfp32_bf16(a, b);
|
| 940 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 941 |
+
}
|
| 942 |
+
for (; i < n; i++) {
|
| 943 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
| 944 |
+
}
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
template <>
|
| 948 |
+
inline void convert(const double* src, BFloat16* dst, int64_t n) {
|
| 949 |
+
auto load_float = [](const double *src) -> __m512 {
|
| 950 |
+
// Load one float vector from an array of doubles
|
| 951 |
+
__m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
|
| 952 |
+
__m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
|
| 953 |
+
return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
|
| 954 |
+
};
|
| 955 |
+
|
| 956 |
+
int64_t i;
|
| 957 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
| 958 |
+
__m512 a = load_float(&src[i]);
|
| 959 |
+
__m512 b = load_float(&src[i + 16]);
|
| 960 |
+
|
| 961 |
+
__m512i bf = cvtfp32_bf16(a, b);
|
| 962 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 963 |
+
}
|
| 964 |
+
for (; i < n; i++) {
|
| 965 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
| 966 |
+
}
|
| 967 |
+
}
|
| 968 |
+
|
| 969 |
+
template <>
|
| 970 |
+
Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
|
| 971 |
+
const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
|
| 972 |
+
__m512 a_lo, a_hi;
|
| 973 |
+
__m512 b_lo, b_hi;
|
| 974 |
+
__m512 c_lo, c_hi;
|
| 975 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 976 |
+
cvtbf16_fp32(__m512i(b), b_lo, b_hi);
|
| 977 |
+
cvtbf16_fp32(__m512i(c), c_lo, c_hi);
|
| 978 |
+
auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
|
| 979 |
+
auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
|
| 980 |
+
return cvtfp32_bf16(o1, o2);
|
| 981 |
+
}
|
| 982 |
+
|
| 983 |
+
// Core of the 16x16 transpose for 16-bit element types (bf16 / fp16).
// Input: t[0..15], one 256-bit register per source row (16 elements each).
// Output: u[0..7], each 512-bit register holding two consecutive transposed
// rows (low 256 bits = row 2k, high 256 bits = row 2k+1).
// The letters a..p in the comments below name the 16 source rows; the shuffle
// network widens the interleave granularity step by step (32-bit, 64-bit,
// then a cross-register 16-bit permute).
static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
  __m512i r[8];
  // Pack pairs of 256-bit rows into 512-bit registers:
  // a0a1 a2a3 a4a5 a6a7 a8a9 a10a11 a12a13 a14a15 e0e1 e2e3 e4e5 e6e7 e8e9 e10e11 e12e13 e14e15
  // b0-b15 f0-f15
  // c0-c15 g0-g15
  // d0-d15 h0-h15
  // i0-i15 m0-m15
  // j0-j15 n0-n15
  // k0-k15 o0-o15
  // l0-l15 p0-p15
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  for (int i = 0; i < 4; i++) {
    r[i] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i]), t[i + 4], 0x01);
    r[i + 4] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i + 8]), t[i + 12], 0x01);
  }

  // 32-bit interleave of adjacent row pairs:
  // u0: a0a1 b0b1 a2a3 b2b3 a8a9 b8b9 a10a11 b10b11 e0e1 f0f1 e2e3 f2f3 e8e9 f8f9 e10e11 f10f11
  // u1: a4a5 b4b5 a6a7 b6b7 a12a13 b12b13 a14a15 b14b15 e4e5 f4f5 e6e7 f6f7 e12e13 f12f13 e14e15 f14f15
  // u2: c0c1 d0d1 c2c3 d2d3 c8c9 d8d9 c10c11 d10d11 g0g1 h0h1 g2g3 h2h3 g8g9 h8h9 g10g11 h10h11
  // u3: c4c5 d4b5 c6c7 d6b7 c12c13 d12d13 c14c15 d14d15 g4g5 h4h5 g6g7 h6h7 g12g13 h12h13 g14g15 h14h15
  // i j m n
  // k l o p
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  for (int i = 0; i < 8; i += 2) {
    u[i] = _mm512_unpacklo_epi32(r[i], r[i + 1]);
    u[i + 1] = _mm512_unpackhi_epi32(r[i], r[i + 1]);
  }

  // 64-bit interleave, giving 4-row groups per 64-bit chunk:
  // r0: a0a1 b0b1 c0c1 d0d1 a8a9 b8b9 c8c9 d8d9 e0e1 f0f1 g0g1 h0h1 e8e9 f8f9 g8g9 h8h9
  // r1: a2a3 b2b3 c2c3 d2d3 a10a11 b10b11 c10c11 d10d11 e2e3 f2f3 g2g3 h2h3 e10e11 f10f11 g10g11 h10h11
  // r2: a4a5 b4b5 c4c5 d4b5 a12a13 b12b13 c12c13 d12d13
  // r3: a6a7 b6b7 c6c7 d6b7 a14a15 b14b15 c14c15 d14d15
  // r4: i j k l m n o p
  r[0] = _mm512_unpacklo_epi64(u[0], u[2]);
  r[1] = _mm512_unpackhi_epi64(u[0], u[2]);
  r[2] = _mm512_unpacklo_epi64(u[1], u[3]);
  r[3] = _mm512_unpackhi_epi64(u[1], u[3]);
  r[4] = _mm512_unpacklo_epi64(u[4], u[6]);
  r[5] = _mm512_unpackhi_epi64(u[4], u[6]);
  r[6] = _mm512_unpacklo_epi64(u[5], u[7]);
  r[7] = _mm512_unpackhi_epi64(u[5], u[7]);

  // 16-bit cross-register permute indices (two-source _mm512_permutex2var_epi16:
  // indices 0x00-0x1f select from the first operand, 0x20-0x3f from the second).
  __m512i const1 = _mm512_set_epi32(
      0x00370035,
      0x00330031,
      0x00270025,
      0x00230021,
      0x00170015,
      0x00130011,
      0x00070005,
      0x00030001,
      0x00360034,
      0x00320030,
      0x00260024,
      0x00220020,
      0x00160014,
      0x00120010,
      0x00060004,
      0x00020000);
  __m512i const2 = _mm512_set_epi32(
      0x003f003d,
      0x003b0039,
      0x002f002d,
      0x002b0029,
      0x001f001d,
      0x001b0019,
      0x000f000d,
      0x000b0009,
      0x003e003c,
      0x003a0038,
      0x002e002c,
      0x002a0028,
      0x001e001c,
      0x001a0018,
      0x000e000c,
      0x000a0008);
  // merge values from two regs
  // 0-- 1--
  // 8-- 9--
  // 2-- 3--
  // 10-- 11--
  // 4-- 5--
  // 12-- 13--
  // 6-- 7--
  // 14-- 15--
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  for (int i = 0; i < 4; i++) {
    u[i] = _mm512_permutex2var_epi16(r[i], const1, r[i + 4]);
    u[i + 4] = _mm512_permutex2var_epi16(r[i], const2, r[i + 4]);
  }
}
|
| 1080 |
+
|
| 1081 |
+
// TODO(Leslie): Add the AVX2 Version of transpose_mxn for BFloat16 and Float16
|
| 1082 |
+
// Code referred to FBGEMM:
|
| 1083 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
|
| 1084 |
+
template<>
|
| 1085 |
+
inline void transpose_mxn<BFloat16, 16, 16>(
|
| 1086 |
+
const BFloat16* src,
|
| 1087 |
+
int64_t ld_src,
|
| 1088 |
+
BFloat16* dst,
|
| 1089 |
+
int64_t ld_dst) {
|
| 1090 |
+
__m256i t[16];
|
| 1091 |
+
// load from src to registers
|
| 1092 |
+
// a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15
|
| 1093 |
+
// b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15
|
| 1094 |
+
// c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15
|
| 1095 |
+
// d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
|
| 1096 |
+
// e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15
|
| 1097 |
+
// f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
|
| 1098 |
+
// g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15
|
| 1099 |
+
// h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15
|
| 1100 |
+
// i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15
|
| 1101 |
+
// j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15
|
| 1102 |
+
// k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15
|
| 1103 |
+
// l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15
|
| 1104 |
+
// m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15
|
| 1105 |
+
// n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
|
| 1106 |
+
// o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
|
| 1107 |
+
// p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
|
| 1108 |
+
#ifndef __msvc_cl__
|
| 1109 |
+
#pragma unroll(16)
|
| 1110 |
+
#endif
|
| 1111 |
+
for (int i = 0; i < 16; i++) {
|
| 1112 |
+
t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
|
| 1113 |
+
}
|
| 1114 |
+
|
| 1115 |
+
__m512i u[8];
|
| 1116 |
+
_transpose_mxn_half_16_16(t, u);
|
| 1117 |
+
|
| 1118 |
+
#ifndef __msvc_cl__
|
| 1119 |
+
#pragma unroll(8)
|
| 1120 |
+
#endif
|
| 1121 |
+
for (int i = 0; i < 8; i++) {
|
| 1122 |
+
_mm256_storeu_si256(
|
| 1123 |
+
reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
|
| 1124 |
+
_mm512_extracti32x8_epi32(u[i], 0x0));
|
| 1125 |
+
_mm256_storeu_si256(
|
| 1126 |
+
reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
|
| 1127 |
+
_mm512_extracti32x8_epi32(u[i], 0x01));
|
| 1128 |
+
}
|
| 1129 |
+
}
|
| 1130 |
+
|
| 1131 |
+
// Code referred to FBGEMM:
|
| 1132 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
|
| 1133 |
+
template<>
|
| 1134 |
+
inline void transpose_mxn<Half, 16, 16>(
|
| 1135 |
+
const Half* src,
|
| 1136 |
+
int64_t ld_src,
|
| 1137 |
+
Half* dst,
|
| 1138 |
+
int64_t ld_dst) {
|
| 1139 |
+
__m256i t[16];
|
| 1140 |
+
// load from src to registers
|
| 1141 |
+
// Same matrix indices as above transpose_mxn<BFloat16, 16, 16>
|
| 1142 |
+
#ifndef __msvc_cl__
|
| 1143 |
+
#pragma unroll(16)
|
| 1144 |
+
#endif
|
| 1145 |
+
for (int i = 0; i < 16; i++) {
|
| 1146 |
+
t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
|
| 1147 |
+
}
|
| 1148 |
+
|
| 1149 |
+
__m512i u[8];
|
| 1150 |
+
_transpose_mxn_half_16_16(t, u);
|
| 1151 |
+
|
| 1152 |
+
#ifndef __msvc_cl__
|
| 1153 |
+
#pragma unroll(8)
|
| 1154 |
+
#endif
|
| 1155 |
+
for (int i = 0; i < 8; i++) {
|
| 1156 |
+
_mm256_storeu_si256(
|
| 1157 |
+
reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
|
| 1158 |
+
_mm512_extracti32x8_epi32(u[i], 0x0));
|
| 1159 |
+
_mm256_storeu_si256(
|
| 1160 |
+
reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
|
| 1161 |
+
_mm512_extracti32x8_epi32(u[i], 0x01));
|
| 1162 |
+
}
|
| 1163 |
+
}
|
| 1164 |
+
|
| 1165 |
+
// Core of the 32x32 transpose for 16-bit element types (bf16 / fp16).
// Input: r[0..31], one 512-bit register (32 elements) per source row.
// Output: d[0..31], one transposed row per register. Elements are numbered
// 0..1023 in row-major order in the comments below; each stage doubles the
// interleave granularity (16-bit, 32-bit, 64-bit unpacks, then two 64-bit
// cross-register permute stages). Note: r[] is used as scratch and is
// clobbered.
static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
  // Stage 1: 16-bit interleave of adjacent row pairs.
  // t[0]: 0 32 1 33 2 34 3 35 8 40 9 41 10 42 11 43 16 ... 59
  // t[1]: 4 36 5 37 6 38 7 39 12 44 13 45 14 46 15 47 20 ... 63
  // t[2]: 64 96 65 97 66 98 67 99 72 104 73 105 74 106 75 ... 123
  // t[3]: 68 100 69 101 70 102 71 103 76 108 77 109 78 110 79 111 84 ... 127
  // t[4]: 128 160 129 161 130 162 131 163 136 168 137 169 138 170 139 171 144 ... 187
  // t[5]: 132 164 133 165 134 166 135 167 140 172 141 173 142 174 143 175 148 ... 191
  // t[6]: 192 224 193 225 194 226 195 227 200 232 201 233 202 234 203 235 208 ... 251
  // t[7]: 196 228 197 229 198 230 199 231 204 236 205 237 206 238 207 239 212 ... 255
  // t[8]: 256 288 257 289 258 290 259 291 264 296 265 297 266 298 267 299 272 ... 315
  // t[9]: 260 292 261 293 262 294 263 295 268 300 269 301 270 302 271 303 276 ... 319
  // t[10]: 320 352 321 353 322 354 323 355 328 360 329 361 330 362 331 363 336 ... 379
  // t[11]: 324 356 325 357 326 358 327 359 332 364 333 365 334 366 335 367 340 ... 383
  // t[12]: 384 416 385 417 386 418 387 419 392 424 393 425 394 426 395 427 400 ... 443
  // t[13]: 388 420 389 421 390 422 391 423 396 428 397 429 398 430 399 431 404 ... 447
  // t[14]: 448 480 449 481 450 482 451 483 456 488 457 489 458 490 459 491 464 ... 507
  // t[15]: 452 484 453 485 454 486 455 487 460 492 461 493 462 494 463 495 468 ... 511
  // t[16]: 512 544 513 545 514 546 515 547 520 552 521 553 522 554 523 555 528 ... 571
  // ...
  // t[31]: 964 996 965 997 966 998 967 999 972 1004 973 1005 974 1006 975 1007 980 ... 1023
#ifndef __msvc_cl__
#pragma unroll(16)
#endif
  for (int i = 0; i < 16; ++i) {
    d[i * 2] = _mm512_unpacklo_epi16(r[i * 2], r[i * 2 + 1]);
    d[i * 2 + 1] = _mm512_unpackhi_epi16(r[i * 2], r[i * 2 + 1]);
  }

  // Stage 2: 32-bit interleave, merging 4-row groups.
  // t[0]: 0 32 64 96 1 33 65 97 8 40 72 104 9 41 73 105 16 ... 121
  // t[1]: 2 34 66 98 3 35 67 99 10 42 74 106 11 43 75 107 18 ... 123
  // t[2]: 4 36 68 100 5 37 69 101 12 44 76 108 13 45 77 109 20 ... 125
  // t[3]: 6 38 70 102 7 39 71 103 14 46 78 110 15 47 79 111 22 ... 127
  // t[4]: 128 160 192 224 129 161 193 225 136 168 200 232 137 169 201 233 144 ... 249
  // t[5]: 130 162 194 226 131 163 195 227 138 170 202 234 139 171 203 235 146 ... 251
  // t[6]: 132 164 196 228 133 165 197 229 140 172 204 236 141 173 205 237 148 ... 253
  // t[7]: 134 166 198 230 135 167 199 231 142 174 206 238 143 175 207 239 150 ... 255
  // t[8]: 256 288 320 352 257 289 321 353 264 296 328 360 265 297 329 361 272 ... 377
  // t[9]: 258 290 322 354 259 291 323 355 266 298 330 362 267 299 331 363 274 ... 379
  // t[10]: 260 292 324 356 261 293 325 357 268 300 332 364 269 301 333 365 276 ... 381
  // t[11]: 262 294 326 358 263 295 327 359 270 302 334 366 271 303 335 367 278 ... 383
  // t[12]: 384 416 448 480 385 417 449 481 392 424 456 488 393 425 457 489 400 ... 505
  // t[13]: 386 418 450 482 387 419 451 483 394 426 458 490 395 427 459 491 402 ... 507
  // t[14]: 388 420 452 484 389 421 453 485 396 428 460 492 397 429 461 493 404 ... 509
  // t[15]: 390 422 454 486 391 423 455 487 398 430 462 494 399 431 463 495 406 ... 511
  // t[16]: 512 544 576 608 513 545 577 609 520 552 584 616 521 553 585 617 528 ... 633
  // ...
  // t[31]: 902 934 966 998 903 935 967 999 910 942 974 1006 911 943 975 1007 918 ... 1023
#ifndef __msvc_cl__
#pragma unroll(8)
#endif
  for (int i = 0; i < 8; ++i) {
    r[i * 4] = _mm512_unpacklo_epi32(d[i * 4], d[i * 4 + 2]);
    r[i * 4 + 1] = _mm512_unpackhi_epi32(d[i * 4], d[i * 4 + 2]);
    r[i * 4 + 2] = _mm512_unpacklo_epi32(d[i * 4 + 1], d[i * 4 + 3]);
    r[i * 4 + 3] = _mm512_unpackhi_epi32(d[i * 4 + 1], d[i * 4 + 3]);
  }

  // Stage 3: 64-bit interleave, merging 8-row groups.
  // t[0]: 0 32 64 96 128 160 192 224 8 40 72 104 136 168 200 232 16 ... 248
  // t[1]: 1 33 65 97 129 161 193 225 9 41 73 105 137 169 201 233 17 ... 249
  // t[2]: 2 34 66 98 130 162 194 226 10 42 74 106 138 170 202 234 18 ... 250
  // t[3]: 3 35 67 99 131 163 195 227 11 43 75 107 139 171 203 235 19 ... 251
  // t[4]: 4 36 68 100 132 164 196 228 12 44 76 108 140 172 204 236 20 ... 252
  // t[5]: 5 37 69 101 133 165 197 229 13 45 77 109 141 173 205 237 21 ... 253
  // t[6]: 6 38 70 102 134 166 198 230 14 46 78 110 142 174 206 238 22 ... 254
  // t[7]: 7 39 71 103 135 167 199 231 15 47 79 111 143 175 207 239 23 ... 255
  // t[8]: 256 288 320 352 384 416 448 480 264 296 328 360 392 424 456 488 272 ... 504
  // t[9]: 257 289 321 353 385 417 449 481 265 297 329 361 393 425 457 489 273 ... 505
  // t[10]: 258 290 322 354 386 418 450 482 266 298 330 362 394 426 458 490 274 ... 506
  // t[11]: 259 291 323 355 387 419 451 483 267 299 331 363 395 427 459 491 275 ... 507
  // t[12]: 260 292 324 356 388 420 452 484 268 300 332 364 396 428 460 492 276 ... 508
  // t[13]: 261 293 325 357 389 421 453 485 269 301 333 365 397 429 461 493 277 ... 509
  // t[14]: 262 294 326 358 390 422 454 486 270 302 334 366 398 430 462 494 278 ... 510
  // t[15]: 263 295 327 359 391 423 455 487 271 303 335 367 399 431 463 495 279 ... 511
  // t[16]: 512 544 576 608 640 672 704 736 520 552 584 616 648 680 712 744 528 ... 760
  // ...
  // t[31]: 775 807 839 871 903 935 967 999 783 815 847 879 911 943 975 1007 791 ... 1023
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  for (int i = 0; i < 4; ++i) {
    d[i * 8] = _mm512_unpacklo_epi64(r[i * 8], r[i * 8 + 4]);
    d[i * 8 + 1] = _mm512_unpackhi_epi64(r[i * 8], r[i * 8 + 4]);
    d[i * 8 + 2] = _mm512_unpacklo_epi64(r[i * 8 + 1], r[i * 8 + 5]);
    d[i * 8 + 3] = _mm512_unpackhi_epi64(r[i * 8 + 1], r[i * 8 + 5]);
    d[i * 8 + 4] = _mm512_unpacklo_epi64(r[i * 8 + 2], r[i * 8 + 6]);
    d[i * 8 + 5] = _mm512_unpackhi_epi64(r[i * 8 + 2], r[i * 8 + 6]);
    d[i * 8 + 6] = _mm512_unpacklo_epi64(r[i * 8 + 3], r[i * 8 + 7]);
    d[i * 8 + 7] = _mm512_unpackhi_epi64(r[i * 8 + 3], r[i * 8 + 7]);
  }

  // Stage 4: 64-bit cross-register permute, merging 16-row groups.
  // t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 16 ... 496
  // t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 17 ... 497
  // t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 18 ... 498
  // t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 19 ... 499
  // t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 20 ... 500
  // t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 21 ... 501
  // t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 22 ... 502
  // t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 23 ... 503
  // t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 24 ... 504
  // t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 25 ... 505
  // t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 26 ... 506
  // t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 27 ... 507
  // t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 28 ... 508
  // t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 29 ... 509
  // t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 30 ... 510
  // t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 31 ... 511
  // t[16]: 512 544 576 608 640 672 704 736 768 800 832 864 896 928 960 992 528 ... 1008
  // ...
  // t[31]: 527 559 591 623 655 687 719 751 783 815 847 879 911 943 975 1007 543 ... 1023
  __m512i const1 = _mm512_set_epi64(
      0x000000000000000d,
      0x000000000000000c,
      0x0000000000000005,
      0x0000000000000004,
      0x0000000000000009,
      0x0000000000000008,
      0x0000000000000001,
      0x0000000000000000);
  __m512i const2 = _mm512_set_epi64(
      0x000000000000000f,
      0x000000000000000e,
      0x0000000000000007,
      0x0000000000000006,
      0x000000000000000b,
      0x000000000000000a,
      0x0000000000000003,
      0x0000000000000002);
#ifndef __msvc_cl__
#pragma unroll(8)
#endif
  for (int i = 0; i < 8; ++i) {
    r[i] = _mm512_permutex2var_epi64(d[i], /*idx*/const1, d[i + 8]);
    r[i + 8] = _mm512_permutex2var_epi64(d[i], /*idx*/const2, d[i + 8]);
    r[i + 16] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const1, d[i + 24]);
    r[i + 24] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const2, d[i + 24]);
  }

  // Stage 5: final 64-bit cross-register permute, merging the two 16-row
  // halves into full transposed rows.
  // t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 512 544 ... 992
  // t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 513 545 ... 993
  // t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 514 546 ... 994
  // t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 515 547 ... 995
  // t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 516 548 ... 996
  // t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 517 549 ... 997
  // t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 518 550 ... 998
  // t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 519 551 ... 999
  // t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 520 552 ... 1000
  // t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 521 553 ... 1001
  // t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 522 554 ... 1002
  // t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 523 555 ... 1003
  // t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 524 556 ... 1004
  // t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 525 557 ... 1005
  // t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 526 558 ... 1006
  // t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 527 559 ... 1007
  // t[16]: 16 48 80 112 144 176 208 240 272 304 336 368 400 432 464 496 528 560 ... 1008
  // ...
  // t[31]: 31 63 95 127 159 191 223 255 287 319 351 383 415 447 479 511 543 575 ... 1023
  __m512i const3 = _mm512_set_epi64(
      0x000000000000000b,
      0x000000000000000a,
      0x0000000000000009,
      0x0000000000000008,
      0x0000000000000003,
      0x0000000000000002,
      0x0000000000000001,
      0x0000000000000000);
  __m512i const4 = _mm512_set_epi64(
      0x000000000000000f,
      0x000000000000000e,
      0x000000000000000d,
      0x000000000000000c,
      0x0000000000000007,
      0x0000000000000006,
      0x0000000000000005,
      0x0000000000000004);
#ifndef __msvc_cl__
#pragma unroll(16)
#endif
  for (int i = 0; i < 16; ++i) {
    d[i] = _mm512_permutex2var_epi64(r[i], /*idx*/const3, r[i + 16]);
    d[i + 16] = _mm512_permutex2var_epi64(r[i], /*idx*/const4, r[i + 16]);
  }
}
|
| 1347 |
+
|
| 1348 |
+
// Code referred to FBGEMM:
|
| 1349 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6
|
| 1350 |
+
template<>
|
| 1351 |
+
inline void transpose_mxn<BFloat16>(const BFloat16* src, int64_t ld_src, BFloat16* dst, int64_t ld_dst, int M, int N) {
|
| 1352 |
+
// load from src
|
| 1353 |
+
TORCH_CHECK(M <= 32 && N <= 32, "transpose_mxn<BFloat16> expects M, N <= 32.");
|
| 1354 |
+
__m512i r[32];
|
| 1355 |
+
int i;
|
| 1356 |
+
if (N == 32) {
|
| 1357 |
+
for (i = 0; i < M; ++i) {
|
| 1358 |
+
r[i] = _mm512_loadu_si512(&src[i * ld_src]);
|
| 1359 |
+
}
|
| 1360 |
+
} else {
|
| 1361 |
+
__mmask32 src_mask = (1 << N) - 1;
|
| 1362 |
+
for (i = 0; i < M; ++i) {
|
| 1363 |
+
r[i] = _mm512_maskz_loadu_epi16(src_mask, &src[i * ld_src]);
|
| 1364 |
+
}
|
| 1365 |
+
}
|
| 1366 |
+
for (; i < 32; ++i) {
|
| 1367 |
+
r[i] = _mm512_setzero_si512();
|
| 1368 |
+
}
|
| 1369 |
+
|
| 1370 |
+
__m512i d[32];
|
| 1371 |
+
_transpose_mxn_half_32_32(r, d);
|
| 1372 |
+
|
| 1373 |
+
// store to dst
|
| 1374 |
+
if (M == 32) {
|
| 1375 |
+
for (i = 0; i < N; ++i) {
|
| 1376 |
+
_mm512_storeu_si512(&dst[i * ld_dst], d[i]);
|
| 1377 |
+
}
|
| 1378 |
+
} else {
|
| 1379 |
+
__mmask32 dst_mask = (1 << M) - 1;
|
| 1380 |
+
for (i = 0; i < N; ++i) {
|
| 1381 |
+
_mm512_mask_storeu_epi16(&dst[i * ld_dst], dst_mask, d[i]);
|
| 1382 |
+
}
|
| 1383 |
+
}
|
| 1384 |
+
}
|
| 1385 |
+
|
| 1386 |
+
// Compile-time (M, N) entry point for BFloat16: forwards to the runtime 32x32
// padded kernel above. Disabled for M == N == 16, which is handled by the
// dedicated transpose_mxn<BFloat16, 16, 16> specialization.
template <typename T, int M, int N,
          typename std::enable_if_t<std::is_same<T, BFloat16>::value && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
inline void transpose_mxn(const BFloat16* src, int64_t ld_src, BFloat16* dst, int64_t ld_dst) {
  transpose_mxn<BFloat16>(src, ld_src, dst, ld_dst, M, N);
}
|
| 1391 |
+
|
| 1392 |
+
template<>
|
| 1393 |
+
inline void transpose_mxn<Half>(const Half* src, int64_t ld_src, Half* dst, int64_t ld_dst, int M, int N) {
|
| 1394 |
+
TORCH_CHECK(M <= 32 && N <= 32, "transpose_mxn<Half> expects M, N <= 32.");
|
| 1395 |
+
// load from src
|
| 1396 |
+
__m512i r[32];
|
| 1397 |
+
int i;
|
| 1398 |
+
if (N == 32) {
|
| 1399 |
+
for (i = 0; i < M; ++i) {
|
| 1400 |
+
r[i] = _mm512_loadu_si512(&src[i * ld_src]);
|
| 1401 |
+
}
|
| 1402 |
+
} else {
|
| 1403 |
+
__mmask32 src_mask = (1 << N) - 1;
|
| 1404 |
+
for (i = 0; i < M; ++i) {
|
| 1405 |
+
r[i] = _mm512_maskz_loadu_epi16(src_mask, &src[i * ld_src]);
|
| 1406 |
+
}
|
| 1407 |
+
}
|
| 1408 |
+
for (; i < 32; ++i) {
|
| 1409 |
+
r[i] = _mm512_setzero_si512();
|
| 1410 |
+
}
|
| 1411 |
+
|
| 1412 |
+
__m512i d[32];
|
| 1413 |
+
_transpose_mxn_half_32_32(r, d);
|
| 1414 |
+
|
| 1415 |
+
// store to dst
|
| 1416 |
+
if (M == 32) {
|
| 1417 |
+
for (i = 0; i < N; ++i) {
|
| 1418 |
+
_mm512_storeu_si512(&dst[i * ld_dst], d[i]);
|
| 1419 |
+
}
|
| 1420 |
+
} else {
|
| 1421 |
+
__mmask32 dst_mask = (1 << M) - 1;
|
| 1422 |
+
for (i = 0; i < N; ++i) {
|
| 1423 |
+
_mm512_mask_storeu_epi16(&dst[i * ld_dst], dst_mask, d[i]);
|
| 1424 |
+
}
|
| 1425 |
+
}
|
| 1426 |
+
}
|
| 1427 |
+
|
| 1428 |
+
// Compile-time (M, N) entry point for Half: forwards to the runtime 32x32
// padded kernel above. Disabled for M == N == 16, which is handled by the
// dedicated transpose_mxn<Half, 16, 16> specialization.
template <typename T, int M, int N,
          typename std::enable_if_t<std::is_same<T, Half>::value && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
inline void transpose_mxn(const Half* src, int64_t ld_src, Half* dst, int64_t ld_dst) {
  transpose_mxn<Half>(src, ld_src, dst, ld_dst, M, N);
}
|
| 1433 |
+
|
| 1434 |
+
// AVX-512 vector of 32 fp16 (Half) values. The shared 16-bit machinery lives
// in Vectorized16; this specialization only declares the members whose
// definitions need the free operators defined later in this file.
template <>
class Vectorized<Half>: public Vectorized16<Half> {
public:
  using Vectorized16::Vectorized16;

  // Fractional part, self - trunc(self); defined out of line because it uses
  // operator-.
  Vectorized<Half> frac() const;

  // Comparison helpers returning 1.0 / 0.0 per lane (unlike operator==, etc.,
  // which return all-ones / all-zeros bit masks); defined out of line.
  Vectorized<Half> eq(const Vectorized<Half>& other) const;
  Vectorized<Half> ne(const Vectorized<Half>& other) const;
  Vectorized<Half> gt(const Vectorized<Half>& other) const;
  Vectorized<Half> ge(const Vectorized<Half>& other) const;
  Vectorized<Half> lt(const Vectorized<Half>& other) const;
  Vectorized<Half> le(const Vectorized<Half>& other) const;
};
|
| 1448 |
+
|
| 1449 |
+
// Arithmetic on Half is performed by widening to fp32, applying the fp32
// intrinsic on both halves, and rounding the result back to fp16.
Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto add_fp32 = [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); };
  return binary_op_as_fp32(a, b, add_fp32);
}
Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto sub_fp32 = [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); };
  return binary_op_as_fp32(a, b, sub_fp32);
}
Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto mul_fp32 = [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); };
  return binary_op_as_fp32(a, b, mul_fp32);
}
Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto div_fp32 = [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); };
  return binary_op_as_fp32(a, b, div_fp32);
}
|
| 1461 |
+
|
| 1462 |
+
// Bitwise operations act directly on the raw 512-bit fp16 payload; no
// fp32 round-trip is needed.
Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  const __m512i bits = _mm512_and_si512(__m512i(a), __m512i(b));
  return bits;
}
Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  const __m512i bits = _mm512_or_si512(__m512i(a), __m512i(b));
  return bits;
}
Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  const __m512i bits = _mm512_xor_si512(__m512i(a), __m512i(b));
  return bits;
}
|
| 1471 |
+
|
| 1472 |
+
// Each comparison helper converts the all-ones/all-zeros lane mask produced
// by the corresponding operator into a numeric 1.0 / 0.0 result by AND-ing
// with a vector of ones.
inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
  const auto mask = *this == other;
  return mask & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
  const auto mask = *this != other;
  return mask & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
  const auto mask = *this > other;
  return mask & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
  const auto mask = *this >= other;
  return mask & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
  const auto mask = *this < other;
  return mask & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
  const auto mask = *this <= other;
  return mask & Vectorized<Half>(1.0f);
}
|
| 1495 |
+
|
| 1496 |
+
// frac. Implement this here so we can use subtraction
|
| 1497 |
+
inline Vectorized<Half> Vectorized<Half>::frac() const {
|
| 1498 |
+
return *this - this->trunc();
|
| 1499 |
+
}
|
| 1500 |
+
|
| 1501 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 1502 |
+
// either input is a NaN.
|
| 1503 |
+
template <>
|
| 1504 |
+
Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 1505 |
+
__m512 a_lo, a_hi;
|
| 1506 |
+
__m512 b_lo, b_hi;
|
| 1507 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1508 |
+
cvtfp16_fp32(__m512i(b), b_lo, b_hi);
|
| 1509 |
+
auto max_lo = _mm512_max_ps(a_lo, b_lo);
|
| 1510 |
+
auto max_hi = _mm512_max_ps(a_hi, b_hi);
|
| 1511 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 1512 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 1513 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
|
| 1514 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
|
| 1515 |
+
// Exploit the fact that all-ones is a NaN.
|
| 1516 |
+
auto o1 = _mm512_or_ps(max_lo, nan_lo);
|
| 1517 |
+
auto o2 = _mm512_or_ps(max_hi, nan_hi);
|
| 1518 |
+
return cvtfp32_fp16(o1, o2);
|
| 1519 |
+
}
|
| 1520 |
+
|
| 1521 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  __m512 a_lo, a_hi;
  __m512 b_lo, b_hi;
  __m512i zero_vec = _mm512_set1_epi32(0);
  // Widen the fp16 inputs to two fp32 vectors so fp32 min/compare ops apply.
  cvtfp16_fp32(__m512i(a), a_lo, a_hi);
  cvtfp16_fp32(__m512i(b), b_lo, b_hi);
  auto min_lo = _mm512_min_ps(a_lo, b_lo);
  auto min_hi = _mm512_min_ps(a_hi, b_hi);
  // _CMP_UNORD_Q is true for a lane when either operand is NaN.
  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
  // Expand each NaN mask bit into an all-ones 32-bit lane (zeros elsewhere).
  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
                                                           0xFFFFFFFF));
  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
                                                           0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  auto o1 = _mm512_or_ps(min_lo, nan_lo);
  auto o2 = _mm512_or_ps(min_hi, nan_hi);
  return cvtfp32_fp16(o1, o2);
}
|
| 1543 |
+
|
| 1544 |
+
// Clamp `a` into [min, max] per lane, computed in fp32: widen, apply
// max(min, .) then min(max, .), and narrow back to fp16.
// NOTE(review): NaN handling follows _mm512_min_ps/_mm512_max_ps semantics
// (no explicit NaN propagation here) — confirm callers rely on that.
template <>
Vectorized<Half> inline clamp(const Vectorized<Half>& a,
    const Vectorized<Half>& min, const Vectorized<Half>& max) {
  __m512 a_lo, a_hi;
  __m512 min_lo, min_hi;
  __m512 max_lo, max_hi;
  cvtfp16_fp32(__m512i(a), a_lo, a_hi);
  cvtfp16_fp32(__m512i(min), min_lo, min_hi);
  cvtfp16_fp32(__m512i(max), max_lo, max_hi);
  auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
  auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
  return cvtfp32_fp16(o1, o2);
}
|
| 1557 |
+
|
| 1558 |
+
template <>
|
| 1559 |
+
Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
|
| 1560 |
+
__m512 a_lo, a_hi;
|
| 1561 |
+
__m512 max_lo, max_hi;
|
| 1562 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1563 |
+
cvtfp16_fp32(__m512i(max), max_lo, max_hi);
|
| 1564 |
+
auto o1 = _mm512_min_ps(max_lo, a_lo);
|
| 1565 |
+
auto o2 = _mm512_min_ps(max_hi, a_hi);
|
| 1566 |
+
return cvtfp32_fp16(o1, o2);
|
| 1567 |
+
}
|
| 1568 |
+
|
| 1569 |
+
template <>
|
| 1570 |
+
Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
|
| 1571 |
+
__m512 a_lo, a_hi;
|
| 1572 |
+
__m512 min_lo, min_hi;
|
| 1573 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1574 |
+
cvtfp16_fp32(__m512i(min), min_lo, min_hi);
|
| 1575 |
+
auto o1 = _mm512_max_ps(min_lo, a_lo);
|
| 1576 |
+
auto o2 = _mm512_max_ps(min_hi, a_hi);
|
| 1577 |
+
return cvtfp32_fp16(o1, o2);
|
| 1578 |
+
}
|
| 1579 |
+
|
| 1580 |
+
// Half -> Half copy: bulk-move 32 halfs (one 512-bit vector) per iteration,
// then fall back to a scalar tail for the remaining (< 32) elements.
template <>
inline void convert(const Half* src, Half* dst, int64_t n) {
  int64_t i;
#ifndef __msvc_cl__
#pragma unroll
#endif
  // Vectorized bulk copy; the loads/stores are unaligned-safe.
  for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
    auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
    _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
  }
#ifndef __msvc_cl__
#pragma unroll
#endif
  // Scalar tail.
  for (; i < n; i++) {
    dst[i] = src[i];
  }
}
|
| 1597 |
+
|
| 1598 |
+
// float -> Half conversion: each iteration packs two fp32 vectors (32 floats)
// into one 512-bit vector of 32 fp16 values; scalar tail handles the rest.
template <>
inline void convert(const float* src, Half* dst, int64_t n) {
  int64_t i;
  for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
    __m512 a = _mm512_loadu_ps(&src[i]);
    __m512 b = _mm512_loadu_ps(&src[i + 16]);

    __m512i bf = cvtfp32_fp16(a, b);
    _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
  }
  // Scalar tail; c10::convert applies the standard fp32 -> fp16 rounding.
  for (; i < n; i++) {
    dst[i] = c10::convert<Half>(src[i]);
  }
}
|
| 1612 |
+
|
| 1613 |
+
// double -> Half conversion: narrow 8 doubles at a time to fp32 and glue two
// __m256 halves into one __m512, then pack two such fp32 vectors into 32
// fp16 values per iteration; scalar tail handles the remainder.
template <>
inline void convert(const double* src, Half* dst, int64_t n) {
  auto load_float = [](const double *src) -> __m512 {
    // Load one float vector from an array of doubles
    __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
    __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
    return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
  };

  int64_t i;
  for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
    __m512 a = load_float(&src[i]);
    __m512 b = load_float(&src[i + 16]);

    __m512i bf = cvtfp32_fp16(a, b);
    _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
  }
  // Scalar tail (double -> Half goes through c10::convert).
  for (; i < n; i++) {
    dst[i] = c10::convert<Half>(src[i]);
  }
}
|
| 1634 |
+
|
| 1635 |
+
// Fused multiply-add a*b + c for Half vectors: widen all three operands to
// fp32, use the hardware FMA (single rounding in fp32), then narrow to fp16.
template <>
Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
    const Vectorized<Half>& b, const Vectorized<Half>& c) {
  __m512 a_lo, a_hi;
  __m512 b_lo, b_hi;
  __m512 c_lo, c_hi;
  cvtfp16_fp32(__m512i(a), a_lo, a_hi);
  cvtfp16_fp32(__m512i(b), b_lo, b_hi);
  cvtfp16_fp32(__m512i(c), c_lo, c_hi);
  auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
  auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
  return cvtfp32_fp16(o1, o2);
}
|
| 1648 |
+
|
| 1649 |
+
// Instantiates convert_<name>_float / convert_float_<name> pairs that widen a
// reduced-precision vector (BFloat16/Half) to two fp32 vectors and narrow two
// fp32 vectors back, via the AVX512 cvt_to_fp32 / cvt_from_fp32 helpers.
// (Comments cannot appear inside the backslash-continued macro body.)
#define CONVERT_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  __m512 o1, o2; \
  cvt_to_fp32<type>(__m512i(a), o1, o2); \
  return std::make_tuple(o1, o2); \
} \
\
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  return cvt_from_fp32<type>(__m512(a), __m512(b)); \
}
CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
CONVERT_VECTORIZED_INIT(Half, half);
|
| 1661 |
+
|
| 1662 |
+
#else //defined(CPU_CAPABILITY_AVX512)

// Scalar fallback for the conversion pair above when AVX512 is unavailable:
// round-trips through aligned stack arrays element by element using
// c10::convert. Same names/signatures as the vectorized versions.
#define CONVERT_NON_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr2); \
  for (const auto k : c10::irange(K)) { \
    arr[k] = c10::convert<float>(arr2[k]); \
  } \
  return std::make_tuple( \
      Vectorized<float>::loadu(arr), \
      Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
} \
\
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr); \
  b.store(arr + Vectorized<float>::size()); \
  for (const auto k : c10::irange(K)) { \
    arr2[k] = c10::convert<type>(arr[k]); \
  } \
  return Vectorized<type>::loadu(arr2); \
}
CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
CONVERT_NON_VECTORIZED_INIT(Half, half);
|
| 1691 |
+
|
| 1692 |
+
#endif // defined(CPU_CAPABILITY_AVX512)

#if defined(CPU_CAPABILITY_AVX512)
// Instantiates load_fp32_from_<name> overloads: load 16 (one-output) or 32
// (two-output) reduced-precision elements from memory and widen them to
// Vectorized<float> via cvt_to_fp32.
#define LOAD_FP32_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  auto values = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(data)); \
  __m512 out_values; \
  cvt_to_fp32<type>(values, out_values); \
  out = out_values; \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  auto vec = Vectorized<type>::loadu(data); \
  __m512 out1_values, out2_values; \
  cvt_to_fp32<type>(vec, out1_values, out2_values); \
  out1 = out1_values; \
  out2 = out2_values; \
}
LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
LOAD_FP32_VECTORIZED_INIT(Half, fp16);
|
| 1712 |
+
|
| 1713 |
+
#else // defined(CPU_CAPABILITY_AVX512)
// Scalar fallback for load_fp32_from_<name>: element-by-element widening via
// an aligned stack buffer; the two-output overload loads two consecutive
// Vectorized<float>::size() chunks.
#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  __at_align__ float values[Vectorized<float>::size()]; \
  for (const auto k : c10::irange(Vectorized<float>::size())) { \
    values[k] = data[k]; \
  } \
  out = Vectorized<float>::loadu(values); \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  load_fp32_from_##name(data, out1); \
  data += Vectorized<float>::size(); \
  load_fp32_from_##name(data, out2); \
}
LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);

#endif
|
| 1732 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h
ADDED
|
@@ -0,0 +1,513 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <c10/util/complex.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at {
|
| 16 |
+
namespace vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 21 |
+
|
| 22 |
+
// AVX512 vector of 4 c10::complex<double>, stored interleaved in one __m512d
// as (re0, im0, re1, im1, re2, im2, re3, im3).
template <> class Vectorized<c10::complex<double>> {
private:
  __m512d values;
  // All-zero integer vector reused to build lane masks in operator==/!=.
  static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
public:
  using value_type = c10::complex<double>;
  using size_type = int;
  // Number of complex elements (8 doubles / 2 per complex).
  static constexpr size_type size() {
    return 4;
  }
  Vectorized() {}
  Vectorized(__m512d v) : values(v) {}
  // Broadcast a single complex value into all 4 slots.
  Vectorized(c10::complex<double> val) {
    double real_value = val.real();
    double imag_value = val.imag();
    values = _mm512_setr_pd(real_value, imag_value, real_value, imag_value,
                            real_value, imag_value, real_value, imag_value);
  }
  Vectorized(c10::complex<double> val1, c10::complex<double> val2,
            c10::complex<double> val3, c10::complex<double> val4) {
    values = _mm512_setr_pd(val1.real(), val1.imag(),
                            val2.real(), val2.imag(),
                            val3.real(), val3.imag(),
                            val4.real(), val4.imag());
  }
  operator __m512d() const {
    return values;
  }
  // Compile-time blend: `mask` has one bit per complex element; each bit is
  // expanded to two double lanes (real+imag) in the immediate below.
  template <int64_t mask>
  static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a,
                                                const Vectorized<c10::complex<double>>& b) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    // NOLINTNEXTLINE(clang-diagnostic-warning)
    switch (mask) {
      case 0:
        return a;
      case 1:
        return _mm512_mask_blend_pd(0x03, a.values, b.values); //b0000 0001 = b0000 0011
      case 2:
        return _mm512_mask_blend_pd(0x0C, a.values, b.values); //b0000 0010 = b0000 1100
      case 3:
        return _mm512_mask_blend_pd(0x0F, a.values, b.values); //b0000 0011 = b0000 1111
      case 4:
        return _mm512_mask_blend_pd(0x30, a.values, b.values); //b0000 0100 = b0011 0000
      case 5:
        return _mm512_mask_blend_pd(0x33, a.values, b.values); //b0000 0101 = b0011 0011
      case 6:
        return _mm512_mask_blend_pd(0x3C, a.values, b.values); //b0000 0110 = b0011 1100
      case 7:
        return _mm512_mask_blend_pd(0x3F, a.values, b.values); //b0000 0111 = b0011 1111
      case 8:
        return _mm512_mask_blend_pd(0xC0, a.values, b.values); //b0000 1000 = b1100 0000
      case 9:
        return _mm512_mask_blend_pd(0xC3, a.values, b.values); //b0000 1001 = b1100 0011
      case 10:
        return _mm512_mask_blend_pd(0xCC, a.values, b.values); //b0000 1010 = b1100 1100
      case 11:
        return _mm512_mask_blend_pd(0xCF, a.values, b.values); //b0000 1011 = b1100 1111
      case 12:
        return _mm512_mask_blend_pd(0xF0, a.values, b.values); //b0000 1100 = b1111 0000
      case 13:
        return _mm512_mask_blend_pd(0xF3, a.values, b.values); //b0000 1101 = b1111 0011
      case 14:
        return _mm512_mask_blend_pd(0xFC, a.values, b.values); //b0000 1110 = b1111 1100
      case 15:
        return _mm512_mask_blend_pd(0xFF, a.values, b.values); //b0000 1111 = b1111 1111
    }
    return b;
  }
  // Runtime blend: the mask's real lane (assumed all-ones when true) is
  // duplicated over the imag lane so real+imag move together.
  static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a,
                                                 const Vectorized<c10::complex<double>>& b,
                                                 const Vectorized<c10::complex<double>>& mask) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    auto mask_ = _mm512_unpacklo_pd(mask.values, mask.values);
    auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
    auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask_), all_ones, _MM_CMPINT_EQ);
    return _mm512_mask_blend_pd(mmask, a.values, b.values);
  }
  // base, base+step, base+2*step, base+3*step.
  template<typename step_t>
  static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0.,
                                                 step_t step = static_cast<step_t>(1)) {
    return Vectorized<c10::complex<double>>(base,
                                            base + c10::complex<double>(1)*step,
                                            base + c10::complex<double>(2)*step,
                                            base + c10::complex<double>(3)*step);
  }
  // First `count` elements from b, the rest from a.
  static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a,
                                              const Vectorized<c10::complex<double>>& b,
                                              int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
    }
    return b;
  }
  // Unaligned load; a partial load copies `count` elements through a zeroed
  // aligned buffer so no out-of-bounds memory is read.
  static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
    if (count == size())
      return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr));

    __at_align__ double tmp_values[2*size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(2*size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values,
        reinterpret_cast<const double*>(ptr),
        count * sizeof(c10::complex<double>));
    return _mm512_load_pd(tmp_values);
  }
  // Unaligned store; a partial store goes through a stack buffer.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values);
    } else if (count > 0) {
      double tmp_values[2*size()];
      _mm512_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
    }
  }
  const c10::complex<double>& operator[](int idx) const = delete;
  c10::complex<double>& operator[](int idx) = delete;
  // Apply a scalar complex function element-wise (store / call / reload).
  Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
    __at_align__ c10::complex<double> tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }
  // AVX512 doesn't have horizontal add & horizontal sub instructions.
  // TODO: hadd_pd() & hsub_pd() may have scope for improvement.
  static inline __m512d hadd_pd(__m512d a, __m512d b) {
    __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
    __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
    return _mm512_add_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
                         _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
  }
  static inline __m512d hsub_pd(__m512d a, __m512d b) {
    __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
    __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
    return _mm512_sub_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
                         _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
  }
  // |z|^2 duplicated into both lanes of each element.
  __m512d abs_2_() const {
    auto val_2 = _mm512_mul_pd(values, values); // a*a b*b
    return hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
  }
  // |z| duplicated into both lanes; hypot avoids overflow in a*a + b*b.
  __m512d abs_() const {
    auto real = _mm512_movedup_pd(values); // real real
    // movehdup_pd does not exist...
    auto imag = _mm512_permute_pd(values, 0xff); // imag imag
    return Sleef_hypotd8_u05(real, imag); // abs abs
  }
  // |z| as a complex number with zero imaginary part.
  Vectorized<c10::complex<double>> abs() const {
    const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm512_and_pd(abs_(), real_mask); // abs 0
  }
  __m512d angle_() const {
    //angle = atan2(b/a)
    auto b_a = _mm512_permute_pd(values, 0x55); // b a
    return Sleef_atan2d8_u10(values, b_a); // 90-angle angle
  }
  // arg(z) as a complex number with zero imaginary part.
  Vectorized<c10::complex<double>> angle() const {
    const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    auto angle = _mm512_permute_pd(angle_(), 0x55); // angle 90-angle
    return _mm512_and_pd(angle, real_mask); // angle 0
  }
  // z / |z|, with 0 where z == 0.
  Vectorized<c10::complex<double>> sgn() const {
    auto abs = abs_();
    auto zero = _mm512_setzero_pd();
    auto mask = _mm512_cmp_pd_mask(abs, zero, _CMP_EQ_OQ);
    auto div = _mm512_div_pd(values, abs);
    return _mm512_mask_blend_pd(mask, div, zero);
  }
  // Zero out the imaginary lanes.
  __m512d real_() const {
    const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm512_and_pd(values, real_mask);
  }
  Vectorized<c10::complex<double>> real() const {
    return real_();
  }
  // Zero out the real lanes (imag kept in the imag slot).
  __m512d imag_() const {
    const __m512d imag_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
                                                                    0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
                                                                    0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
                                                                    0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
    return _mm512_and_pd(values, imag_mask);
  }
  // imag(z) moved into the real slot.
  Vectorized<c10::complex<double>> imag() const {
    return _mm512_permute_pd(imag_(), 0x55); //b a
  }
  // Conjugate: flip the sign bit of each imaginary lane via XOR.
  __m512d conj_() const {
    const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
    return _mm512_xor_pd(values, sign_mask); // a -b
  }
  Vectorized<c10::complex<double>> conj() const {
    return conj_();
  }
  Vectorized<c10::complex<double>> log() const {
    // Most trigonomic ops use the log() op to improve complex number performance.
    return map(std::log);
  }
  Vectorized<c10::complex<double>> log2() const {
    const __m512d log2_ = _mm512_set1_pd(std::log(2));
    return _mm512_div_pd(log(), log2_);
  }
  Vectorized<c10::complex<double>> log10() const {
    const __m512d log10_ = _mm512_set1_pd(std::log(10));
    return _mm512_div_pd(log(), log10_);
  }
  Vectorized<c10::complex<double>> log1p() const {
    return map(std::log1p);
  }
  Vectorized<c10::complex<double>> asin() const {
    // asin(x)
    // = -i*ln(iz + sqrt(1 -z^2))
    // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
    // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
    const __m512d one = _mm512_set1_pd(1);

    auto conj = conj_();
    auto b_a = _mm512_permute_pd(conj, 0x55); //-b a
    auto ab = _mm512_mul_pd(conj, b_a); //-ab -ab
    auto im = _mm512_add_pd(ab, ab); //-2ab -2ab

    auto val_2 = _mm512_mul_pd(values, values); // a*a b*b
    auto re = hsub_pd(val_2, _mm512_permute_pd(val_2, 0x55)); // a*a-b*b b*b-a*a
    re = _mm512_sub_pd(one, re);

    auto root = Vectorized(_mm512_mask_blend_pd(0xAA, re, im)).sqrt(); //sqrt(re + i*im)
    auto ln = Vectorized(_mm512_add_pd(b_a, root)).log(); //ln(iz + sqrt())
    return Vectorized(_mm512_permute_pd(ln.values, 0x55)).conj(); //-i*ln()
  }
  Vectorized<c10::complex<double>> acos() const {
    // acos(x) = pi/2 - asin(x)
    constexpr auto pi_2d = c10::pi<double> / 2;
    const __m512d pi_2 = _mm512_setr_pd(pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0);
    return _mm512_sub_pd(pi_2, asin());
  }
  // Defined out-of-line (after the class) in the original header.
  Vectorized<c10::complex<double>> atan() const;
  Vectorized<c10::complex<double>> atanh() const {
    return map(std::atanh);
  }
  Vectorized<c10::complex<double>> exp() const {
    //exp(a + bi)
    // = exp(a)*(cos(b) + sin(b)i)
    auto exp = Sleef_expd8_u10(values); //exp(a) exp(b)
    exp = _mm512_mask_blend_pd(0xAA, exp, _mm512_permute_pd(exp, 0x55)); //exp(a) exp(a)

    auto sin_cos = Sleef_sincosd8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
    auto cos_sin = _mm512_mask_blend_pd(0xAA, _mm512_permute_pd(sin_cos.y, 0x55),
                                        sin_cos.x); //cos(b) sin(b)
    return _mm512_mul_pd(exp, cos_sin);
  }
  Vectorized<c10::complex<double>> exp2() const {
    // Use identity 2**x = exp(log(2) * x)
    const __m512d ln_2 = _mm512_set1_pd(c10::ln_2<double>);
    Vectorized<c10::complex<double>> scaled_values = _mm512_mul_pd(values, ln_2);
    return scaled_values.exp();
  }
  Vectorized<c10::complex<double>> expm1() const {
    return map(std::expm1);
  }
  Vectorized<c10::complex<double>> sin() const {
    return map(std::sin);
  }
  Vectorized<c10::complex<double>> sinh() const {
    return map(std::sinh);
  }
  Vectorized<c10::complex<double>> cos() const {
    return map(std::cos);
  }
  Vectorized<c10::complex<double>> cosh() const {
    return map(std::cosh);
  }
  // NOTE(review): ceil/floor/round/trunc act independently on the real and
  // imaginary double lanes — confirm that is the intended complex semantics.
  Vectorized<c10::complex<double>> ceil() const {
    return _mm512_ceil_pd(values);
  }
  Vectorized<c10::complex<double>> floor() const {
    return _mm512_floor_pd(values);
  }
  Vectorized<c10::complex<double>> neg() const {
    auto zero = _mm512_setzero_pd();
    return _mm512_sub_pd(zero, values);
  }
  Vectorized<c10::complex<double>> round() const {
    return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  }
  Vectorized<c10::complex<double>> tan() const {
    return map(std::tan);
  }
  Vectorized<c10::complex<double>> tanh() const {
    return map(std::tanh);
  }
  Vectorized<c10::complex<double>> trunc() const {
    return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  }
  Vectorized<c10::complex<double>> sqrt() const {
    return map(std::sqrt);
  }
  Vectorized<c10::complex<double>> reciprocal() const;
  Vectorized<c10::complex<double>> rsqrt() const {
    return sqrt().reciprocal();
  }
  // Element-wise complex power via scalar std::pow (no vector algorithm).
  Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
    __at_align__ c10::complex<double> x_tmp[size()];
    __at_align__ c10::complex<double> y_tmp[size()];
    store(x_tmp);
    exp.store(y_tmp);
    for (const auto i : c10::irange(size())) {
      x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
    }
    return loadu(x_tmp);
  }
  // Comparison using the _CMP_**_OQ predicate.
  // `O`: get false if an operand is NaN
  // `Q`: do not raise if an operand is NaN
  Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
    auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
    return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
                                                      0xFFFFFFFFFFFFFFFF));
  }
  Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
    auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ);
    return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
                                                      0xFFFFFFFFFFFFFFFF));
  }
  // Ordering comparisons are undefined for complex numbers.
  Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other [[maybe_unused]]) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
  Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
};
|
| 382 |
+
|
| 383 |
+
template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a,
|
| 384 |
+
const Vectorized<c10::complex<double>> &b) {
|
| 385 |
+
return _mm512_add_pd(a, b);
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a,
|
| 389 |
+
const Vectorized<c10::complex<double>> &b) {
|
| 390 |
+
return _mm512_sub_pd(a, b);
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a,
|
| 394 |
+
const Vectorized<c10::complex<double>> &b) {
|
| 395 |
+
//(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
|
| 396 |
+
const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 397 |
+
auto ac_bd = _mm512_mul_pd(a, b); //ac bd
|
| 398 |
+
|
| 399 |
+
auto d_c = _mm512_permute_pd(b, 0x55); //d c
|
| 400 |
+
d_c = _mm512_xor_pd(sign_mask, d_c); //d -c
|
| 401 |
+
auto ad_bc = _mm512_mul_pd(a, d_c); //ad -bc
|
| 402 |
+
|
| 403 |
+
auto ret = Vectorized<c10::complex<double>>::hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc
|
| 404 |
+
return ret;
|
| 405 |
+
}
|
| 406 |
+
|
| 407 |
+
template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a,
|
| 408 |
+
const Vectorized<c10::complex<double>> &b) {
|
| 409 |
+
//re + im*i = (a + bi) / (c + di)
|
| 410 |
+
auto mask = _mm512_set1_pd(-0.f);
|
| 411 |
+
auto fabs_cd = _mm512_andnot_pd(mask, b); // |c| |d|
|
| 412 |
+
auto fabs_dc = _mm512_permute_pd(fabs_cd, 0x55); // |d| |c|
|
| 413 |
+
auto scale = _mm512_rcp14_pd(_mm512_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
|
| 414 |
+
auto a2 = _mm512_mul_pd(a, scale); // a/sc b/sc
|
| 415 |
+
auto b2 = _mm512_mul_pd(b, scale); // c/sc d/sc
|
| 416 |
+
auto acbd2 = _mm512_mul_pd(a2, b2);
|
| 417 |
+
|
| 418 |
+
const __m512d sign_mask = _mm512_setr_pd(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
|
| 419 |
+
auto dc2 = _mm512_permute_pd(b2, 0x55); // d/sc c/sc
|
| 420 |
+
dc2 = _mm512_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
|
| 421 |
+
auto adbc2 = _mm512_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
|
| 422 |
+
auto res2 = Vectorized<c10::complex<double>>::hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
|
| 423 |
+
|
| 424 |
+
// get the denominator
|
| 425 |
+
auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
|
| 426 |
+
res2 = _mm512_div_pd(res2, denom2);
|
| 427 |
+
return res2;
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
// reciprocal. Implement this here so we can use multiplication.
|
| 431 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
|
| 432 |
+
//re + im*i = (a + bi) / (c + di)
|
| 433 |
+
//re = (ac + bd)/abs_2() = c/abs_2()
|
| 434 |
+
//im = (bc - ad)/abs_2() = d/abs_2()
|
| 435 |
+
const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 436 |
+
auto c_d = _mm512_xor_pd(sign_mask, values); //c -d
|
| 437 |
+
return _mm512_div_pd(c_d, abs_2_());
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
|
| 441 |
+
// atan(x) = i/2 * ln((i + z)/(i - z))
|
| 442 |
+
const __m512d i = _mm512_setr_pd(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
|
| 443 |
+
const Vectorized i_half = _mm512_setr_pd(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
|
| 444 |
+
|
| 445 |
+
auto sum = Vectorized(_mm512_add_pd(i, values)); // a 1+b
|
| 446 |
+
auto sub = Vectorized(_mm512_sub_pd(i, values)); // -a 1-b
|
| 447 |
+
auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
|
| 448 |
+
return i_half*ln; // i/2*ln()
|
| 449 |
+
}
|
| 450 |
+
|
| 451 |
+
template <>
|
| 452 |
+
Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a,
|
| 453 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 454 |
+
auto zero_vec = _mm512_set1_epi64(0);
|
| 455 |
+
auto abs_a = a.abs_2_();
|
| 456 |
+
auto abs_b = b.abs_2_();
|
| 457 |
+
auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_LT_OQ);
|
| 458 |
+
auto max = _mm512_mask_blend_pd(mask, a, b);
|
| 459 |
+
// Exploit the fact that all-ones is a NaN.
|
| 460 |
+
auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
|
| 461 |
+
auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
|
| 462 |
+
0xFFFFFFFFFFFFFFFF);
|
| 463 |
+
return _mm512_or_pd(max, _mm512_castsi512_pd(isnan));
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
template <>
|
| 467 |
+
Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a,
|
| 468 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 469 |
+
auto zero_vec = _mm512_set1_epi64(0);
|
| 470 |
+
auto abs_a = a.abs_2_();
|
| 471 |
+
auto abs_b = b.abs_2_();
|
| 472 |
+
auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_GT_OQ);
|
| 473 |
+
auto min = _mm512_mask_blend_pd(mask, a, b);
|
| 474 |
+
// Exploit the fact that all-ones is a NaN.
|
| 475 |
+
auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
|
| 476 |
+
auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
|
| 477 |
+
0xFFFFFFFFFFFFFFFF);
|
| 478 |
+
return _mm512_or_pd(min, _mm512_castsi512_pd(isnan));
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
template <>
|
| 482 |
+
Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a,
|
| 483 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 484 |
+
return _mm512_and_pd(a, b);
|
| 485 |
+
}
|
| 486 |
+
|
| 487 |
+
template <>
|
| 488 |
+
Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a,
|
| 489 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 490 |
+
return _mm512_or_pd(a, b);
|
| 491 |
+
}
|
| 492 |
+
|
| 493 |
+
template <>
|
| 494 |
+
Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a,
|
| 495 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 496 |
+
return _mm512_xor_pd(a, b);
|
| 497 |
+
}
|
| 498 |
+
|
| 499 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
|
| 500 |
+
auto eq = (*this == other); // compares real and imag individually
|
| 501 |
+
// If both real numbers and imag numbers are equal, then the complex numbers are equal
|
| 502 |
+
return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
|
| 506 |
+
auto ne = (*this != other); // compares real and imag individually
|
| 507 |
+
// If either real numbers or imag numbers are not equal, then the complex numbers are not equal
|
| 508 |
+
return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
#endif
|
| 512 |
+
|
| 513 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h
ADDED
|
@@ -0,0 +1,1019 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <c10/util/complex.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at {
|
| 16 |
+
namespace vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 21 |
+
|
| 22 |
+
template <> class Vectorized<c10::complex<float>> {
|
| 23 |
+
private:
|
| 24 |
+
__m512 values;
|
| 25 |
+
static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
|
| 26 |
+
public:
|
| 27 |
+
using value_type = c10::complex<float>;
|
| 28 |
+
using size_type = int;
|
| 29 |
+
static constexpr size_type size() {
|
| 30 |
+
return 8;
|
| 31 |
+
}
|
| 32 |
+
Vectorized() {}
|
| 33 |
+
Vectorized(__m512 v) : values(v) {}
|
| 34 |
+
Vectorized(c10::complex<float> val) {
|
| 35 |
+
float real_value = val.real();
|
| 36 |
+
float imag_value = val.imag();
|
| 37 |
+
values = _mm512_setr_ps(real_value, imag_value,
|
| 38 |
+
real_value, imag_value,
|
| 39 |
+
real_value, imag_value,
|
| 40 |
+
real_value, imag_value,
|
| 41 |
+
real_value, imag_value,
|
| 42 |
+
real_value, imag_value,
|
| 43 |
+
real_value, imag_value,
|
| 44 |
+
real_value, imag_value);
|
| 45 |
+
}
|
| 46 |
+
Vectorized(c10::complex<float> val1, c10::complex<float> val2,
|
| 47 |
+
c10::complex<float> val3, c10::complex<float> val4,
|
| 48 |
+
c10::complex<float> val5, c10::complex<float> val6,
|
| 49 |
+
c10::complex<float> val7, c10::complex<float> val8) {
|
| 50 |
+
values = _mm512_setr_ps(val1.real(), val1.imag(),
|
| 51 |
+
val2.real(), val2.imag(),
|
| 52 |
+
val3.real(), val3.imag(),
|
| 53 |
+
val4.real(), val4.imag(),
|
| 54 |
+
val5.real(), val5.imag(),
|
| 55 |
+
val6.real(), val6.imag(),
|
| 56 |
+
val7.real(), val7.imag(),
|
| 57 |
+
val8.real(), val8.imag());
|
| 58 |
+
}
|
| 59 |
+
operator __m512() const {
|
| 60 |
+
return values;
|
| 61 |
+
}
|
| 62 |
+
template <int64_t mask>
|
| 63 |
+
static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a,
|
| 64 |
+
const Vectorized<c10::complex<float>>& b) {
|
| 65 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
| 66 |
+
static_assert(mask > -1 && mask < 256, "Unexpected mask value");
|
| 67 |
+
// The compiler would hopefully convert this switch condition
|
| 68 |
+
// into a jump table
|
| 69 |
+
switch (mask) {
|
| 70 |
+
case 0:
|
| 71 |
+
return a;
|
| 72 |
+
case 1:
|
| 73 |
+
return _mm512_mask_blend_ps(0x03, a.values, b.values);
|
| 74 |
+
case 2:
|
| 75 |
+
return _mm512_mask_blend_ps(0x0C, a.values, b.values);
|
| 76 |
+
case 3:
|
| 77 |
+
return _mm512_mask_blend_ps(0x0F, a.values, b.values);
|
| 78 |
+
case 4:
|
| 79 |
+
return _mm512_mask_blend_ps(0x30, a.values, b.values);
|
| 80 |
+
case 5:
|
| 81 |
+
return _mm512_mask_blend_ps(0x33, a.values, b.values);
|
| 82 |
+
case 6:
|
| 83 |
+
return _mm512_mask_blend_ps(0x3C, a.values, b.values);
|
| 84 |
+
case 7:
|
| 85 |
+
return _mm512_mask_blend_ps(0x3F, a.values, b.values);
|
| 86 |
+
case 8:
|
| 87 |
+
return _mm512_mask_blend_ps(0xC0, a.values, b.values);
|
| 88 |
+
case 9:
|
| 89 |
+
return _mm512_mask_blend_ps(0xC3, a.values, b.values);
|
| 90 |
+
case 10:
|
| 91 |
+
return _mm512_mask_blend_ps(0xCC, a.values, b.values);
|
| 92 |
+
case 11:
|
| 93 |
+
return _mm512_mask_blend_ps(0xCF, a.values, b.values);
|
| 94 |
+
case 12:
|
| 95 |
+
return _mm512_mask_blend_ps(0xF0, a.values, b.values);
|
| 96 |
+
case 13:
|
| 97 |
+
return _mm512_mask_blend_ps(0xF3, a.values, b.values);
|
| 98 |
+
case 14:
|
| 99 |
+
return _mm512_mask_blend_ps(0xFC, a.values, b.values);
|
| 100 |
+
case 15:
|
| 101 |
+
return _mm512_mask_blend_ps(0xFF, a.values, b.values);
|
| 102 |
+
case 16:
|
| 103 |
+
return _mm512_mask_blend_ps(0x300, a.values, b.values);
|
| 104 |
+
case 17:
|
| 105 |
+
return _mm512_mask_blend_ps(0x303, a.values, b.values);
|
| 106 |
+
case 18:
|
| 107 |
+
return _mm512_mask_blend_ps(0x30C, a.values, b.values);
|
| 108 |
+
case 19:
|
| 109 |
+
return _mm512_mask_blend_ps(0x30F, a.values, b.values);
|
| 110 |
+
case 20:
|
| 111 |
+
return _mm512_mask_blend_ps(0x330, a.values, b.values);
|
| 112 |
+
case 21:
|
| 113 |
+
return _mm512_mask_blend_ps(0x333, a.values, b.values);
|
| 114 |
+
case 22:
|
| 115 |
+
return _mm512_mask_blend_ps(0x33C, a.values, b.values);
|
| 116 |
+
case 23:
|
| 117 |
+
return _mm512_mask_blend_ps(0x33F, a.values, b.values);
|
| 118 |
+
case 24:
|
| 119 |
+
return _mm512_mask_blend_ps(0x3C0, a.values, b.values);
|
| 120 |
+
case 25:
|
| 121 |
+
return _mm512_mask_blend_ps(0x3C3, a.values, b.values);
|
| 122 |
+
case 26:
|
| 123 |
+
return _mm512_mask_blend_ps(0x3CC, a.values, b.values);
|
| 124 |
+
case 27:
|
| 125 |
+
return _mm512_mask_blend_ps(0x3CF, a.values, b.values);
|
| 126 |
+
case 28:
|
| 127 |
+
return _mm512_mask_blend_ps(0x3F0, a.values, b.values);
|
| 128 |
+
case 29:
|
| 129 |
+
return _mm512_mask_blend_ps(0x3F3, a.values, b.values);
|
| 130 |
+
case 30:
|
| 131 |
+
return _mm512_mask_blend_ps(0x3FC, a.values, b.values);
|
| 132 |
+
case 31:
|
| 133 |
+
return _mm512_mask_blend_ps(0x3FF, a.values, b.values);
|
| 134 |
+
case 32:
|
| 135 |
+
return _mm512_mask_blend_ps(0xC00, a.values, b.values);
|
| 136 |
+
case 33:
|
| 137 |
+
return _mm512_mask_blend_ps(0xC03, a.values, b.values);
|
| 138 |
+
case 34:
|
| 139 |
+
return _mm512_mask_blend_ps(0xC0C, a.values, b.values);
|
| 140 |
+
case 35:
|
| 141 |
+
return _mm512_mask_blend_ps(0xC0F, a.values, b.values);
|
| 142 |
+
case 36:
|
| 143 |
+
return _mm512_mask_blend_ps(0xC30, a.values, b.values);
|
| 144 |
+
case 37:
|
| 145 |
+
return _mm512_mask_blend_ps(0xC33, a.values, b.values);
|
| 146 |
+
case 38:
|
| 147 |
+
return _mm512_mask_blend_ps(0xC3C, a.values, b.values);
|
| 148 |
+
case 39:
|
| 149 |
+
return _mm512_mask_blend_ps(0xC3F, a.values, b.values);
|
| 150 |
+
case 40:
|
| 151 |
+
return _mm512_mask_blend_ps(0xCC0, a.values, b.values);
|
| 152 |
+
case 41:
|
| 153 |
+
return _mm512_mask_blend_ps(0xCC3, a.values, b.values);
|
| 154 |
+
case 42:
|
| 155 |
+
return _mm512_mask_blend_ps(0xCCC, a.values, b.values);
|
| 156 |
+
case 43:
|
| 157 |
+
return _mm512_mask_blend_ps(0xCCF, a.values, b.values);
|
| 158 |
+
case 44:
|
| 159 |
+
return _mm512_mask_blend_ps(0xCF0, a.values, b.values);
|
| 160 |
+
case 45:
|
| 161 |
+
return _mm512_mask_blend_ps(0xCF3, a.values, b.values);
|
| 162 |
+
case 46:
|
| 163 |
+
return _mm512_mask_blend_ps(0xCFC, a.values, b.values);
|
| 164 |
+
case 47:
|
| 165 |
+
return _mm512_mask_blend_ps(0xCFF, a.values, b.values);
|
| 166 |
+
case 48:
|
| 167 |
+
return _mm512_mask_blend_ps(0xF00, a.values, b.values);
|
| 168 |
+
case 49:
|
| 169 |
+
return _mm512_mask_blend_ps(0xF03, a.values, b.values);
|
| 170 |
+
case 50:
|
| 171 |
+
return _mm512_mask_blend_ps(0xF0C, a.values, b.values);
|
| 172 |
+
case 51:
|
| 173 |
+
return _mm512_mask_blend_ps(0xF0F, a.values, b.values);
|
| 174 |
+
case 52:
|
| 175 |
+
return _mm512_mask_blend_ps(0xF30, a.values, b.values);
|
| 176 |
+
case 53:
|
| 177 |
+
return _mm512_mask_blend_ps(0xF33, a.values, b.values);
|
| 178 |
+
case 54:
|
| 179 |
+
return _mm512_mask_blend_ps(0xF3C, a.values, b.values);
|
| 180 |
+
case 55:
|
| 181 |
+
return _mm512_mask_blend_ps(0xF3F, a.values, b.values);
|
| 182 |
+
case 56:
|
| 183 |
+
return _mm512_mask_blend_ps(0xFC0, a.values, b.values);
|
| 184 |
+
case 57:
|
| 185 |
+
return _mm512_mask_blend_ps(0xFC3, a.values, b.values);
|
| 186 |
+
case 58:
|
| 187 |
+
return _mm512_mask_blend_ps(0xFCC, a.values, b.values);
|
| 188 |
+
case 59:
|
| 189 |
+
return _mm512_mask_blend_ps(0xFCF, a.values, b.values);
|
| 190 |
+
case 60:
|
| 191 |
+
return _mm512_mask_blend_ps(0xFF0, a.values, b.values);
|
| 192 |
+
case 61:
|
| 193 |
+
return _mm512_mask_blend_ps(0xFF3, a.values, b.values);
|
| 194 |
+
case 62:
|
| 195 |
+
return _mm512_mask_blend_ps(0xFFC, a.values, b.values);
|
| 196 |
+
case 63:
|
| 197 |
+
return _mm512_mask_blend_ps(0xFFF, a.values, b.values);
|
| 198 |
+
case 64:
|
| 199 |
+
return _mm512_mask_blend_ps(0x3000, a.values, b.values);
|
| 200 |
+
case 65:
|
| 201 |
+
return _mm512_mask_blend_ps(0x3003, a.values, b.values);
|
| 202 |
+
case 66:
|
| 203 |
+
return _mm512_mask_blend_ps(0x300C, a.values, b.values);
|
| 204 |
+
case 67:
|
| 205 |
+
return _mm512_mask_blend_ps(0x300F, a.values, b.values);
|
| 206 |
+
case 68:
|
| 207 |
+
return _mm512_mask_blend_ps(0x3030, a.values, b.values);
|
| 208 |
+
case 69:
|
| 209 |
+
return _mm512_mask_blend_ps(0x3033, a.values, b.values);
|
| 210 |
+
case 70:
|
| 211 |
+
return _mm512_mask_blend_ps(0x303C, a.values, b.values);
|
| 212 |
+
case 71:
|
| 213 |
+
return _mm512_mask_blend_ps(0x303F, a.values, b.values);
|
| 214 |
+
case 72:
|
| 215 |
+
return _mm512_mask_blend_ps(0x30C0, a.values, b.values);
|
| 216 |
+
case 73:
|
| 217 |
+
return _mm512_mask_blend_ps(0X30C3, a.values, b.values);
|
| 218 |
+
case 74:
|
| 219 |
+
return _mm512_mask_blend_ps(0x30CC, a.values, b.values);
|
| 220 |
+
case 75:
|
| 221 |
+
return _mm512_mask_blend_ps(0x30CF, a.values, b.values);
|
| 222 |
+
case 76:
|
| 223 |
+
return _mm512_mask_blend_ps(0x30F0, a.values, b.values);
|
| 224 |
+
case 77:
|
| 225 |
+
return _mm512_mask_blend_ps(0x30F3, a.values, b.values);
|
| 226 |
+
case 78:
|
| 227 |
+
return _mm512_mask_blend_ps(0x30FC, a.values, b.values);
|
| 228 |
+
case 79:
|
| 229 |
+
return _mm512_mask_blend_ps(0x30FF, a.values, b.values);
|
| 230 |
+
case 80:
|
| 231 |
+
return _mm512_mask_blend_ps(0x3300, a.values, b.values);
|
| 232 |
+
case 81:
|
| 233 |
+
return _mm512_mask_blend_ps(0X3303, a.values, b.values);
|
| 234 |
+
case 82:
|
| 235 |
+
return _mm512_mask_blend_ps(0x330C, a.values, b.values);
|
| 236 |
+
case 83:
|
| 237 |
+
return _mm512_mask_blend_ps(0x330F, a.values, b.values);
|
| 238 |
+
case 84:
|
| 239 |
+
return _mm512_mask_blend_ps(0x3330, a.values, b.values);
|
| 240 |
+
case 85:
|
| 241 |
+
return _mm512_mask_blend_ps(0x3333, a.values, b.values);
|
| 242 |
+
case 86:
|
| 243 |
+
return _mm512_mask_blend_ps(0x333C, a.values, b.values);
|
| 244 |
+
case 87:
|
| 245 |
+
return _mm512_mask_blend_ps(0X333F, a.values, b.values);
|
| 246 |
+
case 88:
|
| 247 |
+
return _mm512_mask_blend_ps(0x33C0, a.values, b.values);
|
| 248 |
+
case 89:
|
| 249 |
+
return _mm512_mask_blend_ps(0x33C3, a.values, b.values);
|
| 250 |
+
case 90:
|
| 251 |
+
return _mm512_mask_blend_ps(0x33CC, a.values, b.values);
|
| 252 |
+
case 91:
|
| 253 |
+
return _mm512_mask_blend_ps(0x33CF, a.values, b.values);
|
| 254 |
+
case 92:
|
| 255 |
+
return _mm512_mask_blend_ps(0x33F0, a.values, b.values);
|
| 256 |
+
case 93:
|
| 257 |
+
return _mm512_mask_blend_ps(0x33F3, a.values, b.values);
|
| 258 |
+
case 94:
|
| 259 |
+
return _mm512_mask_blend_ps(0x33FC, a.values, b.values);
|
| 260 |
+
case 95:
|
| 261 |
+
return _mm512_mask_blend_ps(0x33FF, a.values, b.values);
|
| 262 |
+
case 96:
|
| 263 |
+
return _mm512_mask_blend_ps(0X3C00, a.values, b.values);
|
| 264 |
+
case 97:
|
| 265 |
+
return _mm512_mask_blend_ps(0x3C03, a.values, b.values);
|
| 266 |
+
case 98:
|
| 267 |
+
return _mm512_mask_blend_ps(0x3C0C, a.values, b.values);
|
| 268 |
+
case 99:
|
| 269 |
+
return _mm512_mask_blend_ps(0x3C0F, a.values, b.values);
|
| 270 |
+
case 100:
|
| 271 |
+
return _mm512_mask_blend_ps(0x3C30, a.values, b.values);
|
| 272 |
+
case 101:
|
| 273 |
+
return _mm512_mask_blend_ps(0x3C33, a.values, b.values);
|
| 274 |
+
case 102:
|
| 275 |
+
return _mm512_mask_blend_ps(0x3C3C, a.values, b.values);
|
| 276 |
+
case 103:
|
| 277 |
+
return _mm512_mask_blend_ps(0x3C3F, a.values, b.values);
|
| 278 |
+
case 104:
|
| 279 |
+
return _mm512_mask_blend_ps(0x3CC0, a.values, b.values);
|
| 280 |
+
case 105:
|
| 281 |
+
return _mm512_mask_blend_ps(0x3CC3, a.values, b.values);
|
| 282 |
+
case 106:
|
| 283 |
+
return _mm512_mask_blend_ps(0x3CCC, a.values, b.values);
|
| 284 |
+
case 107:
|
| 285 |
+
return _mm512_mask_blend_ps(0x3CCF, a.values, b.values);
|
| 286 |
+
case 108:
|
| 287 |
+
return _mm512_mask_blend_ps(0x3CF0, a.values, b.values);
|
| 288 |
+
case 109:
|
| 289 |
+
return _mm512_mask_blend_ps(0x3CF3, a.values, b.values);
|
| 290 |
+
case 110:
|
| 291 |
+
return _mm512_mask_blend_ps(0x3CFC, a.values, b.values);
|
| 292 |
+
case 111:
|
| 293 |
+
return _mm512_mask_blend_ps(0x3CFF, a.values, b.values);
|
| 294 |
+
case 112:
|
| 295 |
+
return _mm512_mask_blend_ps(0x3F00, a.values, b.values);
|
| 296 |
+
case 113:
|
| 297 |
+
return _mm512_mask_blend_ps(0x3F03, a.values, b.values);
|
| 298 |
+
case 114:
|
| 299 |
+
return _mm512_mask_blend_ps(0x3F0C, a.values, b.values);
|
| 300 |
+
case 115:
|
| 301 |
+
return _mm512_mask_blend_ps(0x3F0F, a.values, b.values);
|
| 302 |
+
case 116:
|
| 303 |
+
return _mm512_mask_blend_ps(0x3F30, a.values, b.values);
|
| 304 |
+
case 117:
|
| 305 |
+
return _mm512_mask_blend_ps(0x3F33, a.values, b.values);
|
| 306 |
+
case 118:
|
| 307 |
+
return _mm512_mask_blend_ps(0x3F3C, a.values, b.values);
|
| 308 |
+
case 119:
|
| 309 |
+
return _mm512_mask_blend_ps(0x3F3F, a.values, b.values);
|
| 310 |
+
case 120:
|
| 311 |
+
return _mm512_mask_blend_ps(0x3FC0, a.values, b.values);
|
| 312 |
+
case 121:
|
| 313 |
+
return _mm512_mask_blend_ps(0x3FC3, a.values, b.values);
|
| 314 |
+
case 122:
|
| 315 |
+
return _mm512_mask_blend_ps(0x3FCC, a.values, b.values);
|
| 316 |
+
case 123:
|
| 317 |
+
return _mm512_mask_blend_ps(0x3FCF, a.values, b.values);
|
| 318 |
+
case 124:
|
| 319 |
+
return _mm512_mask_blend_ps(0x3FF0, a.values, b.values);
|
| 320 |
+
case 125:
|
| 321 |
+
return _mm512_mask_blend_ps(0x3FF3, a.values, b.values);
|
| 322 |
+
case 126:
|
| 323 |
+
return _mm512_mask_blend_ps(0x3FFC, a.values, b.values);
|
| 324 |
+
case 127:
|
| 325 |
+
return _mm512_mask_blend_ps(0x3FFF, a.values, b.values);
|
| 326 |
+
case 128:
|
| 327 |
+
return _mm512_mask_blend_ps(0xC000, a.values, b.values);
|
| 328 |
+
case 129:
|
| 329 |
+
return _mm512_mask_blend_ps(0xC003, a.values, b.values);
|
| 330 |
+
case 130:
|
| 331 |
+
return _mm512_mask_blend_ps(0xC00C, a.values, b.values);
|
| 332 |
+
case 131:
|
| 333 |
+
return _mm512_mask_blend_ps(0xC00F, a.values, b.values);
|
| 334 |
+
case 132:
|
| 335 |
+
return _mm512_mask_blend_ps(0xC030, a.values, b.values);
|
| 336 |
+
case 133:
|
| 337 |
+
return _mm512_mask_blend_ps(0xC033, a.values, b.values);
|
| 338 |
+
case 134:
|
| 339 |
+
return _mm512_mask_blend_ps(0xC03C, a.values, b.values);
|
| 340 |
+
case 135:
|
| 341 |
+
return _mm512_mask_blend_ps(0xC03F, a.values, b.values);
|
| 342 |
+
case 136:
|
| 343 |
+
return _mm512_mask_blend_ps(0xC0C0, a.values, b.values);
|
| 344 |
+
case 137:
|
| 345 |
+
return _mm512_mask_blend_ps(0xC0C3, a.values, b.values);
|
| 346 |
+
case 138:
|
| 347 |
+
return _mm512_mask_blend_ps(0xC0CC, a.values, b.values);
|
| 348 |
+
case 139:
|
| 349 |
+
return _mm512_mask_blend_ps(0xC0CF, a.values, b.values);
|
| 350 |
+
case 140:
|
| 351 |
+
return _mm512_mask_blend_ps(0xC0F0, a.values, b.values);
|
| 352 |
+
case 141:
|
| 353 |
+
return _mm512_mask_blend_ps(0xC0F3, a.values, b.values);
|
| 354 |
+
case 142:
|
| 355 |
+
return _mm512_mask_blend_ps(0xC0FC, a.values, b.values);
|
| 356 |
+
case 143:
|
| 357 |
+
return _mm512_mask_blend_ps(0xC0FF, a.values, b.values);
|
| 358 |
+
case 144:
|
| 359 |
+
return _mm512_mask_blend_ps(0xC300, a.values, b.values);
|
| 360 |
+
case 145:
|
| 361 |
+
return _mm512_mask_blend_ps(0xC303, a.values, b.values);
|
| 362 |
+
case 146:
|
| 363 |
+
return _mm512_mask_blend_ps(0xC30C, a.values, b.values);
|
| 364 |
+
case 147:
|
| 365 |
+
return _mm512_mask_blend_ps(0xC30F, a.values, b.values);
|
| 366 |
+
case 148:
|
| 367 |
+
return _mm512_mask_blend_ps(0xC330, a.values, b.values);
|
| 368 |
+
case 149:
|
| 369 |
+
return _mm512_mask_blend_ps(0xC333, a.values, b.values);
|
| 370 |
+
case 150:
|
| 371 |
+
return _mm512_mask_blend_ps(0xC33C, a.values, b.values);
|
| 372 |
+
case 151:
|
| 373 |
+
return _mm512_mask_blend_ps(0xC33F, a.values, b.values);
|
| 374 |
+
case 152:
|
| 375 |
+
return _mm512_mask_blend_ps(0xC3C0, a.values, b.values);
|
| 376 |
+
case 153:
|
| 377 |
+
return _mm512_mask_blend_ps(0xC3C3, a.values, b.values);
|
| 378 |
+
case 154:
|
| 379 |
+
return _mm512_mask_blend_ps(0xC3CC, a.values, b.values);
|
| 380 |
+
case 155:
|
| 381 |
+
return _mm512_mask_blend_ps(0xC3CF, a.values, b.values);
|
| 382 |
+
case 156:
|
| 383 |
+
return _mm512_mask_blend_ps(0xC3F0, a.values, b.values);
|
| 384 |
+
case 157:
|
| 385 |
+
return _mm512_mask_blend_ps(0xC3F3, a.values, b.values);
|
| 386 |
+
case 158:
|
| 387 |
+
return _mm512_mask_blend_ps(0xC3FC, a.values, b.values);
|
| 388 |
+
case 159:
|
| 389 |
+
return _mm512_mask_blend_ps(0xC3FF, a.values, b.values);
|
| 390 |
+
case 160:
|
| 391 |
+
return _mm512_mask_blend_ps(0xCC00, a.values, b.values);
|
| 392 |
+
case 161:
|
| 393 |
+
return _mm512_mask_blend_ps(0xCC03, a.values, b.values);
|
| 394 |
+
case 162:
|
| 395 |
+
return _mm512_mask_blend_ps(0xCC0C, a.values, b.values);
|
| 396 |
+
case 163:
|
| 397 |
+
return _mm512_mask_blend_ps(0xCC0F, a.values, b.values);
|
| 398 |
+
case 164:
|
| 399 |
+
return _mm512_mask_blend_ps(0xCC30, a.values, b.values);
|
| 400 |
+
case 165:
|
| 401 |
+
return _mm512_mask_blend_ps(0xCC33, a.values, b.values);
|
| 402 |
+
case 166:
|
| 403 |
+
return _mm512_mask_blend_ps(0xCC3C, a.values, b.values);
|
| 404 |
+
case 167:
|
| 405 |
+
return _mm512_mask_blend_ps(0xCC3F, a.values, b.values);
|
| 406 |
+
case 168:
|
| 407 |
+
return _mm512_mask_blend_ps(0xCCC0, a.values, b.values);
|
| 408 |
+
case 169:
|
| 409 |
+
return _mm512_mask_blend_ps(0xCCC3, a.values, b.values);
|
| 410 |
+
case 170:
|
| 411 |
+
return _mm512_mask_blend_ps(0xCCCC, a.values, b.values);
|
| 412 |
+
case 171:
|
| 413 |
+
return _mm512_mask_blend_ps(0xCCCF, a.values, b.values);
|
| 414 |
+
case 172:
|
| 415 |
+
return _mm512_mask_blend_ps(0xCCF0, a.values, b.values);
|
| 416 |
+
case 173:
|
| 417 |
+
return _mm512_mask_blend_ps(0xCCF3, a.values, b.values);
|
| 418 |
+
case 174:
|
| 419 |
+
return _mm512_mask_blend_ps(0xCCFC, a.values, b.values);
|
| 420 |
+
case 175:
|
| 421 |
+
return _mm512_mask_blend_ps(0xCCFF, a.values, b.values);
|
| 422 |
+
case 176:
|
| 423 |
+
return _mm512_mask_blend_ps(0xCF00, a.values, b.values);
|
| 424 |
+
case 177:
|
| 425 |
+
return _mm512_mask_blend_ps(0xCF03, a.values, b.values);
|
| 426 |
+
case 178:
|
| 427 |
+
return _mm512_mask_blend_ps(0xCF0C, a.values, b.values);
|
| 428 |
+
case 179:
|
| 429 |
+
return _mm512_mask_blend_ps(0xCF0F, a.values, b.values);
|
| 430 |
+
case 180:
|
| 431 |
+
return _mm512_mask_blend_ps(0xCF30, a.values, b.values);
|
| 432 |
+
case 181:
|
| 433 |
+
return _mm512_mask_blend_ps(0xCF33, a.values, b.values);
|
| 434 |
+
case 182:
|
| 435 |
+
return _mm512_mask_blend_ps(0xCF3C, a.values, b.values);
|
| 436 |
+
case 183:
|
| 437 |
+
return _mm512_mask_blend_ps(0xCF3F, a.values, b.values);
|
| 438 |
+
case 184:
|
| 439 |
+
return _mm512_mask_blend_ps(0xCFC0, a.values, b.values);
|
| 440 |
+
case 185:
|
| 441 |
+
return _mm512_mask_blend_ps(0xCFC3, a.values, b.values);
|
| 442 |
+
case 186:
|
| 443 |
+
return _mm512_mask_blend_ps(0xCFCC, a.values, b.values);
|
| 444 |
+
case 187:
|
| 445 |
+
return _mm512_mask_blend_ps(0xCFCF, a.values, b.values);
|
| 446 |
+
case 188:
|
| 447 |
+
return _mm512_mask_blend_ps(0xCFF0, a.values, b.values);
|
| 448 |
+
case 189:
|
| 449 |
+
return _mm512_mask_blend_ps(0xCFF3, a.values, b.values);
|
| 450 |
+
case 190:
|
| 451 |
+
return _mm512_mask_blend_ps(0xCFFC, a.values, b.values);
|
| 452 |
+
case 191:
|
| 453 |
+
return _mm512_mask_blend_ps(0xCFFF, a.values, b.values);
|
| 454 |
+
case 192:
|
| 455 |
+
return _mm512_mask_blend_ps(0xF000, a.values, b.values);
|
| 456 |
+
case 193:
|
| 457 |
+
return _mm512_mask_blend_ps(0xF003, a.values, b.values);
|
| 458 |
+
case 194:
|
| 459 |
+
return _mm512_mask_blend_ps(0xF00C, a.values, b.values);
|
| 460 |
+
case 195:
|
| 461 |
+
return _mm512_mask_blend_ps(0xF00F, a.values, b.values);
|
| 462 |
+
case 196:
|
| 463 |
+
return _mm512_mask_blend_ps(0xF030, a.values, b.values);
|
| 464 |
+
case 197:
|
| 465 |
+
return _mm512_mask_blend_ps(0xF033, a.values, b.values);
|
| 466 |
+
case 198:
|
| 467 |
+
return _mm512_mask_blend_ps(0xF03C, a.values, b.values);
|
| 468 |
+
case 199:
|
| 469 |
+
return _mm512_mask_blend_ps(0xF03F, a.values, b.values);
|
| 470 |
+
case 200:
|
| 471 |
+
return _mm512_mask_blend_ps(0XF0C0, a.values, b.values);
|
| 472 |
+
case 201:
|
| 473 |
+
return _mm512_mask_blend_ps(0xF0C3, a.values, b.values);
|
| 474 |
+
case 202:
|
| 475 |
+
return _mm512_mask_blend_ps(0xF0CC, a.values, b.values);
|
| 476 |
+
case 203:
|
| 477 |
+
return _mm512_mask_blend_ps(0xF0CF, a.values, b.values);
|
| 478 |
+
case 204:
|
| 479 |
+
return _mm512_mask_blend_ps(0xF0F0, a.values, b.values);
|
| 480 |
+
case 205:
|
| 481 |
+
return _mm512_mask_blend_ps(0xF0F3, a.values, b.values);
|
| 482 |
+
case 206:
|
| 483 |
+
return _mm512_mask_blend_ps(0xF0FC, a.values, b.values);
|
| 484 |
+
case 207:
|
| 485 |
+
return _mm512_mask_blend_ps(0xF0FF, a.values, b.values);
|
| 486 |
+
case 208:
|
| 487 |
+
return _mm512_mask_blend_ps(0XF300, a.values, b.values);
|
| 488 |
+
case 209:
|
| 489 |
+
return _mm512_mask_blend_ps(0xF303, a.values, b.values);
|
| 490 |
+
case 210:
|
| 491 |
+
return _mm512_mask_blend_ps(0xF30C, a.values, b.values);
|
| 492 |
+
case 211:
|
| 493 |
+
return _mm512_mask_blend_ps(0xF30F, a.values, b.values);
|
| 494 |
+
case 212:
|
| 495 |
+
return _mm512_mask_blend_ps(0xF330, a.values, b.values);
|
| 496 |
+
case 213:
|
| 497 |
+
return _mm512_mask_blend_ps(0xF333, a.values, b.values);
|
| 498 |
+
case 214:
|
| 499 |
+
return _mm512_mask_blend_ps(0XF33C, a.values, b.values);
|
| 500 |
+
case 215:
|
| 501 |
+
return _mm512_mask_blend_ps(0xF33F, a.values, b.values);
|
| 502 |
+
case 216:
|
| 503 |
+
return _mm512_mask_blend_ps(0xF3C0, a.values, b.values);
|
| 504 |
+
case 217:
|
| 505 |
+
return _mm512_mask_blend_ps(0xF3C3, a.values, b.values);
|
| 506 |
+
case 218:
|
| 507 |
+
return _mm512_mask_blend_ps(0xF3CC, a.values, b.values);
|
| 508 |
+
case 219:
|
| 509 |
+
return _mm512_mask_blend_ps(0xF3CF, a.values, b.values);
|
| 510 |
+
case 220:
|
| 511 |
+
return _mm512_mask_blend_ps(0xF3F0, a.values, b.values);
|
| 512 |
+
case 221:
|
| 513 |
+
return _mm512_mask_blend_ps(0xF3F3, a.values, b.values);
|
| 514 |
+
case 222:
|
| 515 |
+
return _mm512_mask_blend_ps(0xF3FC, a.values, b.values);
|
| 516 |
+
case 223:
|
| 517 |
+
return _mm512_mask_blend_ps(0XF3FF, a.values, b.values);
|
| 518 |
+
case 224:
|
| 519 |
+
return _mm512_mask_blend_ps(0xFC00, a.values, b.values);
|
| 520 |
+
case 225:
|
| 521 |
+
return _mm512_mask_blend_ps(0xFC03, a.values, b.values);
|
| 522 |
+
case 226:
|
| 523 |
+
return _mm512_mask_blend_ps(0xFC0C, a.values, b.values);
|
| 524 |
+
case 227:
|
| 525 |
+
return _mm512_mask_blend_ps(0xFC0F, a.values, b.values);
|
| 526 |
+
case 228:
|
| 527 |
+
return _mm512_mask_blend_ps(0xFC30, a.values, b.values);
|
| 528 |
+
case 229:
|
| 529 |
+
return _mm512_mask_blend_ps(0xFC33, a.values, b.values);
|
| 530 |
+
case 230:
|
| 531 |
+
return _mm512_mask_blend_ps(0xFC3C, a.values, b.values);
|
| 532 |
+
case 231:
|
| 533 |
+
return _mm512_mask_blend_ps(0xFC3F, a.values, b.values);
|
| 534 |
+
case 232:
|
| 535 |
+
return _mm512_mask_blend_ps(0xFCC0, a.values, b.values);
|
| 536 |
+
case 233:
|
| 537 |
+
return _mm512_mask_blend_ps(0xFCC3, a.values, b.values);
|
| 538 |
+
case 234:
|
| 539 |
+
return _mm512_mask_blend_ps(0xFCCC, a.values, b.values);
|
| 540 |
+
case 235:
|
| 541 |
+
return _mm512_mask_blend_ps(0xFCCF, a.values, b.values);
|
| 542 |
+
case 236:
|
| 543 |
+
return _mm512_mask_blend_ps(0xFCF0, a.values, b.values);
|
| 544 |
+
case 237:
|
| 545 |
+
return _mm512_mask_blend_ps(0xFCF3, a.values, b.values);
|
| 546 |
+
case 238:
|
| 547 |
+
return _mm512_mask_blend_ps(0xFCFC, a.values, b.values);
|
| 548 |
+
case 239:
|
| 549 |
+
return _mm512_mask_blend_ps(0xFCFF, a.values, b.values);
|
| 550 |
+
case 240:
|
| 551 |
+
return _mm512_mask_blend_ps(0xFF00, a.values, b.values);
|
| 552 |
+
case 241:
|
| 553 |
+
return _mm512_mask_blend_ps(0xFF03, a.values, b.values);
|
| 554 |
+
case 242:
|
| 555 |
+
return _mm512_mask_blend_ps(0xFF0C, a.values, b.values);
|
| 556 |
+
case 243:
|
| 557 |
+
return _mm512_mask_blend_ps(0xFF0F, a.values, b.values);
|
| 558 |
+
case 244:
|
| 559 |
+
return _mm512_mask_blend_ps(0xFF30, a.values, b.values);
|
| 560 |
+
case 245:
|
| 561 |
+
return _mm512_mask_blend_ps(0xFF33, a.values, b.values);
|
| 562 |
+
case 246:
|
| 563 |
+
return _mm512_mask_blend_ps(0xFF3C, a.values, b.values);
|
| 564 |
+
case 247:
|
| 565 |
+
return _mm512_mask_blend_ps(0xFF3F, a.values, b.values);
|
| 566 |
+
case 248:
|
| 567 |
+
return _mm512_mask_blend_ps(0xFFC0, a.values, b.values);
|
| 568 |
+
case 249:
|
| 569 |
+
return _mm512_mask_blend_ps(0xFFC3, a.values, b.values);
|
| 570 |
+
case 250:
|
| 571 |
+
return _mm512_mask_blend_ps(0xFFCC, a.values, b.values);
|
| 572 |
+
case 251:
|
| 573 |
+
return _mm512_mask_blend_ps(0xFFCF, a.values, b.values);
|
| 574 |
+
case 252:
|
| 575 |
+
return _mm512_mask_blend_ps(0xFFF0, a.values, b.values);
|
| 576 |
+
case 253:
|
| 577 |
+
return _mm512_mask_blend_ps(0xFFF3, a.values, b.values);
|
| 578 |
+
case 254:
|
| 579 |
+
return _mm512_mask_blend_ps(0xFFFC, a.values, b.values);
|
| 580 |
+
default: break;
|
| 581 |
+
}
|
| 582 |
+
return b;
|
| 583 |
+
}
|
| 584 |
+
static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a,
|
| 585 |
+
const Vectorized<c10::complex<float>>& b,
|
| 586 |
+
const Vectorized<c10::complex<float>>& mask) {
|
| 587 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
| 588 |
+
auto mask_ = _mm512_unpacklo_ps(mask.values, mask.values);
|
| 589 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 590 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask_), all_ones, _MM_CMPINT_EQ);
|
| 591 |
+
return _mm512_mask_blend_ps(mmask, a.values, b.values);
|
| 592 |
+
}
|
| 593 |
+
template<typename step_t>
|
| 594 |
+
static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0.,
|
| 595 |
+
step_t step = static_cast<step_t>(1)) {
|
| 596 |
+
return Vectorized<c10::complex<float>>(base,
|
| 597 |
+
base + step,
|
| 598 |
+
base + c10::complex<float>(2)*step,
|
| 599 |
+
base + c10::complex<float>(3)*step,
|
| 600 |
+
base + c10::complex<float>(4)*step,
|
| 601 |
+
base + c10::complex<float>(5)*step,
|
| 602 |
+
base + c10::complex<float>(6)*step,
|
| 603 |
+
base + c10::complex<float>(7)*step);
|
| 604 |
+
}
|
| 605 |
+
static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a,
|
| 606 |
+
const Vectorized<c10::complex<float>>& b,
|
| 607 |
+
int64_t count = size()) {
|
| 608 |
+
switch (count) {
|
| 609 |
+
case 0:
|
| 610 |
+
return a;
|
| 611 |
+
case 1:
|
| 612 |
+
return blend<1>(a, b);
|
| 613 |
+
case 2:
|
| 614 |
+
return blend<3>(a, b);
|
| 615 |
+
case 3:
|
| 616 |
+
return blend<7>(a, b);
|
| 617 |
+
case 4:
|
| 618 |
+
return blend<15>(a, b);
|
| 619 |
+
case 5:
|
| 620 |
+
return blend<31>(a, b);
|
| 621 |
+
case 6:
|
| 622 |
+
return blend<63>(a, b);
|
| 623 |
+
case 7:
|
| 624 |
+
return blend<127>(a, b);
|
| 625 |
+
}
|
| 626 |
+
return b;
|
| 627 |
+
}
|
| 628 |
+
static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
|
| 629 |
+
if (count == size())
|
| 630 |
+
return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
|
| 631 |
+
|
| 632 |
+
__at_align__ float tmp_values[2*size()];
|
| 633 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
| 634 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
| 635 |
+
// instructions while a loop would be compiled to one instruction.
|
| 636 |
+
for (const auto i : c10::irange(2*size())) {
|
| 637 |
+
tmp_values[i] = 0.0;
|
| 638 |
+
}
|
| 639 |
+
std::memcpy(
|
| 640 |
+
tmp_values,
|
| 641 |
+
reinterpret_cast<const float*>(ptr),
|
| 642 |
+
count * sizeof(c10::complex<float>));
|
| 643 |
+
return _mm512_load_ps(tmp_values);
|
| 644 |
+
}
|
| 645 |
+
void store(void* ptr, int count = size()) const {
|
| 646 |
+
if (count == size()) {
|
| 647 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
|
| 648 |
+
} else if (count > 0) {
|
| 649 |
+
float tmp_values[2*size()];
|
| 650 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
|
| 651 |
+
std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
|
| 652 |
+
}
|
| 653 |
+
}
|
| 654 |
+
// AVX512 doesn't have horizontal add & horizontal sub instructions.
|
| 655 |
+
// TODO: hadd_pd() & hsub_pd() may have scope for improvement.
|
| 656 |
+
static inline __m512 hadd_ps(__m512 a, __m512 b) {
|
| 657 |
+
__m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
|
| 658 |
+
__m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
|
| 659 |
+
return _mm512_add_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
|
| 660 |
+
_mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
|
| 661 |
+
}
|
| 662 |
+
static inline __m512 hsub_ps(__m512 a, __m512 b) {
|
| 663 |
+
__m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
|
| 664 |
+
__m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
|
| 665 |
+
return _mm512_sub_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
|
| 666 |
+
_mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
|
| 667 |
+
}
|
| 668 |
+
const c10::complex<float>& operator[](int idx) const = delete;
|
| 669 |
+
c10::complex<float>& operator[](int idx) = delete;
|
| 670 |
+
Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
|
| 671 |
+
__at_align__ c10::complex<float> tmp[size()];
|
| 672 |
+
store(tmp);
|
| 673 |
+
for (const auto i : c10::irange(size())) {
|
| 674 |
+
tmp[i] = f(tmp[i]);
|
| 675 |
+
}
|
| 676 |
+
return loadu(tmp);
|
| 677 |
+
}
|
| 678 |
+
__m512 abs_2_() const {
|
| 679 |
+
auto val_2 = _mm512_mul_ps(values, values); // a*a b*b
|
| 680 |
+
auto ret = hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
|
| 681 |
+
return ret;
|
| 682 |
+
}
|
| 683 |
+
__m512 abs_() const {
|
| 684 |
+
auto real = _mm512_moveldup_ps(values); // real real
|
| 685 |
+
auto imag = _mm512_movehdup_ps(values); // imag imag
|
| 686 |
+
return Sleef_hypotf16_u05(real, imag); // abs abs
|
| 687 |
+
}
|
| 688 |
+
Vectorized<c10::complex<float>> abs() const {
|
| 689 |
+
const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 690 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 691 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 692 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
| 693 |
+
return _mm512_and_ps(abs_(), real_mask); // abs 0
|
| 694 |
+
}
|
| 695 |
+
__m512 angle_() const {
|
| 696 |
+
//angle = atan2(b/a)
|
| 697 |
+
auto b_a = _mm512_permute_ps(values, 0xB1); // b a
|
| 698 |
+
return Sleef_atan2f16_u10(values, b_a); // 90-angle angle
|
| 699 |
+
}
|
| 700 |
+
Vectorized<c10::complex<float>> angle() const {
|
| 701 |
+
const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 702 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 703 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 704 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
| 705 |
+
auto angle = _mm512_permute_ps(angle_(), 0xB1); // angle 90-angle
|
| 706 |
+
return _mm512_and_ps(angle, real_mask); // angle 0
|
| 707 |
+
}
|
| 708 |
+
Vectorized<c10::complex<float>> sgn() const {
|
| 709 |
+
auto abs = abs_();
|
| 710 |
+
auto zero = _mm512_setzero_ps();
|
| 711 |
+
auto mask = _mm512_cmp_ps_mask(abs, zero, _CMP_EQ_OQ);
|
| 712 |
+
auto div = _mm512_div_ps(values, abs);
|
| 713 |
+
return _mm512_mask_blend_ps(mask, div, zero);
|
| 714 |
+
}
|
| 715 |
+
__m512 real_() const {
|
| 716 |
+
const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 717 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 718 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 719 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
| 720 |
+
return _mm512_and_ps(values, real_mask);
|
| 721 |
+
}
|
| 722 |
+
Vectorized<c10::complex<float>> real() const {
|
| 723 |
+
return real_();
|
| 724 |
+
}
|
| 725 |
+
__m512 imag_() const {
|
| 726 |
+
const __m512 imag_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
|
| 727 |
+
0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
|
| 728 |
+
0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
|
| 729 |
+
0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
|
| 730 |
+
return _mm512_and_ps(values, imag_mask);
|
| 731 |
+
}
|
| 732 |
+
Vectorized<c10::complex<float>> imag() const {
|
| 733 |
+
return _mm512_permute_ps(imag_(), 0xB1); //b a
|
| 734 |
+
}
|
| 735 |
+
__m512 conj_() const {
|
| 736 |
+
const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
|
| 737 |
+
0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 738 |
+
return _mm512_xor_ps(values, sign_mask); // a -b
|
| 739 |
+
}
|
| 740 |
+
Vectorized<c10::complex<float>> conj() const {
|
| 741 |
+
return conj_();
|
| 742 |
+
}
|
| 743 |
+
Vectorized<c10::complex<float>> log() const {
|
| 744 |
+
// Most trigonomic ops use the log() op to improve complex number performance.
|
| 745 |
+
return map(std::log);
|
| 746 |
+
}
|
| 747 |
+
Vectorized<c10::complex<float>> log2() const {
|
| 748 |
+
const __m512 log2_ = _mm512_set1_ps(std::log(2));
|
| 749 |
+
return _mm512_div_ps(log(), log2_);
|
| 750 |
+
}
|
| 751 |
+
Vectorized<c10::complex<float>> log10() const {
|
| 752 |
+
const __m512 log10_ = _mm512_set1_ps(std::log(10));
|
| 753 |
+
return _mm512_div_ps(log(), log10_);
|
| 754 |
+
}
|
| 755 |
+
Vectorized<c10::complex<float>> log1p() const {
|
| 756 |
+
return map(std::log1p);
|
| 757 |
+
}
|
| 758 |
+
Vectorized<c10::complex<float>> asin() const {
|
| 759 |
+
// asin(x)
|
| 760 |
+
// = -i*ln(iz + sqrt(1 -z^2))
|
| 761 |
+
// = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
|
| 762 |
+
// = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
|
| 763 |
+
const __m512 one = _mm512_set1_ps(1);
|
| 764 |
+
|
| 765 |
+
auto conj = conj_();
|
| 766 |
+
auto b_a = _mm512_permute_ps(conj, 0xB1); //-b a
|
| 767 |
+
auto ab = _mm512_mul_ps(conj, b_a); //-ab -ab
|
| 768 |
+
auto im = _mm512_add_ps(ab, ab); //-2ab -2ab
|
| 769 |
+
|
| 770 |
+
auto val_2 = _mm512_mul_ps(values, values); // a*a b*b
|
| 771 |
+
auto re = hsub_ps(val_2, _mm512_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
|
| 772 |
+
re = _mm512_sub_ps(one, re);
|
| 773 |
+
|
| 774 |
+
auto root = Vectorized(_mm512_mask_blend_ps(0xAAAA, re, im)).sqrt(); //sqrt(re + i*im)
|
| 775 |
+
auto ln = Vectorized(_mm512_add_ps(b_a, root)).log(); //ln(iz + sqrt())
|
| 776 |
+
return Vectorized(_mm512_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
|
| 777 |
+
}
|
| 778 |
+
Vectorized<c10::complex<float>> acos() const {
|
| 779 |
+
return map(std::acos);
|
| 780 |
+
}
|
| 781 |
+
Vectorized<c10::complex<float>> atan() const;
|
| 782 |
+
Vectorized<c10::complex<float>> atanh() const {
|
| 783 |
+
return map(std::atanh);
|
| 784 |
+
}
|
| 785 |
+
Vectorized<c10::complex<float>> exp() const {
|
| 786 |
+
//exp(a + bi)
|
| 787 |
+
// = exp(a)*(cos(b) + sin(b)i)
|
| 788 |
+
auto exp = Sleef_expf16_u10(values); //exp(a) exp(b)
|
| 789 |
+
exp = _mm512_mask_blend_ps(0xAAAA, exp, _mm512_permute_ps(exp, 0xB1)); //exp(a) exp(a)
|
| 790 |
+
|
| 791 |
+
auto sin_cos = Sleef_sincosf16_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
|
| 792 |
+
auto cos_sin = _mm512_mask_blend_ps(0xAAAA, _mm512_permute_ps(sin_cos.y, 0xB1),
|
| 793 |
+
sin_cos.x); //cos(b) sin(b)
|
| 794 |
+
return _mm512_mul_ps(exp, cos_sin);
|
| 795 |
+
}
|
| 796 |
+
Vectorized<c10::complex<float>> exp2() const {
|
| 797 |
+
// Use identity 2**x = exp(log(2) * x)
|
| 798 |
+
const __m512 ln_2 = _mm512_set1_ps(c10::ln_2<float>);
|
| 799 |
+
Vectorized<c10::complex<float>> scaled_values = _mm512_mul_ps(values, ln_2);
|
| 800 |
+
return scaled_values.exp();
|
| 801 |
+
}
|
| 802 |
+
Vectorized<c10::complex<float>> expm1() const {
|
| 803 |
+
return map(std::expm1);
|
| 804 |
+
}
|
| 805 |
+
Vectorized<c10::complex<float>> sin() const {
|
| 806 |
+
return map(std::sin);
|
| 807 |
+
}
|
| 808 |
+
Vectorized<c10::complex<float>> sinh() const {
|
| 809 |
+
return map(std::sinh);
|
| 810 |
+
}
|
| 811 |
+
Vectorized<c10::complex<float>> cos() const {
|
| 812 |
+
return map(std::cos);
|
| 813 |
+
}
|
| 814 |
+
Vectorized<c10::complex<float>> cosh() const {
|
| 815 |
+
return map(std::cosh);
|
| 816 |
+
}
|
| 817 |
+
Vectorized<c10::complex<float>> ceil() const {
|
| 818 |
+
return _mm512_ceil_ps(values);
|
| 819 |
+
}
|
| 820 |
+
Vectorized<c10::complex<float>> floor() const {
|
| 821 |
+
return _mm512_floor_ps(values);
|
| 822 |
+
}
|
| 823 |
+
Vectorized<c10::complex<float>> neg() const {
|
| 824 |
+
auto zero = _mm512_setzero_ps();
|
| 825 |
+
return _mm512_sub_ps(zero, values);
|
| 826 |
+
}
|
| 827 |
+
Vectorized<c10::complex<float>> round() const {
|
| 828 |
+
return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 829 |
+
}
|
| 830 |
+
Vectorized<c10::complex<float>> tan() const {
|
| 831 |
+
return map(std::tan);
|
| 832 |
+
}
|
| 833 |
+
Vectorized<c10::complex<float>> tanh() const {
|
| 834 |
+
return map(std::tanh);
|
| 835 |
+
}
|
| 836 |
+
Vectorized<c10::complex<float>> trunc() const {
|
| 837 |
+
return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 838 |
+
}
|
| 839 |
+
Vectorized<c10::complex<float>> sqrt() const {
|
| 840 |
+
return map(std::sqrt);
|
| 841 |
+
}
|
| 842 |
+
Vectorized<c10::complex<float>> reciprocal() const;
|
| 843 |
+
Vectorized<c10::complex<float>> rsqrt() const {
|
| 844 |
+
return sqrt().reciprocal();
|
| 845 |
+
}
|
| 846 |
+
Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
|
| 847 |
+
__at_align__ c10::complex<float> x_tmp[size()];
|
| 848 |
+
__at_align__ c10::complex<float> y_tmp[size()];
|
| 849 |
+
store(x_tmp);
|
| 850 |
+
exp.store(y_tmp);
|
| 851 |
+
for (const auto i : c10::irange(size())) {
|
| 852 |
+
x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
|
| 853 |
+
}
|
| 854 |
+
return loadu(x_tmp);
|
| 855 |
+
}
|
| 856 |
+
// Comparison using the _CMP_**_OQ predicate.
|
| 857 |
+
// `O`: get false if an operand is NaN
|
| 858 |
+
// `Q`: do not raise if an operand is NaN
|
| 859 |
+
Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
|
| 860 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
|
| 861 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF));
|
| 862 |
+
}
|
| 863 |
+
Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
|
| 864 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
|
| 865 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF));
|
| 866 |
+
}
|
| 867 |
+
Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const {
|
| 868 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 869 |
+
}
|
| 870 |
+
Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const {
|
| 871 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 872 |
+
}
|
| 873 |
+
Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const {
|
| 874 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 875 |
+
}
|
| 876 |
+
Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& other [[maybe_unused]]) const {
|
| 877 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 878 |
+
}
|
| 879 |
+
|
| 880 |
+
Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
|
| 881 |
+
Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
|
| 882 |
+
};
|
| 883 |
+
|
| 884 |
+
template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a,
|
| 885 |
+
const Vectorized<c10::complex<float>> &b) {
|
| 886 |
+
return _mm512_add_ps(a, b);
|
| 887 |
+
}
|
| 888 |
+
|
| 889 |
+
template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a,
|
| 890 |
+
const Vectorized<c10::complex<float>> &b) {
|
| 891 |
+
return _mm512_sub_ps(a, b);
|
| 892 |
+
}
|
| 893 |
+
|
| 894 |
+
template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a,
|
| 895 |
+
const Vectorized<c10::complex<float>> &b) {
|
| 896 |
+
//(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
|
| 897 |
+
const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
|
| 898 |
+
0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 899 |
+
auto ac_bd = _mm512_mul_ps(a, b); //ac bd
|
| 900 |
+
|
| 901 |
+
auto d_c = _mm512_permute_ps(b, 0xB1); //d c
|
| 902 |
+
d_c = _mm512_xor_ps(sign_mask, d_c); //d -c
|
| 903 |
+
auto ad_bc = _mm512_mul_ps(a, d_c); //ad -bc
|
| 904 |
+
|
| 905 |
+
auto ret = Vectorized<c10::complex<float>>::hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
|
| 906 |
+
return ret;
|
| 907 |
+
}
|
| 908 |
+
|
| 909 |
+
template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a,
|
| 910 |
+
const Vectorized<c10::complex<float>> &b) {
|
| 911 |
+
//re + im*i = (a + bi) / (c + di)
|
| 912 |
+
auto mask = _mm512_set1_ps(-0.f);
|
| 913 |
+
auto fabs_cd = _mm512_andnot_ps(mask, b); // |c| |d|
|
| 914 |
+
auto fabs_dc = _mm512_permute_ps(fabs_cd, 0xB1); // |d| |c|
|
| 915 |
+
auto scale = _mm512_rcp14_ps(_mm512_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
|
| 916 |
+
auto a2 = _mm512_mul_ps(a, scale); // a/sc b/sc
|
| 917 |
+
auto b2 = _mm512_mul_ps(b, scale); // c/sc d/sc
|
| 918 |
+
auto acbd2 = _mm512_mul_ps(a2, b2);
|
| 919 |
+
|
| 920 |
+
const __m512 sign_mask = _mm512_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0,
|
| 921 |
+
-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
|
| 922 |
+
auto dc2 = _mm512_permute_ps(b2, 0xB1); // d/sc c/sc
|
| 923 |
+
dc2 = _mm512_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc
|
| 924 |
+
auto adbc2 = _mm512_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
|
| 925 |
+
auto res2 = Vectorized<c10::complex<float>>::hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
|
| 926 |
+
|
| 927 |
+
// get the denominator
|
| 928 |
+
auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
|
| 929 |
+
res2 = _mm512_div_ps(res2, denom2);
|
| 930 |
+
return res2;
|
| 931 |
+
}
|
| 932 |
+
|
| 933 |
+
// reciprocal. Implement this here so we can use multiplication.
|
| 934 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
|
| 935 |
+
//re + im*i = (a + bi) / (c + di)
|
| 936 |
+
//re = (ac + bd)/abs_2() = c/abs_2()
|
| 937 |
+
//im = (bc - ad)/abs_2() = d/abs_2()
|
| 938 |
+
const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
|
| 939 |
+
0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 940 |
+
auto c_d = _mm512_xor_ps(sign_mask, values); //c -d
|
| 941 |
+
return _mm512_div_ps(c_d, abs_2_());
|
| 942 |
+
}
|
| 943 |
+
|
| 944 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
|
| 945 |
+
// atan(x) = i/2 * ln((i + z)/(i - z))
|
| 946 |
+
const __m512 i = _mm512_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
|
| 947 |
+
0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
|
| 948 |
+
const Vectorized i_half = _mm512_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
|
| 949 |
+
0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
|
| 950 |
+
|
| 951 |
+
auto sum = Vectorized(_mm512_add_ps(i, values)); // a 1+b
|
| 952 |
+
auto sub = Vectorized(_mm512_sub_ps(i, values)); // -a 1-b
|
| 953 |
+
auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
|
| 954 |
+
return i_half*ln; // i/2*ln()
|
| 955 |
+
}
|
| 956 |
+
|
| 957 |
+
template <>
|
| 958 |
+
Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a,
|
| 959 |
+
const Vectorized<c10::complex<float>>& b) {
|
| 960 |
+
auto zero_vector = _mm512_set1_epi32(0);
|
| 961 |
+
auto abs_a = a.abs_2_();
|
| 962 |
+
auto abs_b = b.abs_2_();
|
| 963 |
+
auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_LT_OQ);
|
| 964 |
+
auto max = _mm512_mask_blend_ps(mask, a, b);
|
| 965 |
+
// Exploit the fact that all-ones is a NaN.
|
| 966 |
+
auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q);
|
| 967 |
+
auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF);
|
| 968 |
+
return _mm512_or_ps(max, _mm512_castsi512_ps(isnan));
|
| 969 |
+
}
|
| 970 |
+
|
| 971 |
+
template <>
|
| 972 |
+
Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a,
|
| 973 |
+
const Vectorized<c10::complex<float>>& b) {
|
| 974 |
+
auto zero_vector = _mm512_set1_epi32(0);
|
| 975 |
+
auto abs_a = a.abs_2_();
|
| 976 |
+
auto abs_b = b.abs_2_();
|
| 977 |
+
auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_GT_OQ);
|
| 978 |
+
auto min = _mm512_mask_blend_ps(mask, a, b);
|
| 979 |
+
// Exploit the fact that all-ones is a NaN.
|
| 980 |
+
auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q);
|
| 981 |
+
auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF);
|
| 982 |
+
return _mm512_or_ps(min, _mm512_castsi512_ps(isnan));
|
| 983 |
+
}
|
| 984 |
+
|
| 985 |
+
template <>
|
| 986 |
+
Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a,
|
| 987 |
+
const Vectorized<c10::complex<float>>& b) {
|
| 988 |
+
return _mm512_and_ps(a, b);
|
| 989 |
+
}
|
| 990 |
+
|
| 991 |
+
template <>
|
| 992 |
+
Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a,
|
| 993 |
+
const Vectorized<c10::complex<float>>& b) {
|
| 994 |
+
return _mm512_or_ps(a, b);
|
| 995 |
+
}
|
| 996 |
+
|
| 997 |
+
template <>
|
| 998 |
+
Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a,
|
| 999 |
+
const Vectorized<c10::complex<float>>& b) {
|
| 1000 |
+
return _mm512_xor_ps(a, b);
|
| 1001 |
+
}
|
| 1002 |
+
|
| 1003 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
|
| 1004 |
+
const Vectorized<c10::complex<float>>& other) const {
|
| 1005 |
+
auto eq = (*this == other); // compares real and imag individually
|
| 1006 |
+
// If both real numbers and imag numbers are equal, then the complex numbers are equal
|
| 1007 |
+
return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm512_set1_ps(1.0f));
|
| 1008 |
+
}
|
| 1009 |
+
|
| 1010 |
+
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
|
| 1011 |
+
const Vectorized<c10::complex<float>>& other) const {
|
| 1012 |
+
auto ne = (*this != other); // compares real and imag individually
|
| 1013 |
+
// If either real numbers or imag numbers are not equal, then the complex numbers are not equal
|
| 1014 |
+
return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm512_set1_ps(1.0f));
|
| 1015 |
+
}
|
| 1016 |
+
|
| 1017 |
+
#endif
|
| 1018 |
+
|
| 1019 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_convert.h
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec512/vec512_bfloat16.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 6 |
+
#include <ATen/cpu/vec/vec_convert.h>
|
| 7 |
+
|
| 8 |
+
namespace at::vec {
|
| 9 |
+
inline namespace CPU_CAPABILITY {
|
| 10 |
+
|
| 11 |
+
#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
|
| 12 |
+
|
| 13 |
+
template <>
|
| 14 |
+
struct VecConvert<float, 1, BFloat16, 1> {
|
| 15 |
+
static inline VectorizedN<float, 1> apply(
|
| 16 |
+
const VectorizedN<BFloat16, 1>& src) {
|
| 17 |
+
VectorizedN<float, 1> result;
|
| 18 |
+
__m512 value;
|
| 19 |
+
cvtbf16_fp32(_mm512_castsi512_si256(src[0]), value);
|
| 20 |
+
result[0] = value;
|
| 21 |
+
return result;
|
| 22 |
+
}
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
template <>
|
| 26 |
+
struct VecConvert<float, 1, Half, 1> {
|
| 27 |
+
static inline VectorizedN<float, 1> apply(const VectorizedN<Half, 1>& src) {
|
| 28 |
+
VectorizedN<float, 1> result;
|
| 29 |
+
__m512 value;
|
| 30 |
+
cvtfp16_fp32(_mm512_castsi512_si256(src[0]), value);
|
| 31 |
+
result[0] = value;
|
| 32 |
+
return result;
|
| 33 |
+
}
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
template <>
|
| 37 |
+
struct VecConvert<BFloat16, 1, float, 1> {
|
| 38 |
+
static inline VectorizedN<BFloat16, 1> apply(
|
| 39 |
+
const VectorizedN<float, 1>& src) {
|
| 40 |
+
VectorizedN<BFloat16, 1> result;
|
| 41 |
+
result[0] = _mm512_castsi256_si512(cvtfp32_bf16(src[0]));
|
| 42 |
+
return result;
|
| 43 |
+
}
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
template <>
|
| 47 |
+
struct VecConvert<BFloat16, 1, float, 2> {
|
| 48 |
+
static inline VectorizedN<BFloat16, 1> apply(
|
| 49 |
+
const VectorizedN<float, 2>& src) {
|
| 50 |
+
VectorizedN<BFloat16, 1> result;
|
| 51 |
+
result[0] = convert_float_bfloat16(src[0], src[1]);
|
| 52 |
+
return result;
|
| 53 |
+
}
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
template <>
|
| 57 |
+
struct VecConvert<float, 2, BFloat16, 1> {
|
| 58 |
+
static inline VectorizedN<float, 2> apply(
|
| 59 |
+
const VectorizedN<BFloat16, 1>& src) {
|
| 60 |
+
VectorizedN<float, 2> result;
|
| 61 |
+
std::tie(result[0], result[1]) = convert_bfloat16_float(src[0]);
|
| 62 |
+
return result;
|
| 63 |
+
}
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
template <>
|
| 67 |
+
struct VecConvert<Half, 1, float, 1> {
|
| 68 |
+
static inline VectorizedN<Half, 1> apply(const VectorizedN<float, 1>& src) {
|
| 69 |
+
VectorizedN<Half, 1> result;
|
| 70 |
+
result[0] = _mm512_castsi256_si512(cvtfp32_fp16(src[0]));
|
| 71 |
+
return result;
|
| 72 |
+
}
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
template <>
|
| 76 |
+
struct VecConvert<Half, 1, float, 2> {
|
| 77 |
+
static inline VectorizedN<Half, 1> apply(const VectorizedN<float, 2>& src) {
|
| 78 |
+
VectorizedN<Half, 1> result;
|
| 79 |
+
result[0] = convert_float_half(src[0], src[1]);
|
| 80 |
+
return result;
|
| 81 |
+
}
|
| 82 |
+
};
|
| 83 |
+
|
| 84 |
+
template <>
|
| 85 |
+
struct VecConvert<float, 2, Half, 1> {
|
| 86 |
+
static inline VectorizedN<float, 2> apply(const VectorizedN<Half, 1>& src) {
|
| 87 |
+
VectorizedN<float, 2> result;
|
| 88 |
+
std::tie(result[0], result[1]) = convert_half_float(src[0]);
|
| 89 |
+
return result;
|
| 90 |
+
}
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
template <>
|
| 94 |
+
struct VecConvert<float, 1, int64_t, 2> {
|
| 95 |
+
static inline VectorizedN<float, 1> apply(
|
| 96 |
+
const VectorizedN<int64_t, 2>& src) {
|
| 97 |
+
auto low = _mm512_cvtepi64_ps(src[0]);
|
| 98 |
+
auto high = _mm512_cvtepi64_ps(src[1]);
|
| 99 |
+
return Vectorized<float>(
|
| 100 |
+
_mm512_insertf32x8(_mm512_castps256_ps512(low), high, 1));
|
| 101 |
+
}
|
| 102 |
+
};
|
| 103 |
+
|
| 104 |
+
template <>
|
| 105 |
+
struct VecConvert<int64_t, 2, float, 1> {
|
| 106 |
+
static inline VectorizedN<int64_t, 2> apply(
|
| 107 |
+
const VectorizedN<float, 1>& src) {
|
| 108 |
+
at::vec::VectorizedN<int64_t, 2> result;
|
| 109 |
+
result[0] = _mm512_cvt_roundps_epi64(
|
| 110 |
+
_mm512_castps512_ps256(src[0]), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
|
| 111 |
+
result[1] = _mm512_cvt_roundps_epi64(
|
| 112 |
+
_mm512_extractf32x8_ps(src[0], 1),
|
| 113 |
+
_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
|
| 114 |
+
return result;
|
| 115 |
+
}
|
| 116 |
+
};
|
| 117 |
+
|
| 118 |
+
template <>
|
| 119 |
+
struct VecConvert<int32_t, 1, int64_t, 2> {
|
| 120 |
+
static inline VectorizedN<int32_t, 1> apply(
|
| 121 |
+
const VectorizedN<int64_t, 2>& src) {
|
| 122 |
+
auto low = _mm512_cvtepi64_epi32(src[0]);
|
| 123 |
+
auto high = _mm512_cvtepi64_epi32(src[1]);
|
| 124 |
+
return Vectorized<int32_t>(
|
| 125 |
+
_mm512_inserti32x8(_mm512_castsi256_si512(low), high, 1));
|
| 126 |
+
}
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
template <>
|
| 130 |
+
struct VecConvert<int64_t, 2, int32_t, 1> {
|
| 131 |
+
static inline VectorizedN<int64_t, 2> apply(
|
| 132 |
+
const VectorizedN<int32_t, 1>& src) {
|
| 133 |
+
at::vec::VectorizedN<int64_t, 2> result;
|
| 134 |
+
result[0] = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(src[0]));
|
| 135 |
+
result[1] = _mm512_cvtepi32_epi64(_mm512_extracti32x8_epi32(src[0], 1));
|
| 136 |
+
return result;
|
| 137 |
+
}
|
| 138 |
+
};
|
| 139 |
+
|
| 140 |
+
template <>
|
| 141 |
+
struct VecConvert<int32_t, 1, int8_t, 1> {
|
| 142 |
+
static inline VectorizedN<int32_t, 1> apply(
|
| 143 |
+
const VectorizedN<int8_t, 1>& src) {
|
| 144 |
+
auto src128 = _mm512_castsi512_si128(src[0]);
|
| 145 |
+
return Vectorized<int32_t>(_mm512_cvtepi8_epi32(src128));
|
| 146 |
+
}
|
| 147 |
+
};
|
| 148 |
+
|
| 149 |
+
template <>
|
| 150 |
+
struct VecConvert<int32_t, 1, uint8_t, 1> {
|
| 151 |
+
static inline VectorizedN<int32_t, 1> apply(
|
| 152 |
+
const VectorizedN<uint8_t, 1>& src) {
|
| 153 |
+
auto src128 = _mm512_castsi512_si128(src[0]);
|
| 154 |
+
return Vectorized<int32_t>(_mm512_cvtepu8_epi32(src128));
|
| 155 |
+
}
|
| 156 |
+
};
|
| 157 |
+
|
| 158 |
+
template <>
|
| 159 |
+
struct VecConvert<int32_t, 1, float, 1> {
|
| 160 |
+
static inline VectorizedN<int32_t, 1> apply(
|
| 161 |
+
const VectorizedN<float, 1>& src) {
|
| 162 |
+
return Vectorized<int32_t>(_mm512_cvttps_epi32(src[0]));
|
| 163 |
+
}
|
| 164 |
+
};
|
| 165 |
+
|
| 166 |
+
template <>
|
| 167 |
+
struct VecConvert<float, 1, int32_t, 1> {
|
| 168 |
+
static inline VectorizedN<float, 1> apply(
|
| 169 |
+
const VectorizedN<int32_t, 1>& src) {
|
| 170 |
+
return Vectorized<float>(_mm512_cvtepi32_ps(src[0]));
|
| 171 |
+
}
|
| 172 |
+
};
|
| 173 |
+
|
| 174 |
+
template <>
|
| 175 |
+
struct VecConvert<int16_t, 1, uint8_t, 1> {
|
| 176 |
+
static inline VectorizedN<int16_t, 1> apply(
|
| 177 |
+
const VectorizedN<uint8_t, 1>& src) {
|
| 178 |
+
auto src256 = _mm512_castsi512_si256(src[0]);
|
| 179 |
+
return Vectorized<int16_t>(_mm512_cvtepu8_epi16(src256));
|
| 180 |
+
}
|
| 181 |
+
};
|
| 182 |
+
|
| 183 |
+
template <>
|
| 184 |
+
struct VecConvert<int8_t, 1, int32_t, 1> {
|
| 185 |
+
static inline VectorizedN<int8_t, 1> apply(
|
| 186 |
+
const VectorizedN<int32_t, 1>& src) {
|
| 187 |
+
auto src128 = _mm512_cvtepi32_epi8(src[0]);
|
| 188 |
+
return Vectorized<int8_t>(_mm512_castsi128_si512(src128));
|
| 189 |
+
}
|
| 190 |
+
};
|
| 191 |
+
|
| 192 |
+
template <>
|
| 193 |
+
struct VecConvert<int8_t, 1, int16_t, 1> {
|
| 194 |
+
static inline VectorizedN<int8_t, 1> apply(
|
| 195 |
+
const VectorizedN<int16_t, 1>& src) {
|
| 196 |
+
auto src256 = _mm512_cvtepi16_epi8(src[0]);
|
| 197 |
+
return Vectorized<int8_t>(_mm512_castsi256_si512(src256));
|
| 198 |
+
}
|
| 199 |
+
};
|
| 200 |
+
|
| 201 |
+
template <typename dst_t, typename src_t>
|
| 202 |
+
struct VecConvert<
|
| 203 |
+
dst_t,
|
| 204 |
+
1,
|
| 205 |
+
src_t,
|
| 206 |
+
1,
|
| 207 |
+
typename std::enable_if_t<
|
| 208 |
+
(is_reduced_floating_point_v<dst_t> && is_8bit_integer_v<src_t>) ||
|
| 209 |
+
(is_reduced_floating_point_v<src_t> && is_8bit_integer_v<dst_t>),
|
| 210 |
+
void>> {
|
| 211 |
+
static inline VectorizedN<dst_t, 1> apply(const VectorizedN<src_t, 1>& src) {
|
| 212 |
+
VectorizedN<float, 1> tmp_fp32 = VecConvert<float, 1, src_t, 1>::apply(src);
|
| 213 |
+
return VecConvert<dst_t, 1, float, 1>::apply(tmp_fp32);
|
| 214 |
+
}
|
| 215 |
+
};
|
| 216 |
+
|
| 217 |
+
template <typename dst_t>
|
| 218 |
+
struct VecConvert<
|
| 219 |
+
dst_t,
|
| 220 |
+
1,
|
| 221 |
+
float,
|
| 222 |
+
1,
|
| 223 |
+
typename std::enable_if_t<is_8bit_integer_v<dst_t>,
|
| 224 |
+
void>> {
|
| 225 |
+
static inline VectorizedN<dst_t, 1> apply(const VectorizedN<float, 1>& src) {
|
| 226 |
+
return convert_float_to_int8<dst_t>(src[0]);
|
| 227 |
+
}
|
| 228 |
+
};
|
| 229 |
+
|
| 230 |
+
template <typename src_t>
|
| 231 |
+
struct VecConvert<
|
| 232 |
+
float,
|
| 233 |
+
1,
|
| 234 |
+
src_t,
|
| 235 |
+
1,
|
| 236 |
+
typename std::enable_if_t<is_8bit_integer_v<src_t>,
|
| 237 |
+
void>> {
|
| 238 |
+
static inline VectorizedN<float, 1> apply(const VectorizedN<src_t, 1>& src) {
|
| 239 |
+
return convert_int8_to_float<src_t>(src[0]);
|
| 240 |
+
}
|
| 241 |
+
};
|
| 242 |
+
|
| 243 |
+
template <typename dst_t>
|
| 244 |
+
struct VecConvert<
|
| 245 |
+
dst_t,
|
| 246 |
+
1,
|
| 247 |
+
int64_t,
|
| 248 |
+
2,
|
| 249 |
+
typename std::enable_if<
|
| 250 |
+
std::is_same_v<dst_t, int8_t> ||
|
| 251 |
+
std::is_same_v<dst_t, uint8_t>>::type> {
|
| 252 |
+
static inline VectorizedN<dst_t, 1> apply(
|
| 253 |
+
const VectorizedN<int64_t, 2>& src) {
|
| 254 |
+
return VecConvert<dst_t, 1, int32_t, 1>::apply(
|
| 255 |
+
VecConvert<int32_t, 1, int64_t, 2>::apply(src));
|
| 256 |
+
}
|
| 257 |
+
};
|
| 258 |
+
|
| 259 |
+
#endif
|
| 260 |
+
|
| 261 |
+
} // namespace CPU_CAPABILITY
|
| 262 |
+
} // namespace at::vec
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h
ADDED
|
@@ -0,0 +1,708 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 10 |
+
#define SLEEF_STATIC_LIBS
|
| 11 |
+
#include <sleef.h>
|
| 12 |
+
#endif
|
| 13 |
+
|
| 14 |
+
namespace at {
|
| 15 |
+
namespace vec {
|
| 16 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 17 |
+
inline namespace CPU_CAPABILITY {
|
| 18 |
+
|
| 19 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 20 |
+
|
| 21 |
+
template <> class Vectorized<float> {
|
| 22 |
+
private:
|
| 23 |
+
static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0};
|
| 24 |
+
public:
|
| 25 |
+
__m512 values;
|
| 26 |
+
using value_type = float;
|
| 27 |
+
using size_type = int;
|
| 28 |
+
static constexpr size_type size() {
|
| 29 |
+
return 16;
|
| 30 |
+
}
|
| 31 |
+
Vectorized() {}
|
| 32 |
+
Vectorized(__m512 v) : values(v) {}
|
| 33 |
+
Vectorized(float val) {
|
| 34 |
+
values = _mm512_set1_ps(val);
|
| 35 |
+
}
|
| 36 |
+
Vectorized(float val1, float val2, float val3, float val4,
|
| 37 |
+
float val5, float val6, float val7, float val8,
|
| 38 |
+
float val9, float val10, float val11, float val12,
|
| 39 |
+
float val13, float val14, float val15, float val16) {
|
| 40 |
+
values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8,
|
| 41 |
+
val9, val10, val11, val12, val13, val14, val15, val16);
|
| 42 |
+
}
|
| 43 |
+
operator __m512() const {
|
| 44 |
+
return values;
|
| 45 |
+
}
|
| 46 |
+
template <int64_t mask>
|
| 47 |
+
static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 48 |
+
return _mm512_mask_blend_ps(mask, a.values, b.values);
|
| 49 |
+
}
|
| 50 |
+
static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
|
| 51 |
+
const Vectorized<float>& mask) {
|
| 52 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 53 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ);
|
| 54 |
+
return _mm512_mask_blend_ps(mmask, a.values, b.values);
|
| 55 |
+
}
|
| 56 |
+
template<typename step_t>
|
| 57 |
+
static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
|
| 58 |
+
return Vectorized<float>(
|
| 59 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
| 60 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
| 61 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
| 62 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
|
| 63 |
+
}
|
| 64 |
+
static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
|
| 65 |
+
int64_t count = size()) {
|
| 66 |
+
switch (count) {
|
| 67 |
+
case 0:
|
| 68 |
+
return a;
|
| 69 |
+
case 1:
|
| 70 |
+
return blend<1>(a, b);
|
| 71 |
+
case 2:
|
| 72 |
+
return blend<3>(a, b);
|
| 73 |
+
case 3:
|
| 74 |
+
return blend<7>(a, b);
|
| 75 |
+
case 4:
|
| 76 |
+
return blend<15>(a, b);
|
| 77 |
+
case 5:
|
| 78 |
+
return blend<31>(a, b);
|
| 79 |
+
case 6:
|
| 80 |
+
return blend<63>(a, b);
|
| 81 |
+
case 7:
|
| 82 |
+
return blend<127>(a, b);
|
| 83 |
+
case 8:
|
| 84 |
+
return blend<255>(a, b);
|
| 85 |
+
case 9:
|
| 86 |
+
return blend<511>(a, b);
|
| 87 |
+
case 10:
|
| 88 |
+
return blend<1023>(a, b);
|
| 89 |
+
case 11:
|
| 90 |
+
return blend<2047>(a, b);
|
| 91 |
+
case 12:
|
| 92 |
+
return blend<4095>(a, b);
|
| 93 |
+
case 13:
|
| 94 |
+
return blend<8191>(a, b);
|
| 95 |
+
case 14:
|
| 96 |
+
return blend<16383>(a, b);
|
| 97 |
+
case 15:
|
| 98 |
+
return blend<32767>(a, b);
|
| 99 |
+
}
|
| 100 |
+
return b;
|
| 101 |
+
}
|
| 102 |
+
static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
|
| 103 |
+
if (count == size())
|
| 104 |
+
return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
|
| 105 |
+
|
| 106 |
+
__mmask16 mask = (1ULL << count) - 1;
|
| 107 |
+
return _mm512_maskz_loadu_ps(mask, ptr);
|
| 108 |
+
}
|
| 109 |
+
void store(void* ptr, int64_t count = size()) const {
|
| 110 |
+
if (count == size()) {
|
| 111 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
|
| 112 |
+
} else if (count > 0) {
|
| 113 |
+
__mmask16 mask = (1ULL << count) - 1;
|
| 114 |
+
_mm512_mask_storeu_ps(reinterpret_cast<float*>(ptr), mask, values);
|
| 115 |
+
}
|
| 116 |
+
}
|
| 117 |
+
const float& operator[](int idx) const = delete;
|
| 118 |
+
float& operator[](int idx) = delete;
|
| 119 |
+
int zero_mask() const {
|
| 120 |
+
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
|
| 121 |
+
__mmask16 cmp = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ);
|
| 122 |
+
return static_cast<int32_t>(cmp);
|
| 123 |
+
}
|
| 124 |
+
Vectorized<float> isnan() const {
|
| 125 |
+
auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q);
|
| 126 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 127 |
+
0xFFFFFFFF));
|
| 128 |
+
}
|
| 129 |
+
bool has_inf_nan() const {
|
| 130 |
+
__m512 self_sub = _mm512_sub_ps(values, values);
|
| 131 |
+
return (_mm512_movepi8_mask(_mm512_castps_si512(self_sub)) & 0x7777777777777777) != 0;
|
| 132 |
+
}
|
| 133 |
+
Vectorized<float> map(float (*const f)(float)) const {
|
| 134 |
+
__at_align__ float tmp[size()];
|
| 135 |
+
store(tmp);
|
| 136 |
+
for (const auto i : c10::irange(size())) {
|
| 137 |
+
tmp[i] = f(tmp[i]);
|
| 138 |
+
}
|
| 139 |
+
return loadu(tmp);
|
| 140 |
+
}
|
| 141 |
+
Vectorized<float> abs() const {
|
| 142 |
+
auto mask = _mm512_set1_ps(-0.f);
|
| 143 |
+
return _mm512_andnot_ps(mask, values);
|
| 144 |
+
}
|
| 145 |
+
Vectorized<float> angle() const {
|
| 146 |
+
__m512 zero_vec = _mm512_set1_ps(0.f);
|
| 147 |
+
const auto nan_vec = _mm512_set1_ps(NAN);
|
| 148 |
+
const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
|
| 149 |
+
const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
|
| 150 |
+
not_nan_mask, 0xFFFFFFFF);
|
| 151 |
+
const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec),
|
| 152 |
+
zero_vec, _CMP_EQ_OQ);
|
| 153 |
+
const auto pi = _mm512_set1_ps(c10::pi<double>);
|
| 154 |
+
|
| 155 |
+
const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
|
| 156 |
+
auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
|
| 157 |
+
angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
|
| 158 |
+
return angle;
|
| 159 |
+
}
|
| 160 |
+
Vectorized<float> real() const {
|
| 161 |
+
return *this;
|
| 162 |
+
}
|
| 163 |
+
Vectorized<float> imag() const {
|
| 164 |
+
return _mm512_set1_ps(0);
|
| 165 |
+
}
|
| 166 |
+
Vectorized<float> conj() const {
|
| 167 |
+
return *this;
|
| 168 |
+
}
|
| 169 |
+
Vectorized<float> acos() const {
|
| 170 |
+
return Vectorized<float>(Sleef_acosf16_u10(values));
|
| 171 |
+
}
|
| 172 |
+
Vectorized<float> acosh() const {
|
| 173 |
+
return Vectorized<float>(Sleef_acoshf16_u10(values));
|
| 174 |
+
}
|
| 175 |
+
Vectorized<float> asin() const {
|
| 176 |
+
return Vectorized<float>(Sleef_asinf16_u10(values));
|
| 177 |
+
}
|
| 178 |
+
Vectorized<float> atan() const {
|
| 179 |
+
return Vectorized<float>(Sleef_atanf16_u10(values));
|
| 180 |
+
}
|
| 181 |
+
Vectorized<float> atanh() const {
|
| 182 |
+
return Vectorized<float>(Sleef_atanhf16_u10(values));
|
| 183 |
+
}
|
| 184 |
+
Vectorized<float> atan2(const Vectorized<float> &b) const {
|
| 185 |
+
return Vectorized<float>(Sleef_atan2f16_u10(values, b));
|
| 186 |
+
}
|
| 187 |
+
Vectorized<float> copysign(const Vectorized<float> &sign) const {
|
| 188 |
+
return Vectorized<float>(Sleef_copysignf16(values, sign));
|
| 189 |
+
}
|
| 190 |
+
Vectorized<float> erf() const {
|
| 191 |
+
// constants
|
| 192 |
+
const auto neg_zero_vec = _mm512_set1_ps(-0.f);
|
| 193 |
+
const auto one_vec = _mm512_set1_ps(1.0f);
|
| 194 |
+
const auto p = _mm512_set1_ps(0.3275911f);
|
| 195 |
+
const auto p1 = _mm512_set1_ps(0.254829592f);
|
| 196 |
+
const auto p2 = _mm512_set1_ps(-0.284496736f);
|
| 197 |
+
const auto p3 = _mm512_set1_ps(1.421413741f);
|
| 198 |
+
const auto p4 = _mm512_set1_ps(-1.453152027f);
|
| 199 |
+
const auto p5 = _mm512_set1_ps(1.061405429f);
|
| 200 |
+
// sign(x)
|
| 201 |
+
auto sign_mask = _mm512_and_ps(neg_zero_vec, values);
|
| 202 |
+
auto abs_vec = _mm512_abs_ps(values);
|
| 203 |
+
// t = 1 / (p * abs(x) + 1)
|
| 204 |
+
auto tmp0 = _mm512_fmadd_ps(p, abs_vec, one_vec);
|
| 205 |
+
auto t = _mm512_div_ps(one_vec, tmp0);
|
| 206 |
+
// r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
|
| 207 |
+
auto tmp1 = _mm512_fmadd_ps(p5, t, p4);
|
| 208 |
+
auto tmp2 = _mm512_fmadd_ps(tmp1, t, p3);
|
| 209 |
+
auto tmp3 = _mm512_fmadd_ps(tmp2, t, p2);
|
| 210 |
+
auto r = _mm512_fmadd_ps(tmp3, t, p1);
|
| 211 |
+
// - exp(- x * x)
|
| 212 |
+
auto pow_2 = _mm512_mul_ps(values, values);
|
| 213 |
+
auto neg_pow_2 = _mm512_xor_ps(neg_zero_vec, pow_2);
|
| 214 |
+
// auto tmp4 = exp(neg_pow_2);
|
| 215 |
+
auto tmp4 = Vectorized<float>(Sleef_expf16_u10(neg_pow_2));
|
| 216 |
+
auto tmp5 = _mm512_xor_ps(neg_zero_vec, tmp4);
|
| 217 |
+
// erf(x) = sign(x) * (1 - r * t * exp(- x * x))
|
| 218 |
+
auto tmp6 = _mm512_mul_ps(tmp5, t);
|
| 219 |
+
auto tmp7 = _mm512_fmadd_ps(tmp6, r, one_vec);
|
| 220 |
+
return _mm512_xor_ps(sign_mask, tmp7);
|
| 221 |
+
}
|
| 222 |
+
Vectorized<float> erfc() const {
|
| 223 |
+
return Vectorized<float>(Sleef_erfcf16_u15(values));
|
| 224 |
+
}
|
| 225 |
+
Vectorized<float> erfinv() const {
|
| 226 |
+
return map(calc_erfinv);
|
| 227 |
+
}
|
| 228 |
+
Vectorized<float> exp() const {
|
| 229 |
+
return Vectorized<float>(Sleef_expf16_u10(values));
|
| 230 |
+
}
|
| 231 |
+
Vectorized<float> exp2() const {
|
| 232 |
+
return Vectorized<float>(Sleef_exp2f16_u10(values));
|
| 233 |
+
}
|
| 234 |
+
Vectorized<float> expm1() const {
|
| 235 |
+
return Vectorized<float>(Sleef_expm1f16_u10(values));
|
| 236 |
+
}
|
| 237 |
+
Vectorized<float> exp_u20() const {
|
| 238 |
+
// A faster version of exp with ULP=20
|
| 239 |
+
static __m512 vec_factorial_1 =
|
| 240 |
+
_mm512_set1_ps(0.999999701f); // 1/factorial(1)
|
| 241 |
+
static __m512 vec_factorial_2 =
|
| 242 |
+
_mm512_set1_ps(0.499991506f); // 1/factorial(2)
|
| 243 |
+
static __m512 vec_factorial_3 =
|
| 244 |
+
_mm512_set1_ps(0.166676521f); // 1/factorial(3)
|
| 245 |
+
static __m512 vec_factorial_4 =
|
| 246 |
+
_mm512_set1_ps(0.0418978221f); // 1/factorial(4)
|
| 247 |
+
static __m512 vec_factorial_5 =
|
| 248 |
+
_mm512_set1_ps(0.00828929059f); // 1/factorial(5)
|
| 249 |
+
static __m512 vec_exp_log2ef =
|
| 250 |
+
_mm512_castsi512_ps(_mm512_set1_epi32(0x3fb8aa3b)); // log2(e)
|
| 251 |
+
static __m512 vec_half = _mm512_set1_ps(0.5f);
|
| 252 |
+
static __m512 vec_one = _mm512_set1_ps(1.f);
|
| 253 |
+
static __m512 vec_zero = _mm512_set1_ps(0.f);
|
| 254 |
+
static __m512 vec_two = _mm512_set1_ps(2.f);
|
| 255 |
+
static __m512 vec_ln2f = _mm512_castsi512_ps(_mm512_set1_epi32(0x3f317218)); // ln(2)
|
| 256 |
+
static __m512 vec_ln_flt_min = _mm512_castsi512_ps(_mm512_set1_epi32(0xc2aeac50));
|
| 257 |
+
static __m512 vec_ln_flt_max = _mm512_castsi512_ps(_mm512_set1_epi32(0x42b17218));
|
| 258 |
+
static __m512i vec_127 = _mm512_set1_epi32(0x0000007f);
|
| 259 |
+
static int n_mantissa_bits = 23;
|
| 260 |
+
|
| 261 |
+
// exp(x) =
|
| 262 |
+
// = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem
|
| 263 |
+
// = 2^n * exp(r) // simplify the exp(n*ln(2)) expression
|
| 264 |
+
|
| 265 |
+
auto less_ln_flt_min_mask =
|
| 266 |
+
_mm512_cmp_ps_mask(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/);
|
| 267 |
+
auto vec_src = _mm512_min_ps(values, vec_ln_flt_max);
|
| 268 |
+
vec_src = _mm512_max_ps(vec_src, vec_ln_flt_min);
|
| 269 |
+
|
| 270 |
+
// fx = floorf(x * log2ef + 0.5)
|
| 271 |
+
auto vec_fx = _mm512_fmadd_ps(vec_src, vec_exp_log2ef, vec_half);
|
| 272 |
+
auto vec_fx_i = _mm512_cvt_roundps_epi32(
|
| 273 |
+
vec_fx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
|
| 274 |
+
vec_fx = _mm512_cvtepi32_ps(vec_fx_i);
|
| 275 |
+
|
| 276 |
+
// x = x - fx * ln2
|
| 277 |
+
auto vec_exp_poly = _mm512_fnmadd_ps(vec_fx, vec_ln2f, vec_src);
|
| 278 |
+
|
| 279 |
+
// compute polynomial
|
| 280 |
+
auto vec_res =
|
| 281 |
+
_mm512_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4);
|
| 282 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3);
|
| 283 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2);
|
| 284 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1);
|
| 285 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_one);
|
| 286 |
+
|
| 287 |
+
// compute 2^(n-1)
|
| 288 |
+
auto vec_exp_number = _mm512_sub_ps(vec_fx, vec_one);
|
| 289 |
+
auto vec_exp_number_i = _mm512_cvtps_epi32(vec_exp_number);
|
| 290 |
+
auto vec_two_pow_n_i = _mm512_add_epi32(vec_exp_number_i, vec_127);
|
| 291 |
+
vec_two_pow_n_i = _mm512_slli_epi32(vec_two_pow_n_i, n_mantissa_bits);
|
| 292 |
+
auto vec_two_pow_n = _mm512_castsi512_ps(vec_two_pow_n_i);
|
| 293 |
+
vec_two_pow_n =
|
| 294 |
+
_mm512_mask_blend_ps(less_ln_flt_min_mask, vec_two_pow_n, vec_zero);
|
| 295 |
+
|
| 296 |
+
// y = y * 2^n
|
| 297 |
+
vec_res = _mm512_mul_ps(vec_res, vec_two_pow_n);
|
| 298 |
+
vec_res = _mm512_mul_ps(vec_res, vec_two);
|
| 299 |
+
return vec_res;
|
| 300 |
+
}
|
| 301 |
+
Vectorized<float> fmod(const Vectorized<float>& q) const {
|
| 302 |
+
return Vectorized<float>(Sleef_fmodf16(values, q));
|
| 303 |
+
}
|
| 304 |
+
Vectorized<float> log() const {
|
| 305 |
+
return Vectorized<float>(Sleef_logf16_u10(values));
|
| 306 |
+
}
|
| 307 |
+
Vectorized<float> log2() const {
|
| 308 |
+
return Vectorized<float>(Sleef_log2f16_u10(values));
|
| 309 |
+
}
|
| 310 |
+
Vectorized<float> log10() const {
|
| 311 |
+
return Vectorized<float>(Sleef_log10f16_u10(values));
|
| 312 |
+
}
|
| 313 |
+
Vectorized<float> log1p() const {
|
| 314 |
+
return Vectorized<float>(Sleef_log1pf16_u10(values));
|
| 315 |
+
}
|
| 316 |
+
Vectorized<float> frac() const;
|
| 317 |
+
Vectorized<float> sin() const {
|
| 318 |
+
return Vectorized<float>(Sleef_sinf16_u35(values));
|
| 319 |
+
}
|
| 320 |
+
Vectorized<float> sinh() const {
|
| 321 |
+
return Vectorized<float>(Sleef_sinhf16_u10(values));
|
| 322 |
+
}
|
| 323 |
+
Vectorized<float> cos() const {
|
| 324 |
+
return Vectorized<float>(Sleef_cosf16_u35(values));
|
| 325 |
+
}
|
| 326 |
+
Vectorized<float> cosh() const {
|
| 327 |
+
return Vectorized<float>(Sleef_coshf16_u10(values));
|
| 328 |
+
}
|
| 329 |
+
Vectorized<float> ceil() const {
|
| 330 |
+
return _mm512_ceil_ps(values);
|
| 331 |
+
}
|
| 332 |
+
Vectorized<float> floor() const {
|
| 333 |
+
return _mm512_floor_ps(values);
|
| 334 |
+
}
|
| 335 |
+
Vectorized<float> hypot(const Vectorized<float> &b) const {
|
| 336 |
+
return Vectorized<float>(Sleef_hypotf16_u05(values, b));
|
| 337 |
+
}
|
| 338 |
+
Vectorized<float> i0() const {
|
| 339 |
+
return map(calc_i0);
|
| 340 |
+
}
|
| 341 |
+
Vectorized<float> i0e() const {
|
| 342 |
+
return map(calc_i0e);
|
| 343 |
+
}
|
| 344 |
+
Vectorized<float> digamma() const {
|
| 345 |
+
return map(calc_digamma);
|
| 346 |
+
}
|
| 347 |
+
Vectorized<float> igamma(const Vectorized<float> &x) const {
|
| 348 |
+
__at_align__ float tmp[size()];
|
| 349 |
+
__at_align__ float tmp_x[size()];
|
| 350 |
+
store(tmp);
|
| 351 |
+
x.store(tmp_x);
|
| 352 |
+
for (const auto i : c10::irange(size())) {
|
| 353 |
+
tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
|
| 354 |
+
}
|
| 355 |
+
return loadu(tmp);
|
| 356 |
+
}
|
| 357 |
+
Vectorized<float> igammac(const Vectorized<float> &x) const {
|
| 358 |
+
__at_align__ float tmp[size()];
|
| 359 |
+
__at_align__ float tmp_x[size()];
|
| 360 |
+
store(tmp);
|
| 361 |
+
x.store(tmp_x);
|
| 362 |
+
for (const auto i : c10::irange(size())) {
|
| 363 |
+
tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
|
| 364 |
+
}
|
| 365 |
+
return loadu(tmp);
|
| 366 |
+
}
|
| 367 |
+
Vectorized<float> neg() const {
|
| 368 |
+
return _mm512_xor_ps(_mm512_set1_ps(-0.f), values);
|
| 369 |
+
}
|
| 370 |
+
Vectorized<float> nextafter(const Vectorized<float> &b) const {
|
| 371 |
+
return Vectorized<float>(Sleef_nextafterf16(values, b));
|
| 372 |
+
}
|
| 373 |
+
Vectorized<float> round() const {
|
| 374 |
+
return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 375 |
+
}
|
| 376 |
+
Vectorized<float> tan() const {
|
| 377 |
+
return Vectorized<float>(Sleef_tanf16_u10(values));
|
| 378 |
+
}
|
| 379 |
+
Vectorized<float> tanh() const {
|
| 380 |
+
return Vectorized<float>(Sleef_tanhf16_u10(values));
|
| 381 |
+
}
|
| 382 |
+
Vectorized<float> trunc() const {
|
| 383 |
+
return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 384 |
+
}
|
| 385 |
+
Vectorized<float> lgamma() const {
|
| 386 |
+
return Vectorized<float>(Sleef_lgammaf16_u10(values));
|
| 387 |
+
}
|
| 388 |
+
Vectorized<float> sqrt() const {
|
| 389 |
+
return _mm512_sqrt_ps(values);
|
| 390 |
+
}
|
| 391 |
+
Vectorized<float> reciprocal() const {
|
| 392 |
+
return _mm512_div_ps(_mm512_set1_ps(1), values);
|
| 393 |
+
}
|
| 394 |
+
Vectorized<float> rsqrt() const {
|
| 395 |
+
return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values));
|
| 396 |
+
}
|
| 397 |
+
Vectorized<float> pow(const Vectorized<float> &b) const {
|
| 398 |
+
return Vectorized<float>(Sleef_powf16_u10(values, b));
|
| 399 |
+
}
|
| 400 |
+
// Comparison using the _CMP_**_OQ predicate.
|
| 401 |
+
// `O`: get false if an operand is NaN
|
| 402 |
+
// `Q`: do not raise if an operand is NaN
|
| 403 |
+
Vectorized<float> operator==(const Vectorized<float>& other) const {
|
| 404 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
|
| 405 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 406 |
+
0xFFFFFFFF));
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
Vectorized<float> operator!=(const Vectorized<float>& other) const {
|
| 410 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
|
| 411 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 412 |
+
0xFFFFFFFF));
|
| 413 |
+
}
|
| 414 |
+
|
| 415 |
+
Vectorized<float> operator<(const Vectorized<float>& other) const {
|
| 416 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ);
|
| 417 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 418 |
+
0xFFFFFFFF));
|
| 419 |
+
}
|
| 420 |
+
|
| 421 |
+
Vectorized<float> operator<=(const Vectorized<float>& other) const {
|
| 422 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ);
|
| 423 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 424 |
+
0xFFFFFFFF));
|
| 425 |
+
}
|
| 426 |
+
|
| 427 |
+
  // Lane-wise greater-than; NaN lanes yield false (ordered, quiet predicate).
  Vectorized<float> operator>(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
                                                      0xFFFFFFFF));
  }
|
| 432 |
+
|
| 433 |
+
  // Lane-wise greater-or-equal; NaN lanes yield false (ordered, quiet
  // predicate).
  Vectorized<float> operator>=(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
                                                      0xFFFFFFFF));
  }
|
| 438 |
+
|
| 439 |
+
Vectorized<float> eq(const Vectorized<float>& other) const;
|
| 440 |
+
Vectorized<float> ne(const Vectorized<float>& other) const;
|
| 441 |
+
Vectorized<float> gt(const Vectorized<float>& other) const;
|
| 442 |
+
Vectorized<float> ge(const Vectorized<float>& other) const;
|
| 443 |
+
Vectorized<float> lt(const Vectorized<float>& other) const;
|
| 444 |
+
Vectorized<float> le(const Vectorized<float>& other) const;
|
| 445 |
+
};
|
| 446 |
+
|
| 447 |
+
// Elementwise float addition over all 16 lanes.
template <>
Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_add_ps(a, b);
}
|
| 451 |
+
|
| 452 |
+
// Elementwise float subtraction over all 16 lanes.
template <>
Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_sub_ps(a, b);
}
|
| 456 |
+
|
| 457 |
+
// Elementwise float multiplication over all 16 lanes.
template <>
Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_mul_ps(a, b);
}
|
| 461 |
+
|
| 462 |
+
// Elementwise float division over all 16 lanes.
template <>
Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_div_ps(a, b);
}
|
| 466 |
+
|
| 467 |
+
// frac. Implement this here so we can use subtraction
|
| 468 |
+
inline Vectorized<float> Vectorized<float>::frac() const {
|
| 469 |
+
return *this - this->trunc();
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
  auto zero_vec = _mm512_set1_epi32(0);
  auto max = _mm512_max_ps(a, b);
  // _CMP_UNORD_Q flags every lane where at least one operand is NaN.
  auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
  // Expand the mask into an all-ones (0xFFFFFFFF) bit pattern per NaN lane.
  auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
                                                          0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN: OR-ing it over the max result
  // forces NaN lanes to NaN while leaving other lanes untouched (OR with 0).
  return _mm512_or_ps(max, isnan);
}
|
| 484 |
+
|
| 485 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
  auto zero_vec = _mm512_set1_epi32(0);
  auto min = _mm512_min_ps(a, b);
  // _CMP_UNORD_Q flags every lane where at least one operand is NaN.
  auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
  // Expand the mask into an all-ones (0xFFFFFFFF) bit pattern per NaN lane.
  auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
                                                          0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  return _mm512_or_ps(min, isnan);
}
|
| 497 |
+
|
| 498 |
+
// Clamp a into [min, max] lane-wise. The max-then-min order matters for NaN
// inputs (AVX-512 min/max return the second operand when the first is NaN),
// so do not reorder.
template <>
Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
  return _mm512_min_ps(max, _mm512_max_ps(min, a));
}
|
| 502 |
+
|
| 503 |
+
// Upper-bound clamp: lane-wise min(a, max).
template <>
Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
  return _mm512_min_ps(max, a);
}
|
| 507 |
+
|
| 508 |
+
// Lower-bound clamp: lane-wise max(a, min).
template <>
Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
  return _mm512_max_ps(min, a);
}
|
| 512 |
+
|
| 513 |
+
// Bitwise AND of the raw float bit patterns (used for masking).
template <>
Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_and_ps(a, b);
}
|
| 517 |
+
|
| 518 |
+
// Bitwise OR of the raw float bit patterns.
template <>
Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_or_ps(a, b);
}
|
| 522 |
+
|
| 523 |
+
// Bitwise XOR of the raw float bit patterns.
template <>
Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_xor_ps(a, b);
}
|
| 527 |
+
|
| 528 |
+
// Numeric equality: 1.0f where equal, 0.0f elsewhere. ANDing the all-ones
// compare mask with 1.0f converts the bitmask into a numeric boolean.
inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
  return (*this == other) & Vectorized<float>(1.0f);
}
|
| 531 |
+
|
| 532 |
+
// Numeric inequality: 1.0f where not equal (incl. NaN lanes), 0.0f elsewhere.
inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
  return (*this != other) & Vectorized<float>(1.0f);
}
|
| 535 |
+
|
| 536 |
+
// Numeric greater-than: 1.0f where this > other, 0.0f elsewhere.
inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
  return (*this > other) & Vectorized<float>(1.0f);
}
|
| 539 |
+
|
| 540 |
+
// Numeric greater-or-equal: 1.0f where this >= other, 0.0f elsewhere.
inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
  return (*this >= other) & Vectorized<float>(1.0f);
}
|
| 543 |
+
|
| 544 |
+
// Numeric less-than: 1.0f where this < other, 0.0f elsewhere.
inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
  return (*this < other) & Vectorized<float>(1.0f);
}
|
| 547 |
+
|
| 548 |
+
// Numeric less-or-equal: 1.0f where this <= other, 0.0f elsewhere.
inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
  return (*this <= other) & Vectorized<float>(1.0f);
}
|
| 551 |
+
|
| 552 |
+
// Vectorized copy of n floats from src to dst: 16 elements per iteration via
// unaligned AVX-512 loads/stores, then a scalar loop for the tail.
template <>
inline void convert(const float* src, float* dst, int64_t n) {
  int64_t i;
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
    _mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i));
  }
#ifndef __msvc_cl__
#pragma unroll
#endif
  // Remaining n % 16 elements, copied one at a time.
  for (; i < n; i++) {
    dst[i] = src[i];
  }
}
|
| 568 |
+
|
| 569 |
+
// Fused multiply-add: a * b + c with a single rounding step per lane.
template <>
Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
  return _mm512_fmadd_ps(a, b, c);
}
|
| 573 |
+
|
| 574 |
+
// Fused multiply-subtract: a * b - c with a single rounding step per lane.
template <>
Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
  return _mm512_fmsub_ps(a, b, c);
}
|
| 578 |
+
|
| 579 |
+
// TODO(jgong5): rewrite with ATEN vectorized (need to add unpack and shuffle)
// Used by Inductor CPP codegen
// Code referred to FBGEMM:
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L230-L304
// kernel for transposing mxn where m, n <= 16
// M + (M + 1) / 2 * 2 + (M + 3) / 4 * 4 + (M + 7) / 8 * 8 + 2 * N instructions
//
// Transposes an MxN float tile (M, N <= 16) from src (row stride ld_src) into
// dst (row stride ld_dst) using a 4-stage in-register butterfly network:
// 32-bit unpacks, 64-bit unpacks, 128-bit lane shuffles, then a final
// cross-half shuffle. Partial tiles are handled with masked loads/stores.
inline void transpose_mxn_16x16(const float* src, int64_t ld_src, float* dst, int64_t ld_dst, int M, int N) {
  TORCH_CHECK(M <= 16 && N <= 16, "transpose_mxn<float> expects M, N <= 16.");
  // load from src to registers
  __m512 input[16];
  int i;
  if (N == 16) {
    for (i = 0; i < M; ++i) {
      input[i] = _mm512_loadu_ps(&src[i * ld_src]);
    }
  } else {
    // Masked load reads only the first N columns of each row.
    __mmask16 src_mask = (1 << N) - 1;
    for (i = 0; i < M; ++i) {
      input[i] = _mm512_maskz_loadu_ps(src_mask, &src[i * ld_src]);
    }
  }
  for (; i < 16; ++i) {
    // Not really needed but to avoid uninitialized variable warning.
    // Shouldn't be much overhead because xor can be executed in parallel with
    // other instructions.
    input[i] = _mm512_setzero_ps();
  }

  // unpacking and interleaving 32-bit elements
  __m512 temp[16];
  for (i = 0; i < (M + 1) / 2; ++i) {
    temp[2 * i] = _mm512_unpacklo_ps(input[2 * i], input[2 * i + 1]);
    temp[2 * i + 1] = _mm512_unpackhi_ps(input[2 * i], input[2 * i + 1]);
  }
  // Zero the registers the row loop above did not touch.
  for (i = i * 2; i < 16; ++i) {
    temp[i] = _mm512_setzero_ps();
  }

  // unpacking and interleaving 64-bit elements
  for (i = 0; i < (M + 3) / 4; ++i) {
    input[4 * i] = _mm512_castpd_ps(_mm512_unpacklo_pd(
        _mm512_castps_pd(temp[4 * i]), _mm512_castps_pd(temp[4 * i + 2])));
    input[4 * i + 1] = _mm512_castpd_ps(_mm512_unpackhi_pd(
        _mm512_castps_pd(temp[4 * i]), _mm512_castps_pd(temp[4 * i + 2])));
    input[4 * i + 2] = _mm512_castpd_ps(_mm512_unpacklo_pd(
        _mm512_castps_pd(temp[4 * i + 1]), _mm512_castps_pd(temp[4 * i + 3])));
    input[4 * i + 3] = _mm512_castpd_ps(_mm512_unpackhi_pd(
        _mm512_castps_pd(temp[4 * i + 1]), _mm512_castps_pd(temp[4 * i + 3])));
  }

  // shuffle 128-bits (composed of 4 32-bit elements)
  // 0x88 selects the even 128-bit lanes of each operand, 0xdd the odd ones.
  for (i = 0; i < (M + 7) / 8; ++i) {
    temp[8 * i] = _mm512_shuffle_f32x4(input[8 * i], input[8 * i + 4], 0x88);
    temp[8 * i + 1] =
        _mm512_shuffle_f32x4(input[8 * i + 1], input[8 * i + 5], 0x88);
    temp[8 * i + 2] =
        _mm512_shuffle_f32x4(input[8 * i + 2], input[8 * i + 6], 0x88);
    temp[8 * i + 3] =
        _mm512_shuffle_f32x4(input[8 * i + 3], input[8 * i + 7], 0x88);
    temp[8 * i + 4] =
        _mm512_shuffle_f32x4(input[8 * i], input[8 * i + 4], 0xdd);
    temp[8 * i + 5] =
        _mm512_shuffle_f32x4(input[8 * i + 1], input[8 * i + 5], 0xdd);
    temp[8 * i + 6] =
        _mm512_shuffle_f32x4(input[8 * i + 2], input[8 * i + 6], 0xdd);
    temp[8 * i + 7] =
        _mm512_shuffle_f32x4(input[8 * i + 3], input[8 * i + 7], 0xdd);
  }

  // Final stage: combine the two 8-register halves; only the N transposed
  // rows that will actually be stored are computed.
  for (i = 0; i < N; ++i) {
    if (i < 8) {
      input[i] = _mm512_shuffle_f32x4(temp[i], temp[8 + i], 0x88);
    } else {
      input[i] = _mm512_shuffle_f32x4(temp[i - 8], temp[i], 0xdd);
    }
  }

  // store from registers to dst
  if (M == 16) {
    for (i = 0; i < N; ++i) {
      _mm512_storeu_ps(&dst[i * ld_dst], input[i]);
    }
  } else {
    // Masked store writes only the first M columns of each transposed row.
    __mmask16 dst_mask = (1 << M) - 1;
    for (i = 0; i < N; ++i) {
      _mm512_mask_storeu_ps(&dst[i * ld_dst], dst_mask, input[i]);
    }
  }
}
|
| 668 |
+
|
| 669 |
+
template<>
|
| 670 |
+
inline void transpose_mxn<float>(const float* src, int64_t ld_src, float* dst, int64_t ld_dst, int M, int N) {
|
| 671 |
+
int64_t i = 0;
|
| 672 |
+
for (; i < M / 16 * 16; i += 16) {
|
| 673 |
+
int64_t j = 0;
|
| 674 |
+
for (; j < N / 16 * 16; j += 16) {
|
| 675 |
+
transpose_mxn_16x16(
|
| 676 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, 16, 16);
|
| 677 |
+
}
|
| 678 |
+
// handle remainder j
|
| 679 |
+
int nrem = N - j;
|
| 680 |
+
if (nrem > 0) {
|
| 681 |
+
transpose_mxn_16x16(
|
| 682 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, 16, nrem);
|
| 683 |
+
}
|
| 684 |
+
}
|
| 685 |
+
// handle remainder i
|
| 686 |
+
int mrem = M - i;
|
| 687 |
+
if (mrem > 0) {
|
| 688 |
+
int j = 0;
|
| 689 |
+
for (; j < N / 16 * 16; j += 16) {
|
| 690 |
+
transpose_mxn_16x16(
|
| 691 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, mrem, 16);
|
| 692 |
+
}
|
| 693 |
+
// handle remainder j
|
| 694 |
+
int nrem = N - j;
|
| 695 |
+
transpose_mxn_16x16(
|
| 696 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, mrem, nrem);
|
| 697 |
+
}
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
// Compile-time-shaped overload (float only, via enable_if): forwards the
// template dimensions M, N to the runtime transpose_mxn<float> above.
template <typename T, int M, int N,
          typename std::enable_if_t<std::is_same<T, float>::value, int> = 0>
inline void transpose_mxn(const float* src, int64_t ld_src, float* dst, int64_t ld_dst) {
  transpose_mxn<float>(src, ld_src, dst, ld_dst, M, N);
}
|
| 705 |
+
|
| 706 |
+
#endif
|
| 707 |
+
|
| 708 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_int.h
ADDED
|
@@ -0,0 +1,1459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/macros/Macros.h>
|
| 9 |
+
#include <c10/util/irange.h>
|
| 10 |
+
|
| 11 |
+
namespace at {
|
| 12 |
+
namespace vec {
|
| 13 |
+
inline namespace CPU_CAPABILITY {
|
| 14 |
+
|
| 15 |
+
#ifdef CPU_CAPABILITY_AVX512
|
| 16 |
+
|
| 17 |
+
// Thin wrapper over a raw __m512i register; common base for the
// Vectorized<int*> specializations below.
struct Vectorizedi {
protected:
  __m512i values;
  // NOTE(review): the file header warns "DO NOT DEFINE STATIC DATA IN THIS
  // HEADER" (see Note [Do not compile initializers with AVX]); this constexpr
  // braced-init member presumably relies on constant initialization to avoid
  // that problem — confirm against the referenced note.
  static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
  // Bitwise NOT of v, implemented as XOR with all-ones.
  static inline __m512i invert(const __m512i& v) {
    const auto ones = _mm512_set1_epi64(-1);
    return _mm512_xor_si512(ones, v);
  }
public:
  // Default ctor deliberately leaves `values` uninitialized.
  Vectorizedi() {}
  Vectorizedi(__m512i v) : values(v) {}
  // Implicit conversion back to the raw register so wrappers can be passed
  // straight to intrinsics.
  operator __m512i() const {
    return values;
  }
};
|
| 32 |
+
|
| 33 |
+
#else
|
| 34 |
+
|
| 35 |
+
// Empty fallback so code can name Vectorizedi on non-AVX512 builds.
struct Vectorizedi {};  // dummy definition to make Vectorizedi always defined
|
| 36 |
+
|
| 37 |
+
#endif // CPU_CAPABILITY_AVX512
|
| 38 |
+
|
| 39 |
+
#ifdef CPU_CAPABILITY_AVX512
|
| 40 |
+
|
| 41 |
+
// AVX-512 vector of 8 x int64_t.
template <>
class Vectorized<int64_t> : public Vectorizedi {
private:
  // All-ones constant; defined out of line (see the header's note about not
  // defining static data in this header).
  static const Vectorized<int64_t> ones;
public:
  using value_type = int64_t;
  using size_type = int;
  // Number of int64_t lanes in one register.
  static constexpr size_type size() {
    return 8;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized() {}
  // Broadcast a single scalar to all 8 lanes.
  Vectorized(int64_t v) { values = _mm512_set1_epi64(v); }
  // Build from 8 explicit lane values (val1 is lane 0).
  Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4,
             int64_t val5, int64_t val6, int64_t val7, int64_t val8) {
    values = _mm512_setr_epi64(val1, val2, val3, val4,
                               val5, val6, val7, val8);
  }
  // Compile-time blend: bit i of `mask` selects b's lane i, else a's.
  template <int64_t mask>
  static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
    return _mm512_mask_blend_epi64(mask, a.values, b.values);
  }
  // Runtime blend: lanes of `mask` equal to all-ones select b, else a.
  static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
                                    const Vectorized<int64_t>& mask) {
    auto msb_one = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
    auto mask_ = _mm512_cmp_epi64_mask(mask, msb_one, _MM_CMPINT_EQ);
    return _mm512_mask_blend_epi64(mask_, a.values, b.values);
  }
  // Arithmetic sequence: lane i = base + i * step.
  template <typename step_t>
  static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step,
                               base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
  }
  // First `count` lanes from b, remaining lanes from a.
  static Vectorized<int64_t>
  set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
      case 4:
        return blend<15>(a, b);
      case 5:
        return blend<31>(a, b);
      case 6:
        return blend<63>(a, b);
      case 7:
        return blend<127>(a, b);
    }
    // count >= size(): take everything from b.
    return b;
  }
  // Unaligned full-width load of 8 lanes.
  static Vectorized<int64_t> loadu(const void* ptr) {
    return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
  }
  // Load only the first `count` lanes (remaining lanes zeroed).
  static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
    if (count == size()) {
      return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
    } else {
      __mmask8 mask = (1ULL << count) - 1;
      return _mm512_maskz_loadu_epi64(mask, ptr);
    }
  }
  // Store the first `count` lanes to ptr; count == 0 stores nothing.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
      _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
    } else if (count > 0) {
      __mmask8 mask = (1ULL << count) - 1;
      _mm512_mask_storeu_epi64(ptr, mask, values);
    }
  }
  // Per-lane access is intentionally unsupported (keep values in registers).
  const int64_t& operator[](int idx) const = delete;
  int64_t& operator[](int idx) = delete;
  // |x| per lane: for negative lanes, XOR with all-ones then subtract -1,
  // i.e. two's-complement negation; positive lanes pass through unchanged.
  Vectorized<int64_t> abs() const {
    auto is_larger_mask = _mm512_cmpgt_epi64_mask(zero_vector, values);
    auto is_larger = _mm512_mask_set1_epi64(zero_vector, is_larger_mask, 0xFFFFFFFFFFFFFFFF);
    auto inverse = _mm512_xor_si512(values, is_larger);
    return _mm512_sub_epi64(inverse, is_larger);
  }
  // Complex-number interface stubs for a real integer type.
  Vectorized<int64_t> real() const {
    return *this;
  }
  Vectorized<int64_t> imag() const {
    return _mm512_set1_epi64(0);
  }
  Vectorized<int64_t> conj() const {
    return *this;
  }
  Vectorized<int64_t> neg() const;
  // Comparisons return a per-lane bitmask vector: all-ones where the
  // predicate holds, zero elsewhere.
  Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
    auto mask = _mm512_cmpeq_epi64_mask(values, other.values);
    return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
  }
  Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
    auto mask = _mm512_cmpneq_epi64_mask(values, other.values);
    return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
  }
  Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
    auto mask = _mm512_cmplt_epi64_mask(values, other.values);
    return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
  }
  Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
    auto mask = _mm512_cmple_epi64_mask(values, other.values);
    return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
  }
  Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
    auto mask = _mm512_cmpgt_epi64_mask(values, other.values);
    return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
  }
  Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
    auto mask = _mm512_cmpge_epi64_mask(values, other.values);
    return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
  }

  // Numeric-boolean comparison variants (return 1/0 per lane); defined below.
  Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
};
|
| 167 |
+
|
| 168 |
+
template <>
|
| 169 |
+
class Vectorized<int32_t> : public Vectorizedi {
|
| 170 |
+
private:
|
| 171 |
+
static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
|
| 172 |
+
static const Vectorized<int32_t> ones;
|
| 173 |
+
public:
|
| 174 |
+
using value_type = int32_t;
|
| 175 |
+
static constexpr int size() {
|
| 176 |
+
return 16;
|
| 177 |
+
}
|
| 178 |
+
using Vectorizedi::Vectorizedi;
|
| 179 |
+
Vectorized() {}
|
| 180 |
+
Vectorized(int32_t v) { values = _mm512_set1_epi32(v); }
|
| 181 |
+
Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
|
| 182 |
+
int32_t val5, int32_t val6, int32_t val7, int32_t val8,
|
| 183 |
+
int32_t val9, int32_t val10, int32_t val11, int32_t val12,
|
| 184 |
+
int32_t val13, int32_t val14, int32_t val15, int32_t val16) {
|
| 185 |
+
values = _mm512_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8,
|
| 186 |
+
val9, val10, val11, val12, val13, val14, val15, val16);
|
| 187 |
+
}
|
| 188 |
+
template <int64_t mask>
|
| 189 |
+
static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
|
| 190 |
+
return _mm512_mask_blend_epi32(mask, a.values, b.values);
|
| 191 |
+
}
|
| 192 |
+
static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
|
| 193 |
+
const Vectorized<int32_t>& mask) {
|
| 194 |
+
auto msb_one = _mm512_set1_epi32(0xFFFFFFFF);
|
| 195 |
+
auto mask_ = _mm512_cmp_epi32_mask(mask, msb_one, _MM_CMPINT_EQ);
|
| 196 |
+
return _mm512_mask_blend_epi32(mask_, a.values, b.values);
|
| 197 |
+
}
|
| 198 |
+
template <typename step_t>
|
| 199 |
+
static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
|
| 200 |
+
return Vectorized<int32_t>(
|
| 201 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
| 202 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
| 203 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
| 204 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
|
| 205 |
+
}
|
| 206 |
+
static Vectorized<int32_t>
|
| 207 |
+
set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
|
| 208 |
+
switch (count) {
|
| 209 |
+
case 0:
|
| 210 |
+
return a;
|
| 211 |
+
case 1:
|
| 212 |
+
return blend<1>(a, b);
|
| 213 |
+
case 2:
|
| 214 |
+
return blend<3>(a, b);
|
| 215 |
+
case 3:
|
| 216 |
+
return blend<7>(a, b);
|
| 217 |
+
case 4:
|
| 218 |
+
return blend<15>(a, b);
|
| 219 |
+
case 5:
|
| 220 |
+
return blend<31>(a, b);
|
| 221 |
+
case 6:
|
| 222 |
+
return blend<63>(a, b);
|
| 223 |
+
case 7:
|
| 224 |
+
return blend<127>(a, b);
|
| 225 |
+
case 8:
|
| 226 |
+
return blend<255>(a, b);
|
| 227 |
+
case 9:
|
| 228 |
+
return blend<511>(a, b);
|
| 229 |
+
case 10:
|
| 230 |
+
return blend<1023>(a, b);
|
| 231 |
+
case 11:
|
| 232 |
+
return blend<2047>(a, b);
|
| 233 |
+
case 12:
|
| 234 |
+
return blend<4095>(a, b);
|
| 235 |
+
case 13:
|
| 236 |
+
return blend<8191>(a, b);
|
| 237 |
+
case 14:
|
| 238 |
+
return blend<16383>(a, b);
|
| 239 |
+
case 15:
|
| 240 |
+
return blend<32767>(a, b);
|
| 241 |
+
}
|
| 242 |
+
return b;
|
| 243 |
+
}
|
| 244 |
+
// Unaligned full-width load of 16 int32_t lanes.
static Vectorized<int32_t> loadu(const void* ptr) {
  return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
}
// Partial load: reads only the first `count` lanes via a masked load
// and zero-fills the rest (maskz semantics). count == size() takes the
// unmasked fast path.
static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
  if (count == size()) {
    return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
  } else {
    // count < 16 here, so the shift below cannot overflow the mask width.
    __mmask16 mask = (1ULL << count) - 1;
    return _mm512_maskz_loadu_epi32(mask, ptr);
  }
}
// Store the first `count` lanes to (possibly unaligned) memory; lanes
// beyond `count` leave the destination untouched.
void store(void* ptr, int count = size()) const {
  if (count == size()) {
    // ptr need not to be aligned here. See
    // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
    _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
  } else if (count > 0) {
    __mmask16 mask = (1ULL << count) - 1;
    _mm512_mask_storeu_epi32(ptr, mask, values);
  }
}
|
| 265 |
+
// Per-lane element access is disabled: the data lives in a SIMD
// register; use store() to spill lanes to memory instead.
const int32_t& operator[](int idx) const = delete;
int32_t& operator[](int idx) = delete;
// Lane-wise absolute value.
Vectorized<int32_t> abs() const {
  return _mm512_abs_epi32(values);
}
// real()/imag()/conj() mirror the complex-vector interface: for an
// integer vector the real part is the value itself and imag is zero.
Vectorized<int32_t> real() const {
  return *this;
}
Vectorized<int32_t> imag() const {
  return _mm512_set1_epi32(0);
}
Vectorized<int32_t> conj() const {
  return *this;
}
// Defined out-of-line later in this file in terms of operator-.
Vectorized<int32_t> neg() const;
// Comparison operators compute an AVX-512 k-mask and expand it to a
// vector result: lanes where the predicate holds become 0xFFFFFFFF
// (all-ones), all other lanes become 0 (taken from zero_vector).
Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
  auto mask = _mm512_cmpeq_epi32_mask(values, other.values);
  return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
}
Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
  auto mask = _mm512_cmpneq_epi32_mask(values, other.values);
  return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
}
Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
  auto mask = _mm512_cmplt_epi32_mask(values, other.values);
  return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
}
Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
  auto mask = _mm512_cmple_epi32_mask(values, other.values);
  return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
}
Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
  auto mask = _mm512_cmpgt_epi32_mask(values, other.values);
  return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
}
Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
  auto mask = _mm512_cmpge_epi32_mask(values, other.values);
  return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
}
// Named comparison helpers; defined out-of-line (not visible in this chunk).
Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
};
|
| 311 |
+
|
| 312 |
+
template <>
|
| 313 |
+
inline void convert(const int32_t *src, float *dst, int64_t n) {
|
| 314 |
+
int64_t i;
|
| 315 |
+
// int32_t and float have same size
|
| 316 |
+
#ifndef _MSC_VER
|
| 317 |
+
# pragma unroll
|
| 318 |
+
#endif
|
| 319 |
+
for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
|
| 320 |
+
auto input_vec = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i));
|
| 321 |
+
auto output_vec = _mm512_cvtepi32_ps(input_vec);
|
| 322 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
|
| 323 |
+
}
|
| 324 |
+
#ifndef _MSC_VER
|
| 325 |
+
# pragma unroll
|
| 326 |
+
#endif
|
| 327 |
+
for (; i < n; i++) {
|
| 328 |
+
dst[i] = static_cast<float>(src[i]);
|
| 329 |
+
}
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
template <>
|
| 333 |
+
inline void convert(const int32_t *src, double *dst, int64_t n) {
|
| 334 |
+
int64_t i;
|
| 335 |
+
// int32_t has half the size of double
|
| 336 |
+
#ifndef _MSC_VER
|
| 337 |
+
# pragma unroll
|
| 338 |
+
#endif
|
| 339 |
+
for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
|
| 340 |
+
auto input_256_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
|
| 341 |
+
auto output_vec = _mm512_cvtepi32_pd(input_256_vec);
|
| 342 |
+
_mm512_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
|
| 343 |
+
}
|
| 344 |
+
#ifndef _MSC_VER
|
| 345 |
+
# pragma unroll
|
| 346 |
+
#endif
|
| 347 |
+
for (; i < n; i++) {
|
| 348 |
+
dst[i] = static_cast<double>(src[i]);
|
| 349 |
+
}
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
// Specialization for int16_t: 32 lanes packed into one 512-bit register
// (the `values` member inherited from Vectorizedi).
template <>
class Vectorized<int16_t> : public Vectorizedi {
private:
  static const Vectorized<int16_t> ones;
  // All-zero register used as the "false" source when expanding k-masks
  // into vector comparison results.
  static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
public:
  using value_type = int16_t;
  // Number of int16_t lanes in one vector (512 / 16).
  static constexpr int size() {
    return 32;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized() {}
  // Broadcast a scalar to all 32 lanes.
  Vectorized(int16_t v) { values = _mm512_set1_epi16(v); }
  // Lane-by-lane constructor: val1 is the lowest lane, so the arguments
  // are forwarded to _mm512_set_epi16 in reverse order.
  Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
         int16_t val5, int16_t val6, int16_t val7, int16_t val8,
         int16_t val9, int16_t val10, int16_t val11, int16_t val12,
         int16_t val13, int16_t val14, int16_t val15, int16_t val16,
         int16_t val17, int16_t val18, int16_t val19, int16_t val20,
         int16_t val21, int16_t val22, int16_t val23, int16_t val24,
         int16_t val25, int16_t val26, int16_t val27, int16_t val28,
         int16_t val29, int16_t val30, int16_t val31, int16_t val32) {
    values = _mm512_set_epi16(val32, val31, val30, val29, val28, val27, val26, val25,
                              val24, val23, val22, val21, val20, val19, val18, val17,
                              val16, val15, val14, val13, val12, val11, val10, val9,
                              val8, val7, val6, val5, val4, val3, val2, val1);
  }
  // Compile-time blend: bit i of `mask` set selects lane i from b,
  // otherwise from a.
  template <int64_t mask>
  static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
    return _mm512_mask_blend_epi16(mask, a.values, b.values);
  }
  // Runtime blend: lanes of `mask` equal to all-ones (0xFFFF) select b,
  // all other lanes select a.
  static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
                                const Vectorized<int16_t>& mask) {
    auto msb_one = _mm512_set1_epi16(0xFFFF);
    auto mask_ = _mm512_cmp_epi16_mask(mask, msb_one, _MM_CMPINT_EQ);
    return _mm512_mask_blend_epi16(mask_, a.values, b.values);
  }
  // Arithmetic sequence: lane i holds base + i * step.
  template <typename step_t>
  static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<int16_t>(
      base,             base +      step, base +  2 * step, base +  3 * step,
      base +  4 * step, base +  5 * step, base +  6 * step, base +  7 * step,
      base +  8 * step, base +  9 * step, base + 10 * step, base + 11 * step,
      base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
      base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
      base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
      base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
      base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step
    );
  }
  // First `count` lanes from b, the rest from a, via a compile-time
  // blend mask of `count` low bits; out-of-range counts return b.
  static Vectorized<int16_t>
  set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<0x1>(a, b);
      case 2:
        return blend<0x3>(a, b);
      case 3:
        return blend<0x7>(a, b);
      case 4:
        return blend<0xF>(a, b);
      case 5:
        return blend<0x1F>(a, b);
      case 6:
        return blend<0x3F>(a, b);
      case 7:
        return blend<0x7F>(a, b);
      case 8:
        return blend<0xFF>(a, b);
      case 9:
        return blend<0x1FF>(a, b);
      case 10:
        return blend<0x3FF>(a, b);
      case 11:
        return blend<0x7FF>(a, b);
      case 12:
        return blend<0xFFF>(a, b);
      case 13:
        return blend<0x1FFF>(a, b);
      case 14:
        return blend<0x3FFF>(a, b);
      case 15:
        return blend<0x7FFF>(a, b);
      case 16:
        return blend<0xFFFF>(a, b);
      case 17:
        return blend<0x1FFFF>(a, b);
      case 18:
        return blend<0x3FFFF>(a, b);
      case 19:
        return blend<0x7FFFF>(a, b);
      case 20:
        return blend<0xFFFFF>(a, b);
      case 21:
        return blend<0x1FFFFF>(a, b);
      case 22:
        return blend<0x3FFFFF>(a, b);
      case 23:
        return blend<0x7FFFFF>(a, b);
      case 24:
        return blend<0xFFFFFF>(a, b);
      case 25:
        return blend<0x1FFFFFF>(a, b);
      case 26:
        return blend<0x3FFFFFF>(a, b);
      case 27:
        return blend<0x7FFFFFF>(a, b);
      case 28:
        return blend<0xFFFFFFF>(a, b);
      case 29:
        return blend<0x1FFFFFFF>(a, b);
      case 30:
        return blend<0x3FFFFFFF>(a, b);
      case 31:
        return blend<0x7FFFFFFF>(a, b);
    }
    return b;
  }
  // Unaligned full-width load of 32 int16_t lanes.
  static Vectorized<int16_t> loadu(const void* ptr) {
    return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
  }
  // Partial load: first `count` lanes are read, remaining lanes are
  // zero-filled (maskz semantics).
  static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
    if (count == size()) {
      return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
    } else {
      __mmask32 mask = (1ULL << count) - 1;
      return _mm512_maskz_loadu_epi16(mask, ptr);
    }
  }
  // Store the first `count` lanes; memory past `count` is untouched.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
      _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
    } else if (count > 0) {
      __mmask32 mask = (1ULL << count) - 1;
      _mm512_mask_storeu_epi16(ptr, mask, values);
    }
  }
  // Per-lane access is disabled; use store() to spill to memory.
  const int16_t& operator[](int idx) const = delete;
  int16_t& operator[](int idx) = delete;
  // Lane-wise absolute value.
  Vectorized<int16_t> abs() const {
    return _mm512_abs_epi16(values);
  }
  // Complex-style accessors: real part is the value itself, imag is zero.
  Vectorized<int16_t> real() const {
    return *this;
  }
  Vectorized<int16_t> imag() const {
    return _mm512_set1_epi16(0);
  }
  Vectorized<int16_t> conj() const {
    return *this;
  }
  // Defined out-of-line later in this file in terms of operator-.
  Vectorized<int16_t> neg() const;
  // Comparisons expand the AVX-512 k-mask into a vector: lanes where the
  // predicate holds become 0xFFFF, the rest are 0 (from zero_vector).
  Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
    auto mask = _mm512_cmpeq_epi16_mask(values, other.values);
    return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
  }
  Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
    auto mask = _mm512_cmpneq_epi16_mask(values, other.values);
    return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
  }
  Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
    auto mask = _mm512_cmplt_epi16_mask(values, other.values);
    return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
  }
  Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
    auto mask = _mm512_cmple_epi16_mask(values, other.values);
    return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
  }
  Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
    auto mask = _mm512_cmpgt_epi16_mask(values, other.values);
    return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
  }
  Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
    auto mask = _mm512_cmpge_epi16_mask(values, other.values);
    return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
  }

  // Named comparison helpers; defined out-of-line (not visible here).
  Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
};
|
| 539 |
+
|
| 540 |
+
// Common base for the 8-bit specializations: 64 lanes of int8_t or
// uint8_t packed into one 512-bit register. The signed/unsigned
// specializations below derive from this and add the comparison and
// abs/neg operations that differ between the two.
template <typename T>
class Vectorized8 : public Vectorizedi {
  static_assert(
    std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>,
    "Only int8_t/uint8_t are supported");
protected:
  // All-zero register used as the "false" source when expanding k-masks.
  static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
  static const Vectorized<T> ones;
public:
  using value_type = T;
  // Number of 8-bit lanes in one vector (512 / 8).
  static constexpr int size() {
    return 64;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized8() {}
  // Broadcast a scalar to all 64 lanes.
  Vectorized8(T v) { values = _mm512_set1_epi8(v); }
  // Lane-by-lane constructor: val1 is the lowest lane, so arguments are
  // forwarded to _mm512_set_epi8 in reverse order.
  Vectorized8(T val1, T val2, T val3, T val4,
         T val5, T val6, T val7, T val8,
         T val9, T val10, T val11, T val12,
         T val13, T val14, T val15, T val16,
         T val17, T val18, T val19, T val20,
         T val21, T val22, T val23, T val24,
         T val25, T val26, T val27, T val28,
         T val29, T val30, T val31, T val32,
         T val33, T val34, T val35, T val36,
         T val37, T val38, T val39, T val40,
         T val41, T val42, T val43, T val44,
         T val45, T val46, T val47, T val48,
         T val49, T val50, T val51, T val52,
         T val53, T val54, T val55, T val56,
         T val57, T val58, T val59, T val60,
         T val61, T val62, T val63, T val64){
    values = _mm512_set_epi8(val64, val63, val62, val61, val60, val59, val58, val57,
                             val56, val55, val54, val53, val52, val51, val50, val49,
                             val48, val47, val46, val45, val44, val43, val42, val41,
                             val40, val39, val38, val37, val36, val35, val34, val33,
                             val32, val31, val30, val29, val28, val27, val26, val25,
                             val24, val23, val22, val21, val20, val19, val18, val17,
                             val16, val15, val14, val13, val12, val11, val10, val9,
                             val8, val7, val6, val5, val4, val3, val2, val1);
  }
  // Compile-time blend: bit i of `mask` set selects lane i from b.
  template <int64_t mask>
  static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
    return _mm512_mask_blend_epi8(mask, a.values, b.values);
  }
  // Arithmetic sequence: lane i holds base + i * step.
  template <typename step_t>
  static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<T>(
      base,             base +      step, base +  2 * step, base +  3 * step,
      base +  4 * step, base +  5 * step, base +  6 * step, base +  7 * step,
      base +  8 * step, base +  9 * step, base + 10 * step, base + 11 * step,
      base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
      base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
      base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
      base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
      base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step,
      base + 32 * step, base + 33 * step, base + 34 * step, base + 35 * step,
      base + 36 * step, base + 37 * step, base + 38 * step, base + 39 * step,
      base + 40 * step, base + 41 * step, base + 42 * step, base + 43 * step,
      base + 44 * step, base + 45 * step, base + 46 * step, base + 47 * step,
      base + 48 * step, base + 49 * step, base + 50 * step, base + 51 * step,
      base + 52 * step, base + 53 * step, base + 54 * step, base + 55 * step,
      base + 56 * step, base + 57 * step, base + 58 * step, base + 59 * step,
      base + 60 * step, base + 61 * step, base + 62 * step, base + 63 * step);
  }
  // First `count` lanes from b, the rest from a, via a compile-time
  // blend mask of `count` low bits; out-of-range counts return b.
  static Vectorized<T>
  set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<0x1>(a, b);
      case 2:
        return blend<0x3>(a, b);
      case 3:
        return blend<0x7>(a, b);
      case 4:
        return blend<0xF>(a, b);
      case 5:
        return blend<0x1F>(a, b);
      case 6:
        return blend<0x3F>(a, b);
      case 7:
        return blend<0x7F>(a, b);
      case 8:
        return blend<0xFF>(a, b);
      case 9:
        return blend<0x1FF>(a, b);
      case 10:
        return blend<0x3FF>(a, b);
      case 11:
        return blend<0x7FF>(a, b);
      case 12:
        return blend<0xFFF>(a, b);
      case 13:
        return blend<0x1FFF>(a, b);
      case 14:
        return blend<0x3FFF>(a, b);
      case 15:
        return blend<0x7FFF>(a, b);
      case 16:
        return blend<0xFFFF>(a, b);
      case 17:
        return blend<0x1FFFF>(a, b);
      case 18:
        return blend<0x3FFFF>(a, b);
      case 19:
        return blend<0x7FFFF>(a, b);
      case 20:
        return blend<0xFFFFF>(a, b);
      case 21:
        return blend<0x1FFFFF>(a, b);
      case 22:
        return blend<0x3FFFFF>(a, b);
      case 23:
        return blend<0x7FFFFF>(a, b);
      case 24:
        return blend<0xFFFFFF>(a, b);
      case 25:
        return blend<0x1FFFFFF>(a, b);
      case 26:
        return blend<0x3FFFFFF>(a, b);
      case 27:
        return blend<0x7FFFFFF>(a, b);
      case 28:
        return blend<0xFFFFFFF>(a, b);
      case 29:
        return blend<0x1FFFFFFF>(a, b);
      case 30:
        return blend<0x3FFFFFFF>(a, b);
      case 31:
        return blend<0x7FFFFFFF>(a, b);
      case 32:
        return blend<0xFFFFFFFF>(a, b);
      case 33:
        return blend<0x1FFFFFFFF>(a, b);
      case 34:
        return blend<0x3FFFFFFFF>(a, b);
      case 35:
        return blend<0x7FFFFFFFF>(a, b);
      case 36:
        return blend<0xFFFFFFFFF>(a, b);
      case 37:
        return blend<0x1FFFFFFFFF>(a, b);
      case 38:
        return blend<0x3FFFFFFFFF>(a, b);
      case 39:
        return blend<0x7FFFFFFFFF>(a, b);
      case 40:
        return blend<0xFFFFFFFFFF>(a, b);
      case 41:
        return blend<0x1FFFFFFFFFF>(a, b);
      case 42:
        return blend<0x3FFFFFFFFFF>(a, b);
      case 43:
        return blend<0x7FFFFFFFFFF>(a, b);
      case 44:
        return blend<0xFFFFFFFFFFF>(a, b);
      case 45:
        return blend<0x1FFFFFFFFFFF>(a, b);
      case 46:
        return blend<0x3FFFFFFFFFFF>(a, b);
      case 47:
        return blend<0x7FFFFFFFFFFF>(a, b);
      case 48:
        return blend<0xFFFFFFFFFFFF>(a, b);
      case 49:
        return blend<0x1FFFFFFFFFFFF>(a, b);
      case 50:
        return blend<0x3FFFFFFFFFFFF>(a, b);
      case 51:
        return blend<0x7FFFFFFFFFFFF>(a, b);
      case 52:
        return blend<0xFFFFFFFFFFFFF>(a, b);
      case 53:
        return blend<0x1FFFFFFFFFFFFF>(a, b);
      case 54:
        return blend<0x3FFFFFFFFFFFFF>(a, b);
      case 55:
        return blend<0x7FFFFFFFFFFFFF>(a, b);
      case 56:
        return blend<0xFFFFFFFFFFFFFF>(a, b);
      case 57:
        return blend<0x1FFFFFFFFFFFFFF>(a, b);
      case 58:
        return blend<0x3FFFFFFFFFFFFFF>(a, b);
      case 59:
        return blend<0x7FFFFFFFFFFFFFF>(a, b);
      case 60:
        return blend<0xFFFFFFFFFFFFFFF>(a, b);
      case 61:
        return blend<0x1FFFFFFFFFFFFFFF>(a, b);
      case 62:
        return blend<0x3FFFFFFFFFFFFFFF>(a, b);
      case 63:
        return blend<0x7FFFFFFFFFFFFFFF>(a, b);
    }
    return b;
  }
  // Unaligned full-width load of 64 8-bit lanes.
  static Vectorized<T> loadu(const void* ptr) {
    return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
  }
  static Vectorized<T> loadu_one_fourth(const void* ptr) {
    // Fast path if only load element number of 16.
    // Note: We didn't merge it as fast path of loadu(const void* ptr, T count),
    // Because loadu(const void* ptr, T count) requires zero initialization for upper 384 bits.
    // However, by using _mm512_castsi128_si512, the upper 384 bits of the result are undefined.
    // TODO<leslie> We can use _mm512_zextsi128_si512 in the furture,
    // since gcc 9.3 doesn't support it now.
    __m128i input_128 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr));
    return _mm512_castsi128_si512(input_128);
  }
  // Partial load: first `count` lanes are read, remaining lanes are
  // zero-filled; count == 16 short-circuits to the 128-bit fast path.
  static Vectorized<T> loadu(const void* ptr, T count) {
    if (count == size()) {
      return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
    } else if (count == 16) {
      // Fast path if only load element number of 16
      return loadu_one_fourth(ptr);
    } else {
      __mmask64 mask = (1ULL << count) - 1;
      return _mm512_maskz_loadu_epi8(mask, ptr);
    }
  }
  // Store the first `count` lanes; memory past `count` is untouched.
  // count == 16 uses a plain 128-bit store of the low lanes.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
      _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
    } else if (count > 0) {
      if (count == 16) {
        // Fast path if only store element number of 16
        _mm_storeu_si128(
          reinterpret_cast<__m128i*>(ptr),
          _mm512_castsi512_si128(values));
      } else {
        __mmask64 mask = (1ULL << count) - 1;
        _mm512_mask_storeu_epi8(ptr, mask, values);
      }
    }
  }
  // Per-lane access is disabled; use store() to spill to memory.
  const T& operator[](int idx) const = delete;
  T& operator[](int idx) = delete;
  // Complex-style accessors: real part is the value itself, imag is zero.
  Vectorized<T> real() const {
    return *this;
  }
  Vectorized<T> imag() const {
    return _mm512_set1_epi8(0);
  }
  Vectorized<T> conj() const {
    return *this;
  }
};
|
| 792 |
+
|
| 793 |
+
// Signed 8-bit specialization: adds signed comparisons, abs and neg on
// top of the shared Vectorized8 machinery.
template<>
class Vectorized<int8_t>: public Vectorized8<int8_t> {
public:
  using Vectorized8::Vectorized8;

  // Runtime blend: lanes of `mask` equal to all-ones (0xFF) select b,
  // all other lanes select a (signed compare builds the k-mask).
  static Vectorized<int8_t> blendv(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b,
                               const Vectorized<int8_t>& mask) {
    auto msb_one = _mm512_set1_epi8(0xFF);
    auto mask_ = _mm512_cmp_epi8_mask(mask, msb_one, _MM_CMPINT_EQ);
    return _mm512_mask_blend_epi8(mask_, a.values, b.values);
  }

  // Defined out-of-line later in this file in terms of operator-.
  Vectorized<int8_t> neg() const;

  // Lane-wise absolute value (signed).
  Vectorized<int8_t> abs() const {
    return _mm512_abs_epi8(values);
  }

  // Signed comparisons: lanes where the predicate holds become 0xFF,
  // the rest are 0 (expanded from the k-mask via zero_vector).
  Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
    auto mask = _mm512_cmpeq_epi8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
    auto mask = _mm512_cmpneq_epi8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
    auto mask = _mm512_cmplt_epi8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
    auto mask = _mm512_cmple_epi8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  // > and >= are expressed through < and <= with swapped operands.
  Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
    return other < *this;
  }
  Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
    return other <= *this;
  }

  // Named comparison helpers; defined out-of-line (not visible here).
  Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
};
|
| 841 |
+
|
| 842 |
+
// Unsigned 8-bit specialization: uses the unsigned (epu8) compare
// intrinsics; abs is the identity for unsigned lanes.
template<>
class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
public:
  using Vectorized8::Vectorized8;

  // Runtime blend: lanes of `mask` equal to all-ones (0xFF) select b,
  // all other lanes select a (unsigned compare builds the k-mask).
  static Vectorized<uint8_t> blendv(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b,
                               const Vectorized<uint8_t>& mask) {
    auto msb_one = _mm512_set1_epi8(0xFF);
    auto mask_ = _mm512_cmp_epu8_mask(mask, msb_one, _MM_CMPINT_EQ);
    return _mm512_mask_blend_epi8(mask_, a.values, b.values);
  }

  // Defined out-of-line later in this file in terms of operator-.
  Vectorized<uint8_t> neg() const;

  // Unsigned values are their own absolute value.
  Vectorized<uint8_t> abs() const {
    return *this;
  }

  // Unsigned comparisons: lanes where the predicate holds become 0xFF,
  // the rest are 0 (expanded from the k-mask via zero_vector).
  Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
    auto mask = _mm512_cmpeq_epu8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
    auto mask = _mm512_cmpneq_epu8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
    auto mask = _mm512_cmplt_epu8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
    auto mask = _mm512_cmple_epu8_mask(values, other.values);
    return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
  }
  // > and >= are expressed through < and <= with swapped operands.
  Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
    return other < *this;
  }
  Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
    return other <= *this;
  }

  // Named comparison helpers; defined out-of-line (not visible here).
  Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
};
|
| 890 |
+
|
| 891 |
+
// Lane-wise addition for each integer width. Note the adds wrap on
// overflow (non-saturating _mm512_add_* intrinsics); int8_t and
// uint8_t share the same _mm512_add_epi8 since two's-complement
// addition is sign-agnostic.
template <>
Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return _mm512_add_epi64(a, b);
}

template <>
Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm512_add_epi32(a, b);
}

template <>
Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm512_add_epi16(a, b);
}

template <>
Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return _mm512_add_epi8(a, b);
}

template <>
Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return _mm512_add_epi8(a, b);
}
|
| 915 |
+
|
| 916 |
+
// Lane-wise subtraction for each integer width (wrapping,
// non-saturating); int8_t and uint8_t share _mm512_sub_epi8 since
// two's-complement subtraction is sign-agnostic.
template <>
Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return _mm512_sub_epi64(a, b);
}

template <>
Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm512_sub_epi32(a, b);
}

template <>
Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm512_sub_epi16(a, b);
}

template <>
Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return _mm512_sub_epi8(a, b);
}

template <>
Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return _mm512_sub_epi8(a, b);
}
|
| 940 |
+
|
| 941 |
+
// Negation. Defined here so we can utilize operator-
// Each neg() is implemented as 0 - x, which wraps for the minimum
// representable value of the signed types (and for any nonzero uint8_t).
inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
  return Vectorized<int64_t>(0) - *this;
}

inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
  return Vectorized<int32_t>(0) - *this;
}

inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
  return Vectorized<int16_t>(0) - *this;
}

inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
  return Vectorized<int8_t>(0) - *this;
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
  return Vectorized<uint8_t>(0) - *this;
}
|
| 961 |
+
|
| 962 |
+
template <>
|
| 963 |
+
Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 964 |
+
return _mm512_mullo_epi64(a, b);
|
| 965 |
+
}
|
| 966 |
+
|
| 967 |
+
template <>
|
| 968 |
+
Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 969 |
+
return _mm512_mullo_epi32(a, b);
|
| 970 |
+
}
|
| 971 |
+
|
| 972 |
+
template <>
|
| 973 |
+
Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 974 |
+
return _mm512_mullo_epi16(a, b);
|
| 975 |
+
}
|
| 976 |
+
|
| 977 |
+
template <typename T, typename Op>
|
| 978 |
+
Vectorized<T> inline int_elementwise_binary_512(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
| 979 |
+
T values_a[Vectorized<T>::size()];
|
| 980 |
+
T values_b[Vectorized<T>::size()];
|
| 981 |
+
a.store(values_a);
|
| 982 |
+
b.store(values_b);
|
| 983 |
+
for (int i = 0; i != Vectorized<T>::size(); i++) {
|
| 984 |
+
values_a[i] = op(values_a[i], values_b[i]);
|
| 985 |
+
}
|
| 986 |
+
return Vectorized<T>::loadu(values_a);
|
| 987 |
+
}
|
| 988 |
+
|
| 989 |
+
template <>
|
| 990 |
+
Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 991 |
+
// We don't have an instruction for multiplying int8_t
|
| 992 |
+
#ifndef CPU_CAPABILITY_AVX512
|
| 993 |
+
return int_elementwise_binary_512(a, b, std::multiplies<int8_t>());
|
| 994 |
+
#else
|
| 995 |
+
__m512i mask00FF = _mm512_set1_epi16(0x00FF);
|
| 996 |
+
__m512i a_lo = _mm512_srai_epi16(_mm512_slli_epi16(a, 8), 8);
|
| 997 |
+
__m512i b_lo = _mm512_srai_epi16(_mm512_slli_epi16(b, 8), 8);
|
| 998 |
+
__m512i a_hi = _mm512_srai_epi16(a, 8);
|
| 999 |
+
__m512i b_hi = _mm512_srai_epi16(b, 8);
|
| 1000 |
+
__m512i res_lo = _mm512_and_si512(_mm512_mullo_epi16(a_lo, b_lo), mask00FF);
|
| 1001 |
+
__m512i res_hi = _mm512_slli_epi16(_mm512_mullo_epi16(a_hi, b_hi), 8);
|
| 1002 |
+
__m512i res = _mm512_or_si512(res_hi, res_lo);
|
| 1003 |
+
return res;
|
| 1004 |
+
#endif
|
| 1005 |
+
}
|
| 1006 |
+
|
| 1007 |
+
template <>
|
| 1008 |
+
Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1009 |
+
// We don't have an instruction for multiplying uint8_t
|
| 1010 |
+
#ifndef CPU_CAPABILITY_AVX512
|
| 1011 |
+
return int_elementwise_binary_512(a, b, std::multiplies<uint8_t>());
|
| 1012 |
+
#else
|
| 1013 |
+
__m512i mask00FF = _mm512_set1_epi16(0x00FF);
|
| 1014 |
+
__m512i a_lo = _mm512_and_si512 (a, mask00FF);
|
| 1015 |
+
__m512i b_lo = _mm512_and_si512 (b, mask00FF);
|
| 1016 |
+
__m512i a_hi = _mm512_srli_epi16(a, 8);
|
| 1017 |
+
__m512i b_hi = _mm512_srli_epi16(b, 8);
|
| 1018 |
+
__m512i res_lo = _mm512_and_si512(_mm512_mullo_epi16(a_lo, b_lo), mask00FF);
|
| 1019 |
+
__m512i res_hi = _mm512_slli_epi16(_mm512_mullo_epi16(a_hi, b_hi), 8);
|
| 1020 |
+
__m512i res = _mm512_or_si512(res_hi, res_lo);
|
| 1021 |
+
return res;
|
| 1022 |
+
#endif
|
| 1023 |
+
}
|
| 1024 |
+
|
| 1025 |
+
template <>
|
| 1026 |
+
Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 1027 |
+
return _mm512_min_epi64(a, b);
|
| 1028 |
+
}
|
| 1029 |
+
|
| 1030 |
+
template <>
|
| 1031 |
+
Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 1032 |
+
return _mm512_min_epi32(a, b);
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
template <>
|
| 1036 |
+
Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 1037 |
+
return _mm512_min_epi16(a, b);
|
| 1038 |
+
}
|
| 1039 |
+
|
| 1040 |
+
template <>
|
| 1041 |
+
Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 1042 |
+
return _mm512_min_epi8(a, b);
|
| 1043 |
+
}
|
| 1044 |
+
|
| 1045 |
+
template <>
|
| 1046 |
+
Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1047 |
+
return _mm512_min_epu8(a, b);
|
| 1048 |
+
}
|
| 1049 |
+
|
| 1050 |
+
template <>
|
| 1051 |
+
Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 1052 |
+
return _mm512_max_epi64(a, b);
|
| 1053 |
+
}
|
| 1054 |
+
|
| 1055 |
+
template <>
|
| 1056 |
+
Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 1057 |
+
return _mm512_max_epi32(a, b);
|
| 1058 |
+
}
|
| 1059 |
+
|
| 1060 |
+
template <>
|
| 1061 |
+
Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 1062 |
+
return _mm512_max_epi16(a, b);
|
| 1063 |
+
}
|
| 1064 |
+
|
| 1065 |
+
template <>
|
| 1066 |
+
Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 1067 |
+
return _mm512_max_epi8(a, b);
|
| 1068 |
+
}
|
| 1069 |
+
|
| 1070 |
+
template <>
|
| 1071 |
+
Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1072 |
+
return _mm512_max_epu8(a, b);
|
| 1073 |
+
}
|
| 1074 |
+
|
| 1075 |
+
template <>
|
| 1076 |
+
Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
|
| 1077 |
+
return _mm512_min_epi64(max_val, _mm512_max_epi64(a, min_val));
|
| 1078 |
+
}
|
| 1079 |
+
|
| 1080 |
+
template <>
|
| 1081 |
+
Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
|
| 1082 |
+
return _mm512_min_epi32(max_val, _mm512_max_epi32(a, min_val));
|
| 1083 |
+
}
|
| 1084 |
+
|
| 1085 |
+
template <>
|
| 1086 |
+
Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
|
| 1087 |
+
return _mm512_min_epi16(max_val, _mm512_max_epi16(a, min_val));
|
| 1088 |
+
}
|
| 1089 |
+
|
| 1090 |
+
template <>
|
| 1091 |
+
Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
|
| 1092 |
+
return _mm512_min_epi8(max_val, _mm512_max_epi8(a, min_val));
|
| 1093 |
+
}
|
| 1094 |
+
|
| 1095 |
+
template <>
|
| 1096 |
+
Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
|
| 1097 |
+
return _mm512_min_epu8(max_val, _mm512_max_epu8(a, min_val));
|
| 1098 |
+
}
|
| 1099 |
+
|
| 1100 |
+
template <>
|
| 1101 |
+
Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
|
| 1102 |
+
return _mm512_min_epi64(max_val, a);
|
| 1103 |
+
}
|
| 1104 |
+
|
| 1105 |
+
template <>
|
| 1106 |
+
Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
|
| 1107 |
+
return _mm512_min_epi32(max_val, a);
|
| 1108 |
+
}
|
| 1109 |
+
|
| 1110 |
+
template <>
|
| 1111 |
+
Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
|
| 1112 |
+
return _mm512_min_epi16(max_val, a);
|
| 1113 |
+
}
|
| 1114 |
+
|
| 1115 |
+
template <>
|
| 1116 |
+
Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
|
| 1117 |
+
return _mm512_min_epi8(max_val, a);
|
| 1118 |
+
}
|
| 1119 |
+
|
| 1120 |
+
template <>
|
| 1121 |
+
Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
|
| 1122 |
+
return _mm512_min_epu8(max_val, a);
|
| 1123 |
+
}
|
| 1124 |
+
|
| 1125 |
+
template <>
|
| 1126 |
+
Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
|
| 1127 |
+
return _mm512_max_epi64(min_val, a);
|
| 1128 |
+
}
|
| 1129 |
+
|
| 1130 |
+
template <>
|
| 1131 |
+
Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
|
| 1132 |
+
return _mm512_max_epi32(min_val, a);
|
| 1133 |
+
}
|
| 1134 |
+
|
| 1135 |
+
template <>
|
| 1136 |
+
Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
|
| 1137 |
+
return _mm512_max_epi16(min_val, a);
|
| 1138 |
+
}
|
| 1139 |
+
|
| 1140 |
+
template <>
|
| 1141 |
+
Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
|
| 1142 |
+
return _mm512_max_epi8(min_val, a);
|
| 1143 |
+
}
|
| 1144 |
+
|
| 1145 |
+
template <>
|
| 1146 |
+
Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
|
| 1147 |
+
return _mm512_max_epu8(min_val, a);
|
| 1148 |
+
}
|
| 1149 |
+
|
| 1150 |
+
template<typename T>
|
| 1151 |
+
Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
|
| 1152 |
+
return Vectorized<int32_t>::loadu(ptr);
|
| 1153 |
+
}
|
| 1154 |
+
|
| 1155 |
+
template<>
|
| 1156 |
+
Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
|
| 1157 |
+
return _mm512_cvtepi8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
|
| 1158 |
+
}
|
| 1159 |
+
|
| 1160 |
+
template<>
|
| 1161 |
+
Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
|
| 1162 |
+
return _mm512_cvtepu8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
|
| 1163 |
+
}
|
| 1164 |
+
|
| 1165 |
+
template <>
|
| 1166 |
+
Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 1167 |
+
return int_elementwise_binary_512(a, b, std::divides<int64_t>());
|
| 1168 |
+
}
|
| 1169 |
+
template <>
|
| 1170 |
+
Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 1171 |
+
return int_elementwise_binary_512(a, b, std::divides<int32_t>());
|
| 1172 |
+
}
|
| 1173 |
+
template <>
|
| 1174 |
+
Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 1175 |
+
return int_elementwise_binary_512(a, b, std::divides<int16_t>());
|
| 1176 |
+
}
|
| 1177 |
+
template <>
|
| 1178 |
+
Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 1179 |
+
return int_elementwise_binary_512(a, b, std::divides<int8_t>());
|
| 1180 |
+
}
|
| 1181 |
+
template <>
|
| 1182 |
+
Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1183 |
+
return int_elementwise_binary_512(a, b, std::divides<uint8_t>());
|
| 1184 |
+
}
|
| 1185 |
+
|
| 1186 |
+
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
|
| 1187 |
+
inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 1188 |
+
return _mm512_and_si512(a, b);
|
| 1189 |
+
}
|
| 1190 |
+
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
|
| 1191 |
+
inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 1192 |
+
return _mm512_or_si512(a, b);
|
| 1193 |
+
}
|
| 1194 |
+
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
|
| 1195 |
+
inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 1196 |
+
return _mm512_xor_si512(a, b);
|
| 1197 |
+
}
|
| 1198 |
+
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
|
| 1199 |
+
inline Vectorized<T> operator~(const Vectorized<T>& a) {
|
| 1200 |
+
return _mm512_xor_si512(a, _mm512_set1_epi32(-1));
|
| 1201 |
+
}
|
| 1202 |
+
|
| 1203 |
+
// Boolean-style comparisons for int64_t lanes: the comparison operators
// produce all-ones masks, which are masked down to 0/1 per lane.
inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
  return (*this == other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
  return (*this != other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
  return (*this > other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
  return (*this >= other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
  return (*this < other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
  return (*this <= other) & Vectorized<int64_t>(1);
}
|
| 1226 |
+
|
| 1227 |
+
// Boolean-style comparisons for int32_t lanes (0/1 results).
inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
  return (*this == other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
  return (*this != other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
  return (*this > other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
  return (*this >= other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
  return (*this < other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
  return (*this <= other) & Vectorized<int32_t>(1);
}
|
| 1250 |
+
|
| 1251 |
+
// Boolean-style comparisons for int16_t lanes (0/1 results).
inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
  return (*this == other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
  return (*this != other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
  return (*this > other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
  return (*this >= other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
  return (*this < other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
  return (*this <= other) & Vectorized<int16_t>(1);
}
|
| 1274 |
+
|
| 1275 |
+
// Boolean-style comparisons for int8_t lanes (0/1 results).
inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
  return (*this == other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
  return (*this != other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
  return (*this > other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
  return (*this >= other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
  return (*this < other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
  return (*this <= other) & Vectorized<int8_t>(1);
}
|
| 1298 |
+
|
| 1299 |
+
// Boolean-style comparisons for uint8_t lanes (0/1 results).
inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
  return (*this == other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
  return (*this != other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
  return (*this > other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
  return (*this >= other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
  return (*this < other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
  return (*this <= other) & Vectorized<uint8_t>(1);
}
|
| 1322 |
+
|
| 1323 |
+
template <bool left_shift, typename T, typename std::enable_if_t<std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>, int> = 0>
|
| 1324 |
+
Vectorized<T> inline shift_512_8(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 1325 |
+
// No vector instruction for shifting int8_t/uint8_t, so emulating
|
| 1326 |
+
// it instead.
|
| 1327 |
+
|
| 1328 |
+
// Control masks for shuffle operation, treating 512 bits as an
|
| 1329 |
+
// array of 8-bit elements, and considering pairs of neighboring
|
| 1330 |
+
// elements. Specifially, a mask named "ctl_M_N" (M,N in [0,1], and
|
| 1331 |
+
// M!=N) is set so that shuffle will move element with index M from
|
| 1332 |
+
// input pair into element with index N in output pair, and element
|
| 1333 |
+
// with index M in output pair will be set to all 0s.
|
| 1334 |
+
__m512i ctl_0_1 = _mm512_set_epi8(62, 0x80, 60, 0x80, 58, 0x80, 56, 0x80,
|
| 1335 |
+
54, 0x80, 52, 0x80, 50, 0x80, 48, 0x80,
|
| 1336 |
+
46, 0x80, 44, 0x80, 42, 0x80, 40, 0x80,
|
| 1337 |
+
38, 0x80, 36, 0x80, 34, 0x80, 32, 0x80,
|
| 1338 |
+
30, 0x80, 28, 0x80, 26, 0x80, 24, 0x80,
|
| 1339 |
+
22, 0x80, 20, 0x80, 18, 0x80, 16, 0x80,
|
| 1340 |
+
14, 0x80, 12, 0x80, 10, 0x80, 8, 0x80,
|
| 1341 |
+
6, 0x80, 4, 0x80, 2, 0x80, 0, 0x80);
|
| 1342 |
+
__m512i ctl_1_0 = _mm512_set_epi8(0x80, 63, 0x80, 61, 0x80, 59, 0x80, 57,
|
| 1343 |
+
0x80, 55, 0x80, 53, 0x80, 51, 0x80, 49,
|
| 1344 |
+
0x80, 47, 0x80, 45, 0x80, 43, 0x80, 41,
|
| 1345 |
+
0x80, 39, 0x80, 37, 0x80, 35, 0x80, 33,
|
| 1346 |
+
0x80, 31, 0x80, 29, 0x80, 27, 0x80, 25,
|
| 1347 |
+
0x80, 23, 0x80, 21, 0x80, 19, 0x80, 17,
|
| 1348 |
+
0x80, 15, 0x80, 13, 0x80, 11, 0x80, 9,
|
| 1349 |
+
0x80, 7, 0x80, 5, 0x80, 3, 0x80, 1);
|
| 1350 |
+
|
| 1351 |
+
// Masks for bitwise and operation, treating 512 bits as an array of
|
| 1352 |
+
// 8-bit elements, and considering them in pairs of neighboring
|
| 1353 |
+
// elements. A mask named "keep_M" (M in [0,1]) is set so that
|
| 1354 |
+
// bitwise and will copy element with index M from input pair into
|
| 1355 |
+
// element with the same index in output pair, while the other
|
| 1356 |
+
// element in output pair will be set to all 0s.
|
| 1357 |
+
__m512i keep_0 = _mm512_set1_epi16(0xFF);
|
| 1358 |
+
__m512i keep_1 = _mm512_set1_epi16(0xFF00);
|
| 1359 |
+
|
| 1360 |
+
// Take each 8-bit element with idx%2==0 from input array to be
|
| 1361 |
+
// shifted and extend it to 16 bits so that 0s are added to the
|
| 1362 |
+
// right. Then, perform shifting on this 16-bit number. Upper 8
|
| 1363 |
+
// bits will be proper result of shifting original 8-bit number, so
|
| 1364 |
+
// write them to result array, into the same position from which
|
| 1365 |
+
// corresponding input element is taken. Also, make sure that
|
| 1366 |
+
// result array elements with idx%2!=0 are set to all 0s.
|
| 1367 |
+
//
|
| 1368 |
+
// Note that number of bits to shift for is extended to 16 bits by
|
| 1369 |
+
// adding 0s to the left. That means this number is not properly
|
| 1370 |
+
// sign-extended for negative values. However, number of bits to
|
| 1371 |
+
// shift is treated as an unsigned integer by respective shift
|
| 1372 |
+
// intrinsics anyway so if negative then either with or without
|
| 1373 |
+
// proper sign extension, it will be interpreted as a number greater
|
| 1374 |
+
// than 32, and the shifting result will be the same.
|
| 1375 |
+
__m512i a0 = _mm512_shuffle_epi8(a, ctl_0_1);
|
| 1376 |
+
__m512i b0 = _mm512_and_si512(b, keep_0);
|
| 1377 |
+
__m512i c0;
|
| 1378 |
+
if (left_shift)
|
| 1379 |
+
c0 = _mm512_sllv_epi16(a0, b0);
|
| 1380 |
+
else
|
| 1381 |
+
if constexpr (std::is_same_v<T, int8_t>)
|
| 1382 |
+
c0 = _mm512_srav_epi16(a0, b0);
|
| 1383 |
+
else
|
| 1384 |
+
c0 = _mm512_srlv_epi16(a0, b0);
|
| 1385 |
+
c0 = _mm512_shuffle_epi8(c0, ctl_1_0);
|
| 1386 |
+
|
| 1387 |
+
// Peform shifting the same way for input array elements with
|
| 1388 |
+
// idx%2==1.
|
| 1389 |
+
__m512i a1 = _mm512_and_si512(a, keep_1);
|
| 1390 |
+
__m512i b1 = _mm512_shuffle_epi8(b, ctl_1_0);
|
| 1391 |
+
__m512i c1;
|
| 1392 |
+
if (left_shift)
|
| 1393 |
+
c1 = _mm512_sllv_epi16(a1, b1);
|
| 1394 |
+
else
|
| 1395 |
+
if constexpr (std::is_same_v<T, int8_t>)
|
| 1396 |
+
c1 = _mm512_srav_epi16(a1, b1);
|
| 1397 |
+
else
|
| 1398 |
+
c1 = _mm512_srlv_epi16(a1, b1);
|
| 1399 |
+
c1 = _mm512_and_si512(c1, keep_1);
|
| 1400 |
+
|
| 1401 |
+
// Merge partial results into the final result.
|
| 1402 |
+
__m512i c = _mm512_or_si512(c0, c1);
|
| 1403 |
+
|
| 1404 |
+
return c;
|
| 1405 |
+
}
|
| 1406 |
+
|
| 1407 |
+
template <>
|
| 1408 |
+
Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 1409 |
+
return _mm512_sllv_epi64(a, b);
|
| 1410 |
+
}
|
| 1411 |
+
|
| 1412 |
+
template <>
|
| 1413 |
+
Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 1414 |
+
return _mm512_sllv_epi32(a, b);
|
| 1415 |
+
}
|
| 1416 |
+
|
| 1417 |
+
template <>
|
| 1418 |
+
Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 1419 |
+
return _mm512_sllv_epi16(a, b);
|
| 1420 |
+
}
|
| 1421 |
+
|
| 1422 |
+
template <>
|
| 1423 |
+
Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 1424 |
+
return shift_512_8<true>(a, b);
|
| 1425 |
+
}
|
| 1426 |
+
|
| 1427 |
+
template <>
|
| 1428 |
+
Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1429 |
+
return shift_512_8<true>(a, b);
|
| 1430 |
+
}
|
| 1431 |
+
|
| 1432 |
+
template <>
|
| 1433 |
+
Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 1434 |
+
return _mm512_srav_epi64(a, b);
|
| 1435 |
+
}
|
| 1436 |
+
|
| 1437 |
+
template <>
|
| 1438 |
+
Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 1439 |
+
return _mm512_srav_epi32(a, b);
|
| 1440 |
+
}
|
| 1441 |
+
|
| 1442 |
+
template <>
|
| 1443 |
+
Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 1444 |
+
return _mm512_srav_epi16(a, b);
|
| 1445 |
+
}
|
| 1446 |
+
|
| 1447 |
+
template <>
|
| 1448 |
+
Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 1449 |
+
return shift_512_8<false>(a, b);
|
| 1450 |
+
}
|
| 1451 |
+
|
| 1452 |
+
template <>
|
| 1453 |
+
Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1454 |
+
return shift_512_8<false>(a, b);
|
| 1455 |
+
}
|
| 1456 |
+
|
| 1457 |
+
#endif
|
| 1458 |
+
|
| 1459 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_mask.h
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec_mask.h>
|
| 6 |
+
|
| 7 |
+
namespace at::vec {
|
| 8 |
+
inline namespace CPU_CAPABILITY {
|
| 9 |
+
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
|
| 11 |
+
|
| 12 |
+
template <typename T, int dst_n, typename mask_t, int mask_n>
|
| 13 |
+
struct VecMaskLoad<
|
| 14 |
+
T,
|
| 15 |
+
dst_n,
|
| 16 |
+
mask_t,
|
| 17 |
+
mask_n,
|
| 18 |
+
typename std::enable_if_t<
|
| 19 |
+
(mask_n == dst_n * 2 && dst_n >= 1) &&
|
| 20 |
+
(std::is_same_v<T, float> || std::is_same_v<T, int32_t>),
|
| 21 |
+
void>> {
|
| 22 |
+
static inline VectorizedN<T, dst_n> apply(
|
| 23 |
+
const T* ptr,
|
| 24 |
+
const VecMask<mask_t, mask_n>& vec_mask) {
|
| 25 |
+
at::vec::Vectorized<T> zero_vec(0);
|
| 26 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 27 |
+
VectorizedN<mask_t, 2> tmp_vec;
|
| 28 |
+
VectorizedN<T, dst_n> result;
|
| 29 |
+
for (int i = 0; i < dst_n; i++) {
|
| 30 |
+
tmp_vec[0] = vec_mask[2 * i];
|
| 31 |
+
tmp_vec[1] = vec_mask[2 * i + 1];
|
| 32 |
+
auto int64_mask = VecMask<mask_t, 2>(tmp_vec).template cast<int64_t, 2>();
|
| 33 |
+
auto int_mask = int64_mask.template cast<int, 1>()[0];
|
| 34 |
+
auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ);
|
| 35 |
+
if constexpr (std::is_same_v<T, float>) {
|
| 36 |
+
result[i] = Vectorized<T>(_mm512_mask_loadu_ps(
|
| 37 |
+
zero_vec, mmask, ptr + i * Vectorized<T>::size()));
|
| 38 |
+
} else {
|
| 39 |
+
result[i] = Vectorized<T>(_mm512_mask_loadu_epi32(
|
| 40 |
+
zero_vec, mmask, ptr + i * Vectorized<T>::size()));
|
| 41 |
+
}
|
| 42 |
+
}
|
| 43 |
+
return result;
|
| 44 |
+
}
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
template <typename T, int dst_n, typename mask_t>
|
| 48 |
+
struct VecMaskLoad<
|
| 49 |
+
T,
|
| 50 |
+
dst_n,
|
| 51 |
+
mask_t,
|
| 52 |
+
dst_n,
|
| 53 |
+
typename std::enable_if_t<
|
| 54 |
+
std::is_same_v<T, float> || std::is_same_v<T, int32_t>,
|
| 55 |
+
void>> {
|
| 56 |
+
static inline VectorizedN<T, dst_n> apply(
|
| 57 |
+
const T* ptr,
|
| 58 |
+
const VecMask<mask_t, dst_n>& vec_mask) {
|
| 59 |
+
at::vec::Vectorized<T> zero_vec(0);
|
| 60 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 61 |
+
VectorizedN<T, dst_n> result;
|
| 62 |
+
#ifndef _MSC_VER
|
| 63 |
+
#pragma unroll
|
| 64 |
+
#endif
|
| 65 |
+
for (int i = 0; i < dst_n; i++) {
|
| 66 |
+
auto tmp_mask = VecMask<mask_t, 1>(vec_mask[i]);
|
| 67 |
+
auto int_mask = tmp_mask.template cast<int, 1>()[0];
|
| 68 |
+
auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ);
|
| 69 |
+
if constexpr (std::is_same_v<T, float>) {
|
| 70 |
+
result[i] = Vectorized<T>(_mm512_mask_loadu_ps(
|
| 71 |
+
zero_vec, mmask, ptr + i * Vectorized<T>::size()));
|
| 72 |
+
} else {
|
| 73 |
+
result[i] = Vectorized<T>(_mm512_mask_loadu_epi32(
|
| 74 |
+
zero_vec, mmask, ptr + i * Vectorized<T>::size()));
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
return result;
|
| 78 |
+
}
|
| 79 |
+
};
|
| 80 |
+
|
| 81 |
+
template <typename data_t, int dst_n, typename mask_t>
|
| 82 |
+
struct VecMaskLoad<
|
| 83 |
+
data_t,
|
| 84 |
+
dst_n,
|
| 85 |
+
mask_t,
|
| 86 |
+
dst_n,
|
| 87 |
+
typename std::enable_if<
|
| 88 |
+
std::is_same_v<data_t, BFloat16> ||
|
| 89 |
+
std::is_same_v<data_t, Half>>::type> {
|
| 90 |
+
static inline VectorizedN<data_t, dst_n> apply(
|
| 91 |
+
const data_t* ptr,
|
| 92 |
+
const VecMask<mask_t, dst_n>& vec_mask) {
|
| 93 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 94 |
+
VectorizedN<data_t, dst_n> result;
|
| 95 |
+
#ifndef _MSC_VER
|
| 96 |
+
#pragma unroll
|
| 97 |
+
#endif
|
| 98 |
+
for (int i = 0; i < dst_n; i++) {
|
| 99 |
+
auto tmp_mask = VecMask<mask_t, 1>(vec_mask[i]);
|
| 100 |
+
auto int_mask = tmp_mask.template cast<int, 2>();
|
| 101 |
+
auto mmask0 = _mm512_cmp_epi32_mask(int_mask[0], all_ones, _MM_CMPINT_EQ);
|
| 102 |
+
auto mmask1 = _mm512_cmp_epi32_mask(int_mask[1], all_ones, _MM_CMPINT_EQ);
|
| 103 |
+
auto zero = _mm256_set1_epi16(0);
|
| 104 |
+
auto temp0 = _mm256_mask_loadu_epi16(
|
| 105 |
+
zero, mmask0, ptr + (2 * i) * Vectorized<int>::size());
|
| 106 |
+
auto temp1 = _mm256_mask_loadu_epi16(
|
| 107 |
+
zero, mmask1, ptr + (2 * i + 1) * Vectorized<int>::size());
|
| 108 |
+
result[i] = Vectorized<data_t>(
|
| 109 |
+
_mm512_inserti32x8(_mm512_castsi256_si512(temp0), temp1, 1));
|
| 110 |
+
}
|
| 111 |
+
return result;
|
| 112 |
+
}
|
| 113 |
+
};
|
| 114 |
+
|
| 115 |
+
// Masked load for 16-bit element types (BFloat16/Half) where the mask is
// supplied as two 32-bit-lane mask vectors per destination vector
// (mask_n == 2 * dst_n): each 16-bit lane has its own 32-bit mask lane.
template <typename data_t, int dst_n, typename mask_t, int mask_n>
struct VecMaskLoad<
    data_t,
    dst_n,
    mask_t,
    mask_n,
    typename std::enable_if_t<
        (mask_n == 2 * dst_n && dst_n >= 1) &&
        (std::is_same_v<data_t, BFloat16> || std::is_same_v<data_t, Half>)>> {
  // Loads 32 16-bit elements per destination vector from `ptr`; lanes whose
  // mask is not all-ones are zero-filled.
  static inline VectorizedN<data_t, dst_n> apply(
      const data_t* ptr,
      const VecMask<mask_t, mask_n>& vec_mask) {
    auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
    VectorizedN<data_t, dst_n> result;
    VectorizedN<mask_t, 2> tmp_vec;
    for (int i = 0; i < dst_n; i++) {
      // The pair of mask vectors covering the low/high 16 lanes of result[i].
      tmp_vec[0] = vec_mask[2 * i];
      tmp_vec[1] = vec_mask[2 * i + 1];
      auto int_mask = VecMask<mask_t, 2>(tmp_vec).template cast<int, 2>();
      // Translate the all-ones-per-lane pattern into AVX-512 k-masks.
      auto mmask0 = _mm512_cmp_epi32_mask(int_mask[0], all_ones, _MM_CMPINT_EQ);
      auto mmask1 = _mm512_cmp_epi32_mask(int_mask[1], all_ones, _MM_CMPINT_EQ);
      auto zero = _mm256_set1_epi16(0);
      // Two masked 256-bit loads of 16 16-bit elements each...
      auto temp0 = _mm256_mask_loadu_epi16(
          zero, mmask0, ptr + (2 * i) * Vectorized<int>::size());
      auto temp1 = _mm256_mask_loadu_epi16(
          zero, mmask1, ptr + (2 * i + 1) * Vectorized<int>::size());
      // ...stitched together into one 512-bit destination vector.
      result[i] = Vectorized<data_t>(
          _mm512_inserti32x8(_mm512_castsi256_si512(temp0), temp1, 1));
    }
    return result;
  }
};
|
| 147 |
+
|
| 148 |
+
// Masked load for 8-bit element types with a single 16-lane 32-bit mask:
// loads one byte per mask lane (16 bytes total) into the low 128 bits of the
// 512-bit destination; the remaining bytes are unspecified filler from the
// broadcast-zero base vector.
template <typename data_t, typename mask_t>
struct VecMaskLoad<
    data_t,
    1,
    mask_t,
    1,
    typename std::enable_if<
        std::is_same_v<data_t, int8_t> ||
        std::is_same_v<data_t, uint8_t>>::type> {
  static inline VectorizedN<data_t, 1> apply(
      const data_t* ptr,
      const VecMask<mask_t, 1>& vec_mask) {
    auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
    auto int_mask = vec_mask.template cast<int, 1>()[0];
    // One k-mask bit per 32-bit mask lane == one bit per loaded byte.
    auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ);
    auto zero = _mm_set1_epi8(0);
    // Masked 128-bit byte load; unselected bytes are zeroed.
    auto temp = _mm_mask_loadu_epi8(zero, mmask, ptr);
    // Place the 16 loaded bytes into 128-bit lane 0 of a zeroed vector.
    return Vectorized<data_t>(
        _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0));
  }
};
|
| 169 |
+
|
| 170 |
+
// Masked load for 64-bit element types (int64_t/double) driven by a single
// 16-lane 32-bit mask: the low/high 8 k-mask bits select the first/second
// 8-element destination vector respectively.
template <typename data_t, typename mask_t>
struct VecMaskLoad<
    data_t,
    2,
    mask_t,
    1,
    typename std::enable_if<
        std::is_same_v<data_t, int64_t> ||
        std::is_same_v<data_t, double>>::type> {
  static inline VectorizedN<data_t, 2> apply(
      const data_t* ptr,
      const VecMask<mask_t, 1>& vec_mask) {
    auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
    at::vec::Vectorized<data_t> zero_vec(0);
    auto int_mask = vec_mask.template cast<int, 1>()[0];
    // 16 k-mask bits: bit j selects element j of the 16 loaded elements.
    auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ);
    at::vec::VectorizedN<data_t, 2> result;
    if constexpr (std::is_same_v<data_t, double>) {
      result[0] = _mm512_mask_loadu_pd(zero_vec, (__mmask8)mmask, ptr);
      result[1] =
          _mm512_mask_loadu_pd(zero_vec, (__mmask8)(mmask >> 8), ptr + 8);
    } else {
      result[0] = _mm512_mask_loadu_epi64(zero_vec, (__mmask8)mmask, ptr);
      result[1] =
          _mm512_mask_loadu_epi64(zero_vec, (__mmask8)(mmask >> 8), ptr + 8);
    }
    return result;
  }
};
|
| 199 |
+
|
| 200 |
+
template <int N>
|
| 201 |
+
struct VecMaskCast<float, N, int, N> {
|
| 202 |
+
static inline VecMask<float, N> apply(const VecMask<int, N>& vec_mask) {
|
| 203 |
+
VectorizedN<float, N> result;
|
| 204 |
+
#ifndef _MSC_VER
|
| 205 |
+
#pragma unroll
|
| 206 |
+
#endif
|
| 207 |
+
for (int i = 0; i < N; ++i) {
|
| 208 |
+
result[i] = _mm512_castsi512_ps(vec_mask[i]);
|
| 209 |
+
}
|
| 210 |
+
return result;
|
| 211 |
+
}
|
| 212 |
+
};
|
| 213 |
+
|
| 214 |
+
template <int N>
|
| 215 |
+
struct VecMaskCast<int, N, float, N> {
|
| 216 |
+
static inline VecMask<int, N> apply(const VecMask<float, N>& vec_mask) {
|
| 217 |
+
VectorizedN<int, N> result;
|
| 218 |
+
#ifndef _MSC_VER
|
| 219 |
+
#pragma unroll
|
| 220 |
+
#endif
|
| 221 |
+
for (int i = 0; i < N; ++i) {
|
| 222 |
+
result[i] = _mm512_castps_si512(vec_mask[i]);
|
| 223 |
+
}
|
| 224 |
+
return result;
|
| 225 |
+
}
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
template <int N>
|
| 229 |
+
struct VecMaskCast<int64_t, N, double, N> {
|
| 230 |
+
static inline VecMask<int64_t, N> apply(const VecMask<double, N>& vec_mask) {
|
| 231 |
+
VectorizedN<int64_t, N> result;
|
| 232 |
+
#ifndef _MSC_VER
|
| 233 |
+
#pragma unroll
|
| 234 |
+
#endif
|
| 235 |
+
for (int i = 0; i < N; ++i) {
|
| 236 |
+
result[i] = _mm512_castpd_si512(vec_mask[i]);
|
| 237 |
+
}
|
| 238 |
+
return result;
|
| 239 |
+
}
|
| 240 |
+
};
|
| 241 |
+
|
| 242 |
+
template <int N>
|
| 243 |
+
struct VecMaskCast<double, N, int64_t, N> {
|
| 244 |
+
static inline VecMask<double, N> apply(const VecMask<int64_t, N>& vec_mask) {
|
| 245 |
+
VectorizedN<double, N> result;
|
| 246 |
+
#ifndef _MSC_VER
|
| 247 |
+
#pragma unroll
|
| 248 |
+
#endif
|
| 249 |
+
for (int i = 0; i < N; ++i) {
|
| 250 |
+
result[i] = _mm512_castsi512_pd(vec_mask[i]);
|
| 251 |
+
}
|
| 252 |
+
return result;
|
| 253 |
+
}
|
| 254 |
+
};
|
| 255 |
+
|
| 256 |
+
// Widening cast: each 32-bit mask vector (float or int source) expands into
// two int64 mask vectors (dst_n == 2 * mask_n), sign-extending each lane.
template <int dst_n, typename mask_t, int mask_n>
struct VecMaskCast<
    int64_t,
    dst_n,
    mask_t,
    mask_n,
    typename std::enable_if_t<
        (dst_n == 2 * mask_n) &&
        (std::is_same_v<mask_t, float> || std::is_same_v<mask_t, int>),
        void>> {
  static inline VecMask<int64_t, dst_n> apply(
      const VecMask<mask_t, mask_n>& vec_mask) {
    VectorizedN<int64_t, dst_n> result;
    // Normalize the source to int lanes first (bit-level, free for float).
    auto int_mask = vec_mask.template cast<int, mask_n>();
#ifndef _MSC_VER
#pragma unroll
#endif
    for (int i = 0; i < mask_n; ++i) {
      // 16 x int32 -> 2 x (8 x int64), preserving the all-ones mask pattern.
      auto int64_vec =
          convert<int64_t, 2, int, 1>(VectorizedN<int, 1>(int_mask[i]));
      result[2 * i] = int64_vec[0];
      result[2 * i + 1] = int64_vec[1];
    }
    return VecMask<int64_t, dst_n>(result);
  }
};
|
| 282 |
+
|
| 283 |
+
// Narrowing cast: each pair of int64 mask vectors collapses into one 32-bit
// mask vector (mask_n == 2 * dst_n), then is reinterpreted as float/int.
template <typename dst_t, int dst_n, int mask_n>
struct VecMaskCast<
    dst_t,
    dst_n,
    int64_t,
    mask_n,
    typename std::enable_if_t<
        (mask_n == 2 * dst_n) &&
        (std::is_same_v<dst_t, float> || std::is_same_v<dst_t, int>),
        void>> {
  static inline VecMask<dst_t, dst_n> apply(
      const VecMask<int64_t, mask_n>& vec_mask) {
    VectorizedN<int, dst_n> result;
    VectorizedN<int64_t, 2> int64_vec;
    for (int i = 0; i < dst_n; ++i) {
      // 2 x (8 x int64) -> 16 x int32; truncation keeps the mask pattern.
      int64_vec[0] = vec_mask[2 * i];
      int64_vec[1] = vec_mask[2 * i + 1];
      result[i] = convert<int, 1, int64_t, 2>(int64_vec);
    }
    // Final cast to the requested lane type is bit-level only.
    return VecMask<int, dst_n>(result).template cast<dst_t, dst_n>();
  }
};
|
| 305 |
+
|
| 306 |
+
template <>
|
| 307 |
+
struct VecMaskCast<double, 2, float, 1> {
|
| 308 |
+
static inline VecMask<double, 2> apply(const VecMask<float, 1>& vec_mask) {
|
| 309 |
+
auto int64_mask = VecMaskCast<int64_t, 2, float, 1>::apply(vec_mask);
|
| 310 |
+
return VecMaskCast<double, 2, int64_t, 2>::apply(int64_mask);
|
| 311 |
+
}
|
| 312 |
+
};
|
| 313 |
+
|
| 314 |
+
template <>
|
| 315 |
+
struct VecMaskCast<float, 1, double, 2> {
|
| 316 |
+
static inline VecMask<float, 1> apply(const VecMask<double, 2>& vec_mask) {
|
| 317 |
+
auto int64_mask = VecMaskCast<int64_t, 2, double, 2>::apply(vec_mask);
|
| 318 |
+
return VecMaskCast<float, 1, int64_t, 2>::apply(int64_mask);
|
| 319 |
+
}
|
| 320 |
+
};
|
| 321 |
+
|
| 322 |
+
template <>
|
| 323 |
+
inline bool VecMask<int, 1>::all_zero() const {
|
| 324 |
+
__mmask16 mask = _mm512_test_epi32_mask(mask_[0], mask_[0]);
|
| 325 |
+
return mask == 0;
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
template <>
|
| 329 |
+
inline bool VecMask<int, 1>::is_masked(int i) const {
|
| 330 |
+
return _mm512_movepi32_mask(mask_[0]) & (1 << i);
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
template <>
|
| 334 |
+
inline bool VecMask<int, 1>::all_masked() const {
|
| 335 |
+
__mmask16 mask = _mm512_movepi32_mask(mask_[0]);
|
| 336 |
+
return mask == 0xffff;
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
// Predicate helpers for N-vector int64 masks (8 lanes per vector).
template <int N>
struct VecMaskCheck<int64_t, N> {
  // True iff every lane of every vector is zero; bails out on the first
  // vector containing a nonzero lane.
  static inline bool all_zero(const VectorizedN<int64_t, N>& vec_mask) {
    bool all_zero = true;
    for (int i = 0; i < N; ++i) {
      all_zero =
          all_zero && (_mm512_test_epi64_mask(vec_mask[i], vec_mask[i]) == 0);
      if (!all_zero) {
        return all_zero;
      }
    }
    return all_zero;
  }

  // True iff global lane index i (0 <= i < 8*N) is masked. Finds the vector
  // holding lane i, then tests that lane's sign bit via the k-mask.
  static inline bool is_masked(const VectorizedN<int64_t, N>& vec_mask, int i) {
    for (int j = 0; j < N; ++j) {
      if (i < (j + 1) * 8) {
        return _mm512_movepi64_mask(vec_mask[j]) & (1 << (i - j * 8));
      }
    }
    // i out of range [0, 8*N): report unmasked.
    return false;
  }

  // True iff every lane of every vector has its sign bit set; bails out on
  // the first vector that is not fully masked.
  static inline bool all_masked(const VectorizedN<int64_t, N>& vec_mask) {
    bool all_masked = true;
    for (int i = 0; i < N; ++i) {
      all_masked = all_masked && (_mm512_movepi64_mask(vec_mask[i]) == 0xff);
      if (!all_masked) {
        return all_masked;
      }
    }
    return all_masked;
  }
};
|
| 373 |
+
|
| 374 |
+
#define VEC_MASK_METHOD_WITH_CAST_TO_INT( \
|
| 375 |
+
T, N, return_type, method, args_def, args) \
|
| 376 |
+
template <> \
|
| 377 |
+
inline return_type VecMask<T, N>::method args_def const { \
|
| 378 |
+
return cast<int, 1>().method args; \
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_zero, (), ())
|
| 382 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_zero, (), ())
|
| 383 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, is_masked, (int i), (i))
|
| 384 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, is_masked, (int i), (i))
|
| 385 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_masked, (), ())
|
| 386 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_masked, (), ())
|
| 387 |
+
|
| 388 |
+
#undef VEC_MASK_DEFINE_METHOD_WITH_CAST_TO_INT
|
| 389 |
+
|
| 390 |
+
#endif
|
| 391 |
+
|
| 392 |
+
} // namespace CPU_CAPABILITY
|
| 393 |
+
} // namespace at::vec
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h
ADDED
|
@@ -0,0 +1,1409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <ATen/native/quantized/AffineQuantizerBase.h>
|
| 9 |
+
|
| 10 |
+
#include <c10/util/irange.h>
|
| 11 |
+
#include <c10/util/qint32.h>
|
| 12 |
+
#include <c10/util/qint8.h>
|
| 13 |
+
#include <c10/util/quint8.h>
|
| 14 |
+
|
| 15 |
+
#include <array>
|
| 16 |
+
#include <cmath>
|
| 17 |
+
|
| 18 |
+
// This file defines Vectorized<> for the quantized types.
|
| 19 |
+
//
|
| 20 |
+
//
|
| 21 |
+
// Currently, we simply use these classes as efficient converters between
|
| 22 |
+
// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
|
| 23 |
+
// where doing the arithmetic in full-precision is acceptable (e.g.
|
| 24 |
+
// elementwise operators).
|
| 25 |
+
//
|
| 26 |
+
//
|
| 27 |
+
// Conversions are as follows:
|
| 28 |
+
// Vectorized<qint8> -> 4x Vectorized<float>
|
| 29 |
+
// Vectorized<quint8> -> 4x Vectorized<float>
|
| 30 |
+
// Vectorized<qint32> -> 1x Vectorized<float>
|
| 31 |
+
//
|
| 32 |
+
// The size of the returned float vector is specified by the special
|
| 33 |
+
// constexpr function float_num_vecs. The type of the value returned
|
| 34 |
+
// from dequantize (and expected as an argument to quantize) is
|
| 35 |
+
// specified by float_vec_return_type.
|
| 36 |
+
//
|
| 37 |
+
// When writing kernels with these vectors, it is expected that floating-
|
| 38 |
+
// point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
|
| 39 |
+
// iterations.
|
| 40 |
+
|
| 41 |
+
namespace at {
|
| 42 |
+
namespace vec {
|
| 43 |
+
inline namespace CPU_CAPABILITY {
|
| 44 |
+
|
| 45 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 46 |
+
|
| 47 |
+
// Base storage type shared by the quantized Vectorized<> specializations:
// one 64-byte-aligned 512-bit integer register. Alignment syntax differs
// between MSVC and GCC/Clang, hence the split declaration.
#ifdef _MSC_VER
__declspec(align(64)) struct Vectorizedqi {
 protected:
  __m512i vals;
#else
struct Vectorizedqi {
 protected:
  __m512i vals __attribute__((aligned(64)));
#endif

 public:
  Vectorizedqi() {}
  Vectorizedqi(__m512i v) : vals(v) {}
  // Implicit conversion so the raw register can be fed to intrinsics.
  operator __m512i() const {
    return vals;
  }
};
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
// Packs two vectors of wider integers down to element type T with
// saturation, then clamps every lane to [min_val, max_val]. Only the 8-bit
// specializations below are usable; the int32_t one exists for linkage only.
template <typename T>
__m512i pack_saturate_and_clamp(
    __m512i first,
    __m512i second,
    T min_val,
    T max_val);
|
| 72 |
+
|
| 73 |
+
// int32_t instantiation: never called at runtime; exists only so generic
// code referencing pack_saturate_and_clamp<int32_t> links.
template <>
inline __m512i pack_saturate_and_clamp<int32_t>(
    __m512i first [[maybe_unused]],
    __m512i second [[maybe_unused]],
    int32_t min_val [[maybe_unused]],
    int32_t max_val [[maybe_unused]]) {
  // This function is for linkage only, will not be used
  AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
  return __m512i{};
}
|
| 83 |
+
|
| 84 |
+
// Signed 8-bit: pack int16 lanes to int8 with signed saturation, then clamp
// to the caller's (possibly narrower) [min_val, max_val] range.
template <>
inline __m512i pack_saturate_and_clamp<int8_t>(
    __m512i first,
    __m512i second,
    int8_t min_val,
    int8_t max_val) {
  __m512i packed_and_sat = _mm512_packs_epi16(first, second);
  return _mm512_max_epi8(
      _mm512_set1_epi8(min_val),
      _mm512_min_epi8(packed_and_sat, _mm512_set1_epi8(max_val)));
}
|
| 95 |
+
|
| 96 |
+
// Unsigned 8-bit: pack int16 lanes to uint8 with unsigned saturation, then
// clamp to the caller's [min_val, max_val] range.
template <>
inline __m512i pack_saturate_and_clamp<uint8_t>(
    __m512i first,
    __m512i second,
    uint8_t min_val,
    uint8_t max_val) {
  __m512i packed_and_sat = _mm512_packus_epi16(first, second);
  return _mm512_max_epu8(
      _mm512_set1_epi8(min_val),
      _mm512_min_epu8(packed_and_sat, _mm512_set1_epi8(max_val)));
}
|
| 107 |
+
|
| 108 |
+
// Widens the first 16 int8/uint8 elements of `src` to a full
// Vectorized<float>. Elements beyond the first 16 are ignored.
template <typename T>
typename std::enable_if_t<std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, at::vec::Vectorized<float>>
inline convert_int8_to_float(at::vec::Vectorized<T> src) {
  // Note: this function only convert inputs number of elements equal to at::vec::Vectorized<float>.size()
  // Only handle first 16*8 bits
  __m128i input_128 = _mm512_castsi512_si128(src);
  // Convert from 16*uint8/int8 to 16*int32 (zero- vs sign-extend per T)
  __m512i input_512_extended;
  if constexpr (std::is_same_v<T, uint8_t>)
    input_512_extended = _mm512_cvtepu8_epi32(input_128);
  else
    input_512_extended = _mm512_cvtepi8_epi32(input_128);
  // Convert from 16*int32 to 16*float32
  return _mm512_cvtepi32_ps(input_512_extended);
}
|
| 123 |
+
|
| 124 |
+
// Narrows 16 float32 values to int8/uint8 with truncation and saturation;
// the 16 results land in the low 128 bits of the returned vector.
template <typename T>
typename std::enable_if_t<std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, at::vec::Vectorized<T>>
inline convert_float_to_int8(at::vec::Vectorized<float> src) {
  // Convert from float32 to int32 with truncation
  __m512i x_values_int32 = _mm512_cvttps_epi32(src);

  // Convert from int32 to int16 using signed saturation
  __m512i xy_packed_v = _mm512_packs_epi32(x_values_int32, x_values_int32);

  constexpr auto min_val = std::numeric_limits<T>::min();
  constexpr auto max_val = std::numeric_limits<T>::max();

  // Convert from int16 to uint8/int8 using unsigned saturation
  __m512i xyzw_clamped_v = pack_saturate_and_clamp<T>(
      xy_packed_v, xy_packed_v, min_val, max_val);
  // Undo the lane interleaving introduced by the 512-bit pack instructions
  // so the 16 bytes come out in source order.
  __m512i permute_mask_v =
      _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
                       0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
  return _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
}
|
| 144 |
+
|
| 145 |
+
// Quantizes `len` floats to T (int8/uint8) as
//   dst[i] = clamp(round(src[i] * inverse_scale) + zero_point, T range)
// using a 64-element main loop, a 16-element loop, and a scalar tail.
// NOTE(review): the vector loops use _mm512_load_ps, which requires `src`
// to be 64-byte aligned — confirm all callers guarantee this.
template <typename T>
__FORCE_INLINE void QuantizeAvx512(
    const float* src,
    T* dst,
    int len,
    float inverse_scale,
    int64_t zero_point) {
  constexpr int VLEN = 16;
  constexpr auto min_val = std::numeric_limits<T>::min();
  constexpr auto max_val = std::numeric_limits<T>::max();
  const __m512i min_v = _mm512_set1_epi32(min_val);
  const __m512i max_v = _mm512_set1_epi32(max_val);
  // This is the largest int32 value < int32_max exactly representable in float
  constexpr int32_t int32_float_max_val =
      std::numeric_limits<int32_t>::max() - 127;
  int i = 0;
  __m512 inverse_scale_v = _mm512_set1_ps(inverse_scale);
  // Byte shuffle picking byte 0 of each int32 within each 128-bit lane
  // (0xff lanes are zeroed); used by the 16-element loop below.
  // clang-format off
  static const __m512i shuffle_mask_v = _mm512_set_epi8(
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0x0c, 0x08, 0x04, 0x00,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0x0c, 0x08, 0x04, 0x00,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0x0c, 0x08, 0x04, 0x00,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff,
      0x0c, 0x08, 0x04, 0x00);
  // clang-format on
  // Reorders 32-bit lanes to undo the pack instructions' lane interleaving.
  __m512i permute_mask_v =
      _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
                       0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
  __m512i permute_mask_l8_v =
      _mm512_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                       0x00, 0x00, 0x00, 0x00, 0x0c, 0x08, 0x04, 0x00);
  // Main loop: 4 vectors (64 floats) per iteration.
  int len_aligned = len / (VLEN * 4) * (VLEN * 4);
  for (; i < len_aligned; i += 4 * VLEN) {
    // x
    __m512 x_vals = _mm512_load_ps(src + i);
    __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
    // If the floating point value is greater than int32_max,
    // _mm512_cvtps_epi32 converts it to a negative value. Clip at
    // int32_float_max_val to avoid this.
    x_transformed_v =
        _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
    // y
    __m512 y_vals = _mm512_load_ps(src + i + VLEN);
    __m512 y_transformed_v = _mm512_mul_ps(y_vals, inverse_scale_v);
    y_transformed_v =
        _mm512_min_ps(y_transformed_v, _mm512_set1_ps(int32_float_max_val));
    // z
    __m512 z_vals = _mm512_load_ps(src + i + 2 * VLEN);
    __m512 z_transformed_v = _mm512_mul_ps(z_vals, inverse_scale_v);
    z_transformed_v =
        _mm512_min_ps(z_transformed_v, _mm512_set1_ps(int32_float_max_val));
    // w
    __m512 w_vals = _mm512_load_ps(src + i + 3 * VLEN);
    __m512 w_transformed_v = _mm512_mul_ps(w_vals, inverse_scale_v);
    w_transformed_v =
        _mm512_min_ps(w_transformed_v, _mm512_set1_ps(int32_float_max_val));

    // Round-to-nearest-even conversion to int32.
    __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
    __m512i y_rounded_v = _mm512_cvtps_epi32(y_transformed_v);
    __m512i z_rounded_v = _mm512_cvtps_epi32(z_transformed_v);
    __m512i w_rounded_v = _mm512_cvtps_epi32(w_transformed_v);

    // add zero point
    x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point))
;
    y_rounded_v = _mm512_add_epi32(y_rounded_v, _mm512_set1_epi32(zero_point));
    z_rounded_v = _mm512_add_epi32(z_rounded_v, _mm512_set1_epi32(zero_point));
    w_rounded_v = _mm512_add_epi32(w_rounded_v, _mm512_set1_epi32(zero_point));

    // Narrow 4x int32 vectors to one vector of 64 bytes, saturating and
    // clamping to T's range along the way.
    __m512i xy_packed_v = _mm512_packs_epi32(x_rounded_v, y_rounded_v);
    __m512i zw_packed_v = _mm512_packs_epi32(z_rounded_v, w_rounded_v);
    __m512i xyzw_clamped_v =
        pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);

    // Restore source element order after the packs' lane interleaving.
    xyzw_clamped_v =
        _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
    _mm512_storeu_si512(reinterpret_cast<__m512i*>(dst + i), xyzw_clamped_v);
  }

  // Additional 8-lane AVX512 version to take advantage when len is smaller
  // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
  for (; i < len / VLEN * VLEN; i += VLEN) {
    __m512 x_vals = _mm512_load_ps(src + i);
    __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
    x_transformed_v =
        _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
    __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
    x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
    // Clamp in int32 domain, then narrow to bytes via shuffle + permute.
    __m512i x_clipped_v =
        _mm512_max_epi32(min_v, _mm512_min_epi32(max_v, x_rounded_v));

    x_clipped_v = _mm512_shuffle_epi8(x_clipped_v, shuffle_mask_v);
    x_clipped_v = _mm512_permutexvar_epi32(permute_mask_l8_v, x_clipped_v);
    _mm_storeu_si128(
        reinterpret_cast<__m128i*>(dst + i),
        _mm512_castsi512_si128(x_clipped_v));
  }

  // Scalar tail for the remaining (< 16) elements.
  for (; i < len; ++i) {
    float transformed = src[i] * inverse_scale;

    // Not exactly the same behavior as the vectorized code.
    // The vectorized code above always rounds to even in halfway cases
    // (https://software.intel.com/en-us/node/523819), but std::nearbyint
    // does the same only when the current rounding mode is FE_TONEAREST.
    // However, in practice, this should not be a problem because most cases
    // use the default rounding mode FE_TONEAREST.
    // Note that we cannot implement the same behavior as the vectorized code
    // using std::round because it does rounding away from zero in halfway
    // cases.
    transformed = zero_point + std::nearbyint(transformed);
    float clipped =
        std::min(std::max(transformed, float(min_val)), float(max_val));
    dst[i] = clipped;
  }
}
|
| 271 |
+
|
| 272 |
+
// 512-bit vector of 16 quantized 32-bit integers (c10::qint32).
template<>
struct Vectorized<c10::qint32> : public Vectorizedqi {
  using size_type = int;
  // 16 int32 lanes fit in one 512-bit register.
  static constexpr size_type size() {
    return 16;
  }

  // All 16 lanes dequantize into a single Vectorized<float> (16 floats).
  static constexpr int float_num_vecs() {
    return 1;
  }

  // Widening to int32 is the identity here, so one int vector as well.
  static constexpr int int_num_vecs() {
    return 1;
  }

  using float_vec_return_type = std::array<Vectorized<float>, 1>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
  using value_type = c10::qint32::underlying;

 public:
  using Vectorizedqi::Vectorizedqi;
  Vectorized() {}

  Vectorized(__m512i vals_) { vals = vals_;}

  // Broadcast constructor
  Vectorized(const c10::qint32& val) {
    value_type uw = val.val_;
    vals = _mm512_set1_epi32(uw);
  }

  // Store the first `count` elements to `ptr`. A full store uses one
  // unaligned 512-bit store; a partial store falls back to memcpy.
  void store(void* ptr, int count = size()) const {
    if (count != size()) {
      memcpy(ptr, &vals, count * sizeof(value_type));
    } else {
      _mm512_storeu_si512((__m512i*)ptr, vals);
    }
  }

  static Vectorized<c10::qint32> loadu(const void* ptr) {
    return Vectorized<c10::qint32>(ptr);
  }

  // Partial load: zero-fill an aligned stack buffer, copy `count` elements
  // into it, then perform a full-vector load from the buffer.
  static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return loadu(tmp_values);
  }

  // Dequantize using a precomputed term: fmadd computes
  // scale * x + scale_zp_premul; the premul argument is expected to hold
  // scale * -zero_point so the result matches the two-argument overload
  // below (TODO confirm with callers).
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point,
      Vectorized<float> scale_zp_premul) const {
    __m512 float_vals = _mm512_cvtepi32_ps(vals);
    return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
  }

  // Dequantize: (x - zero_point) * scale, elementwise.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
    __m512 float_vals = _mm512_cvtepi32_ps(vals);
    return {(Vectorized<float>(float_vals) - zero_point) * scale};
  }

  // Quantize 16 floats through the scalar helper quantize_vec;
  // inverse_scale is unused because quantize_vec takes scale directly.
  static Vectorized<c10::qint32> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float inverse_scale [[maybe_unused]]) {
    Vectorized<c10::qint32> retval;
    auto rhs_data = (__m512)rhs[0];
    at::native::quantize_vec<c10::qint32, /*precision=*/32>(
        scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 16);
    return retval;
  }

  // Elementwise signed maximum.
  Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
    return _mm512_max_epi32(vals, b.vals);
  }

  // Elementwise signed minimum.
  Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
    return _mm512_min_epi32(vals, b.vals);
  }

  // ReLU in the quantized domain: clamp below at the zero point.
  Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
    return maximum(zero_point);
  }

  // ReLU6: clamp to [zero_point, q_six], where q_six is the quantized
  // representation of 6.0.
  Vectorized<c10::qint32> relu6(
      Vectorized<c10::qint32> zero_point,
      Vectorized<c10::qint32> q_six) {
    return _mm512_min_epi32(
        _mm512_max_epi32(vals, zero_point.vals), q_six.vals);
  }

  // int32 - int32 needs no widening; this is a plain elementwise subtract.
  int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
    return {_mm512_sub_epi32(vals, b)};
  }

  // Requantize int32 intermediates: round(x * multiplier) + zero_point.
  // _mm512_cvtps_epi32 rounds using the current rounding mode
  // (round-to-nearest-even by default).
  static Vectorized<c10::qint32> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    __m512 multiplier_v = _mm512_set1_ps(multiplier);
    __m512i zero_point_v = _mm512_set1_epi32(zero_point);

    __m512 scaled = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier_v);
    __m512i rounded = _mm512_cvtps_epi32(scaled);
    return _mm512_add_epi32(rounded, zero_point_v);
  }

 private:
  // Load from memory constructor
  Vectorized(const void* ptr) {
    vals = _mm512_loadu_si512((const __m512i*)ptr);
  }
};
|
| 395 |
+
|
| 396 |
+
template <>
|
| 397 |
+
Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 398 |
+
return a.maximum(b);
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
template <>
|
| 402 |
+
Vectorized<c10::qint32> inline operator*(
|
| 403 |
+
const Vectorized<c10::qint32>& a,
|
| 404 |
+
const Vectorized<c10::qint32>& b) {
|
| 405 |
+
return _mm512_mullo_epi32(a, b);
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
template <>
|
| 409 |
+
Vectorized<c10::qint32> inline operator+(
|
| 410 |
+
const Vectorized<c10::qint32>& a,
|
| 411 |
+
const Vectorized<c10::qint32>& b) {
|
| 412 |
+
return _mm512_add_epi32(a, b);
|
| 413 |
+
}
|
| 414 |
+
|
| 415 |
+
/*
 * Convert values from int32 back to int8/uint8
 */
// Requantizes 4 x 16 int32 values to 64 packed 8-bit values:
//   out = saturate<T>(round(x * multiplier) + zp)
// The pack instructions operate within 128-bit lanes, so a final cross-lane
// permutation restores the original element order.
template <typename T>
__m512i RequantizeAvx512(
    const std::array<Vectorized<c10::qint32>, 4>& inp,
    __m512 multiplier,
    __m512i zp) {
  static_assert(
      std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>,
      "Only int8_t/uint8_t are supported");
  // Saturation bounds of the target 8-bit type.
  constexpr auto min_val = std::numeric_limits<T>::min();
  constexpr auto max_val = std::numeric_limits<T>::max();
  // Index vector that undoes the 128-bit-lane interleaving produced by the
  // two pack steps below.
  __m512i permute_mask_v =
      _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
                       0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
  // Scale each of the four int32 groups by the requantization multiplier.
  __m512 x_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier);
  __m512 y_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[1]), multiplier);
  __m512 z_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[2]), multiplier);
  __m512 w_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[3]), multiplier);

  // Round back to int32 (_mm512_cvtps_epi32 uses the current rounding mode,
  // round-to-nearest-even by default).
  __m512i x_rounded_v = _mm512_cvtps_epi32(x_scaled_v);
  __m512i y_rounded_v = _mm512_cvtps_epi32(y_scaled_v);
  __m512i z_rounded_v = _mm512_cvtps_epi32(z_scaled_v);
  __m512i w_rounded_v = _mm512_cvtps_epi32(w_scaled_v);

  /* Add zero point */
  __m512i x_v = _mm512_add_epi32(x_rounded_v, zp);
  __m512i y_v = _mm512_add_epi32(y_rounded_v, zp);
  __m512i z_v = _mm512_add_epi32(z_rounded_v, zp);
  __m512i w_v = _mm512_add_epi32(w_rounded_v, zp);

  /* Pack to int16_t and saturate */
  __m512i xy_packed_v = _mm512_packs_epi32(x_v, y_v);
  __m512i zw_packed_v = _mm512_packs_epi32(z_v, w_v);

  // Second pack: int16 -> 8-bit with saturation, then clamp to [min_val,
  // max_val] of the target type.
  __m512i xyzw_clamped_v =
      pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);

  /*
   * xyzw_clamped_v has results in the following layout so we need to
   * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 x8-11 y8-11 z8-11 w8-11 x12-15 y12-15 z12-15 w12-15
   */
  xyzw_clamped_v = _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
  return xyzw_clamped_v;
}
|
| 461 |
+
|
| 462 |
+
// 512-bit vector of 64 quantized signed 8-bit integers (c10::qint8).
template<>
struct Vectorized<c10::qint8> : public Vectorizedqi {
  static constexpr int size() {
    return 64;
  }

  // 64 int8 lanes dequantize into 4 Vectorized<float> of 16 floats each.
  static constexpr int float_num_vecs() {
    return 4;
  }

  // Widening to int32 likewise yields 4 vectors.
  static constexpr int int_num_vecs() {
    return 4;
  }

  using float_vec_return_type = std::array<Vectorized<float>, 4>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
  using value_type = typename c10::qint8::underlying;

 public:
  using Vectorizedqi::Vectorizedqi;

  Vectorized() {}
  Vectorized(__m512i vals_) { vals = vals_;}

  // Broadcast constructor
  Vectorized(const c10::qint8& val) {
    value_type uw = val.val_;
    vals = _mm512_set1_epi8(uw);
  }

  // This is needed because the compiler emits awful code for the default
  // constructor for moving the enum
  Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }

  // This is added to avoid error: definition of implicit copy assignment operator
  // for 'Vectorized<c10::qint8>' is deprecated because it has a user-declared
  // copy constructor [-Werror,-Wdeprecated-copy]
  Vectorized& operator=(const Vectorized<c10::qint8>&) = default;

  // Store the first `count` bytes to `ptr`; a full store uses one unaligned
  // 512-bit store, a partial store falls back to memcpy.
  void store(void* ptr, int count = size()) const {
    if (count != size()) {
      memcpy(ptr, &vals, count * sizeof(value_type));
    } else {
      _mm512_storeu_si512((__m512i*)ptr, vals);
    }
  }

  static Vectorized<c10::qint8> loadu(const void* ptr) {
    return Vectorized<c10::qint8>(ptr);
  }

  // Partial load: zero-fill an aligned stack buffer, copy `count` bytes,
  // then do a full-vector load from the buffer.
  static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return loadu(tmp_values);
  }

 private:
  // Sign-extend 16 packed int8 values to 16 int32 lanes.
  __m512i cvtepi8_epi32(__m128i epi8_vals) const {
    return _mm512_cvtepi8_epi32(epi8_vals);
  }

 public:
  // Dequantize with a precomputed scale * -zero_point term so each group of
  // 16 values needs only one fmadd: scale * x + scale_neg_zp_premul.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point,
      Vectorized<float> scale_neg_zp_premul) const {
#if defined(_MSC_VER) && !defined(__clang__)
    // MSVC (without clang) does not support subscripting __m512i; read the
    // 64-bit lanes through the m512i_u64 union member instead.
    __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]);
    __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]);
    __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]);
    __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]);
#else
    // Split the 512-bit register into four 128-bit groups of 16 int8 each.
    __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
    __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
    __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
    __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
#endif

    __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0));
    __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1));
    __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2));
    __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3));

    auto val0 =
        vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
    auto val1 =
        vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
    auto val2 =
        vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
    auto val3 =
        vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
    return {val0, val1, val2, val3};
  }

  // Dequantize: (x - zero_point) * scale for each group of 16 values.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
#if defined(_MSC_VER) && !defined(__clang__)
    // MSVC: extract 64-bit lanes via the union member (no subscripting).
    __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]);
    __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]);
    __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]);
    __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]);
#else
    __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
    __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
    __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
    __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
#endif

    __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0));
    __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1));
    __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2));
    __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3));

    auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
    auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
    auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
    auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
    return {val0, val1, val2, val3};
  }

  // Quantize 64 floats (4 vectors of 16) into 64 packed qint8 values using
  // the vectorized QuantizeAvx512 helper, then reload the packed buffer.
  static Vectorized<c10::qint8> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float inverse_scale) {
    auto* rhs_data = (float*)rhs.data();
    int8_t quantized_values[64];
    QuantizeAvx512<value_type>(
        rhs_data, quantized_values, 64, inverse_scale, zero_point);
    return Vectorized<c10::qint8>::loadu(quantized_values);
  }

  // Elementwise signed 8-bit maximum.
  Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
    return _mm512_max_epi8(vals, b.vals);
  }

  // Elementwise signed 8-bit minimum.
  Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
    return _mm512_min_epi8(vals, b.vals);
  }

  // ReLU in the quantized domain: clamp below at the zero point.
  Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
    return maximum(zero_point);
  }

  // ReLU6: clamp to [zero_point, q_six] (q_six is 6.0 quantized).
  Vectorized<c10::qint8> relu6(
      Vectorized<c10::qint8> zero_point,
      Vectorized<c10::qint8> q_six) {
    return _mm512_min_epi8(
        _mm512_max_epi8(vals, zero_point.vals), q_six.vals);
  }

  // Subtract b from *this with widening to int32 so the difference cannot
  // overflow int8; returns four int32 vectors of 16 lanes each.
  int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
#if defined(_MSC_VER) && !defined(__clang__)
    // MSVC: extract 64-bit lanes via the union member (no subscripting).
    __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]);
    __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]);
    __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]);
    __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]);
#else
    __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
    __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
    __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
    __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
#endif

    // Sign-extend our four 16-element groups to int32.
    __m512i int32_val0 = cvtepi8_epi32(int_val0);
    __m512i int32_val1 = cvtepi8_epi32(int_val1);
    __m512i int32_val2 = cvtepi8_epi32(int_val2);
    __m512i int32_val3 = cvtepi8_epi32(int_val3);

#if defined(_MSC_VER) && !defined(__clang__)
    __m128i int_b0 = _mm_set_epi64x(b.vals.m512i_u64[1], b.vals.m512i_u64[0]);
    __m128i int_b1 = _mm_set_epi64x(b.vals.m512i_u64[3], b.vals.m512i_u64[2]);
    __m128i int_b2 = _mm_set_epi64x(b.vals.m512i_u64[5], b.vals.m512i_u64[4]);
    __m128i int_b3 = _mm_set_epi64x(b.vals.m512i_u64[7], b.vals.m512i_u64[6]);
#else
    __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]);
    __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]);
    __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]);
    __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]);
#endif

    // Sign-extend b's groups the same way.
    __m512i int32_b0 = cvtepi8_epi32(int_b0);
    __m512i int32_b1 = cvtepi8_epi32(int_b1);
    __m512i int32_b2 = cvtepi8_epi32(int_b2);
    __m512i int32_b3 = cvtepi8_epi32(int_b3);

    __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0);
    __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1);
    __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2);
    __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3);

    return {Vectorized<c10::qint32>(res_0),
            Vectorized<c10::qint32>(res_1),
            Vectorized<c10::qint32>(res_2),
            Vectorized<c10::qint32>(res_3)};
  }

  // Requantize four int32 vectors back to 64 packed qint8 values:
  // saturate(round(x * multiplier) + zero_point).
  static Vectorized<c10::qint8> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    __m512 multiplier_v = _mm512_set1_ps(multiplier);
    __m512i zero_point_v = _mm512_set1_epi32(zero_point);
    return RequantizeAvx512<value_type>(inp, multiplier_v, zero_point_v);
  }

 private:
  // Load from memory constructor
  Vectorized(const void* ptr) {
    vals = _mm512_loadu_si512((const __m512i*)ptr);
  }
};
|
| 682 |
+
|
| 683 |
+
template <>
|
| 684 |
+
Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
|
| 685 |
+
return a.maximum(b);
|
| 686 |
+
}
|
| 687 |
+
|
| 688 |
+
// 512-bit vector of 64 quantized unsigned 8-bit integers (c10::quint8).
template<>
struct Vectorized<c10::quint8> : public Vectorizedqi {
  static constexpr int size() {
    return 64;
  }

  // 64 uint8 lanes dequantize into 4 Vectorized<float> of 16 floats each.
  static constexpr int float_num_vecs() {
    return 4;
  }

  // Widening to int32 likewise yields 4 vectors.
  static constexpr int int_num_vecs() {
    return 4;
  }

  using float_vec_return_type = std::array<Vectorized<float>, 4>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
  using value_type = typename c10::quint8::underlying;

 public:
  using Vectorizedqi::Vectorizedqi;
  Vectorized() {}

  Vectorized(__m512i vals_) { vals = vals_;}

  // Broadcast constructor
  Vectorized(const c10::quint8& val) {
    value_type uw = val.val_;
    vals = _mm512_set1_epi8(uw);
  }

  // User-declared copy constructor forwarding to the base (mirrors the
  // qint8 specialization).
  Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }

  // This is added to avoid error: definition of implicit copy assignment operator
  // for 'Vectorized<c10::quint8>' is deprecated because it has a user-declared
  // copy constructor [-Werror,-Wdeprecated-copy]
  Vectorized& operator=(const Vectorized<c10::quint8>&) = default;

  // Store the first `count` bytes; a full store uses one unaligned 512-bit
  // store, a partial store falls back to memcpy.
  void store(void* ptr, int count = size()) const {
    if (count != size()) {
      memcpy(ptr, &vals, count * sizeof(value_type));
    } else {
      _mm512_storeu_si512((__m512i*)ptr, vals);
    }
  }

  static Vectorized<c10::quint8> loadu(const void* ptr) {
    return Vectorized<c10::quint8>(ptr);
  }

  // Partial load: zero-fill an aligned stack buffer, copy `count` bytes,
  // then do a full-vector load from the buffer.
  static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return loadu(tmp_values);
  }

 private:
  // Zero-extend 16 packed uint8 values to 16 int32 lanes.
  __m512i cvtepu8_epi32(__m128i epu8_vals) const {
    return _mm512_cvtepu8_epi32(epu8_vals);
  }

 public:
  // Dequantize with a precomputed scale * -zero_point term so each group of
  // 16 values needs only one fmadd: scale * x + scale_zp_premul.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point,
      Vectorized<float> scale_zp_premul) const {
#if defined(_MSC_VER) && !defined(__clang__)
    // MSVC (without clang) does not support subscripting __m512i; read the
    // 64-bit lanes through the m512i_u64 union member instead.
    __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]);
    __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]);
    __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]);
    __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]);
#else
    // Split the 512-bit register into four 128-bit groups of 16 uint8 each.
    __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
    __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
    __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
    __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
#endif

    __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0));
    __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1));
    __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2));
    __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3));

    auto val0 =
        vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
    auto val1 =
        vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
    auto val2 =
        vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
    auto val3 =
        vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);

    return {val0, val1, val2, val3};
  }

  // Dequantize: (x - zero_point) * scale for each group of 16 values.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
#if defined(_MSC_VER) && !defined(__clang__)
    // MSVC: extract 64-bit lanes via the union member (no subscripting).
    __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]);
    __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]);
    __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]);
    __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]);
#else
    __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
    __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
    __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
    __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
#endif

    __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0));
    __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1));
    __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2));
    __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3));

    auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
    auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
    auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
    auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;

    return {val0, val1, val2, val3};
  }

  // Quantize 64 floats (4 vectors of 16) into 64 packed quint8 values using
  // the vectorized QuantizeAvx512 helper, then reload the packed buffer.
  static Vectorized<c10::quint8> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float inverse_scale) {
    auto* rhs_data = (float*)rhs.data();
    uint8_t quantized_values[64];
    QuantizeAvx512<value_type>(
        rhs_data, quantized_values, 64, inverse_scale, zero_point);
    return Vectorized<c10::quint8>::loadu(quantized_values);
  }

  // Elementwise unsigned 8-bit maximum.
  Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
    return _mm512_max_epu8(vals, b.vals);
  }

  // Elementwise unsigned 8-bit minimum.
  Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
    return _mm512_min_epu8(vals, b.vals);
  }

  // ReLU in the quantized domain: clamp below at the zero point.
  Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
    return maximum(zero_point);
  }

  // ReLU6: clamp to [zero_point, q_six] (q_six is 6.0 quantized).
  Vectorized<c10::quint8> relu6(
      Vectorized<c10::quint8> zero_point,
      Vectorized<c10::quint8> q_six) {
    return _mm512_min_epu8(
        _mm512_max_epu8(vals, zero_point.vals), q_six.vals);
  }

  // Subtract b from *this with widening to int32 so the difference cannot
  // overflow; returns four int32 vectors of 16 lanes each.
  int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
#if defined(_MSC_VER) && !defined(__clang__)
    // MSVC: extract 64-bit lanes via the union member (no subscripting).
    __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]);
    __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]);
    __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]);
    __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]);
#else
    __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
    __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
    __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
    __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
#endif

    // Zero-extend our four 16-element groups to int32.
    __m512i int32_val0 = cvtepu8_epi32(int_val0);
    __m512i int32_val1 = cvtepu8_epi32(int_val1);
    __m512i int32_val2 = cvtepu8_epi32(int_val2);
    __m512i int32_val3 = cvtepu8_epi32(int_val3);

#if defined(_MSC_VER) && !defined(__clang__)
    __m128i int_b0 = _mm_set_epi64x(b.vals.m512i_u64[1], b.vals.m512i_u64[0]);
    __m128i int_b1 = _mm_set_epi64x(b.vals.m512i_u64[3], b.vals.m512i_u64[2]);
    __m128i int_b2 = _mm_set_epi64x(b.vals.m512i_u64[5], b.vals.m512i_u64[4]);
    __m128i int_b3 = _mm_set_epi64x(b.vals.m512i_u64[7], b.vals.m512i_u64[6]);
#else
    __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]);
    __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]);
    __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]);
    __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]);
#endif

    // Zero-extend b's groups the same way.
    __m512i int32_b0 = cvtepu8_epi32(int_b0);
    __m512i int32_b1 = cvtepu8_epi32(int_b1);
    __m512i int32_b2 = cvtepu8_epi32(int_b2);
    __m512i int32_b3 = cvtepu8_epi32(int_b3);

    __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0);
    __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1);
    __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2);
    __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3);
    return {Vectorized<c10::qint32>(res_0),
            Vectorized<c10::qint32>(res_1),
            Vectorized<c10::qint32>(res_2),
            Vectorized<c10::qint32>(res_3)};
  }

  // Requantize four int32 vectors back to 64 packed quint8 values:
  // saturate(round(x * multiplier) + zero_point).
  static Vectorized<c10::quint8> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    __m512 multiplier_v = _mm512_set1_ps(multiplier);
    __m512i zero_point_v = _mm512_set1_epi32(zero_point);
    return RequantizeAvx512<value_type>(inp, multiplier_v, zero_point_v);
  }

 private:

  // Load from memory constructor
  Vectorized(const void* ptr) {
    vals = _mm512_loadu_si512((const __m512i*)ptr);
  }
};
|
| 908 |
+
|
| 909 |
+
template <>
|
| 910 |
+
Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
|
| 911 |
+
return a.maximum(b);
|
| 912 |
+
}
|
| 913 |
+
|
| 914 |
+
#else
|
| 915 |
+
|
| 916 |
+
// NOTE: These are low-performance implementations that we fall back on.
|
| 917 |
+
|
| 918 |
+
template <
|
| 919 |
+
typename T,
|
| 920 |
+
typename float_vec_return_type_,
|
| 921 |
+
typename int_vec_return_type_,
|
| 922 |
+
int size_>
|
| 923 |
+
struct VectorizedQuantizedConverter {
|
| 924 |
+
static constexpr int size() {
|
| 925 |
+
return size_;
|
| 926 |
+
}
|
| 927 |
+
|
| 928 |
+
static constexpr int float_num_vecs() {
|
| 929 |
+
return size() / 8;
|
| 930 |
+
}
|
| 931 |
+
|
| 932 |
+
static constexpr int int_num_vecs() {
|
| 933 |
+
return size() / 8;
|
| 934 |
+
}
|
| 935 |
+
|
| 936 |
+
using float_vec_return_type = float_vec_return_type_;
|
| 937 |
+
using int_vec_return_type = int_vec_return_type_;
|
| 938 |
+
|
| 939 |
+
using value_type = typename T::underlying;
|
| 940 |
+
std::array<value_type, size_> vals;
|
| 941 |
+
|
| 942 |
+
VectorizedQuantizedConverter(T val) {
|
| 943 |
+
for (const auto i : c10::irange(size())) {
|
| 944 |
+
vals[i] = val.val_;
|
| 945 |
+
}
|
| 946 |
+
}
|
| 947 |
+
|
| 948 |
+
VectorizedQuantizedConverter(const void* ptr) {
|
| 949 |
+
memcpy(vals.data(), ptr, sizeof(value_type) * size());
|
| 950 |
+
}
|
| 951 |
+
|
| 952 |
+
void store(void* ptr, int count = size()) const {
|
| 953 |
+
memcpy(ptr, vals.data(), count * sizeof(value_type));
|
| 954 |
+
}
|
| 955 |
+
|
| 956 |
+
float_vec_return_type dequantize(
|
| 957 |
+
Vectorized<float> scale,
|
| 958 |
+
Vectorized<float> zero_point,
|
| 959 |
+
Vectorized<float> scale_zp_premul [[maybe_unused]]) const {
|
| 960 |
+
float_vec_return_type rv;
|
| 961 |
+
for (const auto i : c10::irange(float_num_vecs())) {
|
| 962 |
+
float tmp_vals[16];
|
| 963 |
+
for (const auto j : c10::irange(16)) {
|
| 964 |
+
tmp_vals[j] = at::native::dequantize_val<T>(
|
| 965 |
+
scale[j], zero_point[j], T(vals[16 * i + j]));
|
| 966 |
+
}
|
| 967 |
+
rv[i] = Vectorized<float>(tmp_vals[0],
|
| 968 |
+
tmp_vals[1],
|
| 969 |
+
tmp_vals[2],
|
| 970 |
+
tmp_vals[3],
|
| 971 |
+
tmp_vals[4],
|
| 972 |
+
tmp_vals[5],
|
| 973 |
+
tmp_vals[6],
|
| 974 |
+
tmp_vals[7],
|
| 975 |
+
tmp_vals[8],
|
| 976 |
+
tmp_vals[9],
|
| 977 |
+
tmp_vals[10],
|
| 978 |
+
tmp_vals[11],
|
| 979 |
+
tmp_vals[12],
|
| 980 |
+
tmp_vals[13],
|
| 981 |
+
tmp_vals[14],
|
| 982 |
+
tmp_vals[15]);
|
| 983 |
+
}
|
| 984 |
+
return rv;
|
| 985 |
+
}
|
| 986 |
+
|
| 987 |
+
float_vec_return_type dequantize(
|
| 988 |
+
Vectorized<float> scale,
|
| 989 |
+
Vectorized<float> zero_point) const {
|
| 990 |
+
Vectorized<float> scale_zp_premul;
|
| 991 |
+
return dequantize(scale, zero_point, scale_zp_premul);
|
| 992 |
+
}
|
| 993 |
+
|
| 994 |
+
protected:
|
| 995 |
+
VectorizedQuantizedConverter() {}
|
| 996 |
+
};
|
| 997 |
+
|
| 998 |
+
template <>
|
| 999 |
+
struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
|
| 1000 |
+
c10::qint32,
|
| 1001 |
+
std::array<Vectorized<float>, 1>,
|
| 1002 |
+
std::array<Vectorized<c10::qint32>, 1>,
|
| 1003 |
+
16> {
|
| 1004 |
+
Vectorized()
|
| 1005 |
+
: VectorizedQuantizedConverter<
|
| 1006 |
+
c10::qint32,
|
| 1007 |
+
std::array<Vectorized<float>, 1>,
|
| 1008 |
+
std::array<Vectorized<c10::qint32>, 1>,
|
| 1009 |
+
16>() {}
|
| 1010 |
+
Vectorized(c10::qint32 val)
|
| 1011 |
+
: VectorizedQuantizedConverter<
|
| 1012 |
+
c10::qint32,
|
| 1013 |
+
std::array<Vectorized<float>, 1>,
|
| 1014 |
+
std::array<Vectorized<c10::qint32>, 1>,
|
| 1015 |
+
16>(val) {}
|
| 1016 |
+
Vectorized(const void* ptr)
|
| 1017 |
+
: VectorizedQuantizedConverter<
|
| 1018 |
+
c10::qint32,
|
| 1019 |
+
std::array<Vectorized<float>, 1>,
|
| 1020 |
+
std::array<Vectorized<c10::qint32>, 1>,
|
| 1021 |
+
16>(ptr) {}
|
| 1022 |
+
|
| 1023 |
+
static Vectorized<c10::qint32> loadu(const void* ptr) {
|
| 1024 |
+
return Vectorized<c10::qint32>(ptr);
|
| 1025 |
+
}
|
| 1026 |
+
|
| 1027 |
+
static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
|
| 1028 |
+
__at_align__ value_type tmp_values[size()];
|
| 1029 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
| 1030 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
| 1031 |
+
// instructions while a loop would be compiled to one instruction.
|
| 1032 |
+
for (const auto i : c10::irange(size())) {
|
| 1033 |
+
tmp_values[i] = 0;
|
| 1034 |
+
}
|
| 1035 |
+
std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
|
| 1036 |
+
return loadu(tmp_values);
|
| 1037 |
+
}
|
| 1038 |
+
|
| 1039 |
+
static Vectorized<c10::qint32> quantize(
|
| 1040 |
+
const float_vec_return_type& rhs,
|
| 1041 |
+
float scale,
|
| 1042 |
+
int32_t zero_point,
|
| 1043 |
+
float inverse_scale [[maybe_unused]]) {
|
| 1044 |
+
std::array<value_type, size()> qvals;
|
| 1045 |
+
std::array<float, float_num_vecs() * 16> float_vals;
|
| 1046 |
+
|
| 1047 |
+
for (const auto i : c10::irange(float_num_vecs())) {
|
| 1048 |
+
rhs[i].store(&float_vals[i * 16], 16);
|
| 1049 |
+
}
|
| 1050 |
+
|
| 1051 |
+
at::native::quantize_vec<c10::qint32, /*precision=*/32>(
|
| 1052 |
+
scale,
|
| 1053 |
+
zero_point,
|
| 1054 |
+
float_vals.data(),
|
| 1055 |
+
(c10::qint32*)qvals.data(),
|
| 1056 |
+
16 * float_num_vecs());
|
| 1057 |
+
|
| 1058 |
+
return Vectorized<c10::qint32>::loadu(qvals.data());
|
| 1059 |
+
}
|
| 1060 |
+
|
| 1061 |
+
Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
|
| 1062 |
+
Vectorized<c10::qint32> retval;
|
| 1063 |
+
for (const auto i : c10::irange(size())) {
|
| 1064 |
+
retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
|
| 1065 |
+
}
|
| 1066 |
+
return retval;
|
| 1067 |
+
}
|
| 1068 |
+
|
| 1069 |
+
Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
|
| 1070 |
+
Vectorized<c10::qint32> retval;
|
| 1071 |
+
for (const auto i : c10::irange(size())) {
|
| 1072 |
+
retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
|
| 1073 |
+
}
|
| 1074 |
+
return retval;
|
| 1075 |
+
}
|
| 1076 |
+
|
| 1077 |
+
Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
|
| 1078 |
+
return maximum(zero_point);
|
| 1079 |
+
}
|
| 1080 |
+
|
| 1081 |
+
|
| 1082 |
+
Vectorized<c10::qint32> relu6(
|
| 1083 |
+
Vectorized<c10::qint32> zero_point,
|
| 1084 |
+
Vectorized<c10::qint32> q_six) {
|
| 1085 |
+
Vectorized<c10::qint32> retval;
|
| 1086 |
+
for (const auto i : c10::irange(size())) {
|
| 1087 |
+
retval.vals[i] = std::min<value_type>(
|
| 1088 |
+
std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
|
| 1089 |
+
}
|
| 1090 |
+
return retval;
|
| 1091 |
+
}
|
| 1092 |
+
|
| 1093 |
+
int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
|
| 1094 |
+
int_vec_return_type retval;
|
| 1095 |
+
for (const auto i : c10::irange(size())) {
|
| 1096 |
+
retval[0].vals[i] = vals[i] - b.vals[i];
|
| 1097 |
+
}
|
| 1098 |
+
return retval;
|
| 1099 |
+
}
|
| 1100 |
+
|
| 1101 |
+
static Vectorized<c10::qint32> requantize_from_int(
|
| 1102 |
+
const int_vec_return_type& inp,
|
| 1103 |
+
float multiplier,
|
| 1104 |
+
int32_t zero_point) {
|
| 1105 |
+
Vectorized<c10::qint32> retval;
|
| 1106 |
+
for (const auto i : c10::irange(size())) {
|
| 1107 |
+
retval.vals[i] =
|
| 1108 |
+
std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
|
| 1109 |
+
zero_point;
|
| 1110 |
+
}
|
| 1111 |
+
return retval;
|
| 1112 |
+
}
|
| 1113 |
+
};
|
| 1114 |
+
|
| 1115 |
+
template <>
|
| 1116 |
+
Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 1117 |
+
return a.maximum(b);
|
| 1118 |
+
}
|
| 1119 |
+
|
| 1120 |
+
template <>
|
| 1121 |
+
Vectorized<c10::qint32> inline operator*(
|
| 1122 |
+
const Vectorized<c10::qint32>& a,
|
| 1123 |
+
const Vectorized<c10::qint32>& b) {
|
| 1124 |
+
Vectorized<c10::qint32> retval;
|
| 1125 |
+
for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
|
| 1126 |
+
retval.vals[i] = a.vals[i] * b.vals[i];
|
| 1127 |
+
}
|
| 1128 |
+
return retval;
|
| 1129 |
+
}
|
| 1130 |
+
|
| 1131 |
+
template <>
|
| 1132 |
+
Vectorized<c10::qint32> inline operator+(
|
| 1133 |
+
const Vectorized<c10::qint32>& a,
|
| 1134 |
+
const Vectorized<c10::qint32>& b) {
|
| 1135 |
+
Vectorized<c10::qint32> retval;
|
| 1136 |
+
for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
|
| 1137 |
+
retval.vals[i] = a.vals[i] + b.vals[i];
|
| 1138 |
+
}
|
| 1139 |
+
return retval;
|
| 1140 |
+
}
|
| 1141 |
+
|
| 1142 |
+
template <>
|
| 1143 |
+
struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
|
| 1144 |
+
c10::qint8,
|
| 1145 |
+
std::array<Vectorized<float>, 4>,
|
| 1146 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1147 |
+
64> {
|
| 1148 |
+
Vectorized()
|
| 1149 |
+
: VectorizedQuantizedConverter<
|
| 1150 |
+
c10::qint8,
|
| 1151 |
+
std::array<Vectorized<float>, 4>,
|
| 1152 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1153 |
+
64>() {}
|
| 1154 |
+
Vectorized(c10::qint8 val)
|
| 1155 |
+
: VectorizedQuantizedConverter<
|
| 1156 |
+
c10::qint8,
|
| 1157 |
+
std::array<Vectorized<float>, 4>,
|
| 1158 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1159 |
+
64>(val) {}
|
| 1160 |
+
Vectorized(const void* ptr)
|
| 1161 |
+
: VectorizedQuantizedConverter<
|
| 1162 |
+
c10::qint8,
|
| 1163 |
+
std::array<Vectorized<float>, 4>,
|
| 1164 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1165 |
+
64>(ptr) {}
|
| 1166 |
+
|
| 1167 |
+
static Vectorized<c10::qint8> loadu(const void* ptr) {
|
| 1168 |
+
return Vectorized<c10::qint8>(ptr);
|
| 1169 |
+
}
|
| 1170 |
+
|
| 1171 |
+
static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
|
| 1172 |
+
__at_align__ value_type tmp_values[size()];
|
| 1173 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
| 1174 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
| 1175 |
+
// instructions while a loop would be compiled to one instruction.
|
| 1176 |
+
for (const auto i : c10::irange(size())) {
|
| 1177 |
+
tmp_values[i] = 0;
|
| 1178 |
+
}
|
| 1179 |
+
std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
|
| 1180 |
+
return loadu(tmp_values);
|
| 1181 |
+
}
|
| 1182 |
+
|
| 1183 |
+
static Vectorized<c10::qint8> quantize(
|
| 1184 |
+
const float_vec_return_type& rhs,
|
| 1185 |
+
float scale,
|
| 1186 |
+
int32_t zero_point,
|
| 1187 |
+
float inverse_scale [[maybe_unused]]) {
|
| 1188 |
+
std::array<value_type, size()> qvals;
|
| 1189 |
+
std::array<float, float_num_vecs() * 16> float_vals;
|
| 1190 |
+
|
| 1191 |
+
for (const auto i : c10::irange(float_num_vecs())) {
|
| 1192 |
+
rhs[i].store(&float_vals[i * 16], 16);
|
| 1193 |
+
}
|
| 1194 |
+
|
| 1195 |
+
at::native::quantize_vec<c10::qint8>(
|
| 1196 |
+
scale,
|
| 1197 |
+
zero_point,
|
| 1198 |
+
float_vals.data(),
|
| 1199 |
+
(c10::qint8*)qvals.data(),
|
| 1200 |
+
16 * float_num_vecs());
|
| 1201 |
+
|
| 1202 |
+
return Vectorized<c10::qint8>::loadu(qvals.data());
|
| 1203 |
+
}
|
| 1204 |
+
|
| 1205 |
+
Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
|
| 1206 |
+
Vectorized<c10::qint8> retval;
|
| 1207 |
+
for (const auto i : c10::irange(size())) {
|
| 1208 |
+
retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
|
| 1209 |
+
}
|
| 1210 |
+
return retval;
|
| 1211 |
+
}
|
| 1212 |
+
|
| 1213 |
+
Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
|
| 1214 |
+
Vectorized<c10::qint8> retval;
|
| 1215 |
+
for (const auto i : c10::irange(size())) {
|
| 1216 |
+
retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
|
| 1217 |
+
}
|
| 1218 |
+
return retval;
|
| 1219 |
+
}
|
| 1220 |
+
|
| 1221 |
+
Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
|
| 1222 |
+
return maximum(zero_point);
|
| 1223 |
+
}
|
| 1224 |
+
|
| 1225 |
+
Vectorized<c10::qint8> relu6(
|
| 1226 |
+
Vectorized<c10::qint8> zero_point,
|
| 1227 |
+
Vectorized<c10::qint8> q_six) {
|
| 1228 |
+
Vectorized<c10::qint8> retval;
|
| 1229 |
+
for (const auto i : c10::irange(size())) {
|
| 1230 |
+
retval.vals[i] = std::min<value_type>(
|
| 1231 |
+
std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
|
| 1232 |
+
}
|
| 1233 |
+
return retval;
|
| 1234 |
+
}
|
| 1235 |
+
|
| 1236 |
+
int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
|
| 1237 |
+
int_vec_return_type retval;
|
| 1238 |
+
constexpr int elem_per_int_vec = size() / int_num_vecs();
|
| 1239 |
+
for (const auto i : c10::irange(int_num_vecs())) {
|
| 1240 |
+
for (const auto j : c10::irange(elem_per_int_vec)) {
|
| 1241 |
+
retval[i].vals[j] =
|
| 1242 |
+
static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
|
| 1243 |
+
static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
|
| 1244 |
+
}
|
| 1245 |
+
}
|
| 1246 |
+
return retval;
|
| 1247 |
+
}
|
| 1248 |
+
static Vectorized<c10::qint8> requantize_from_int(
|
| 1249 |
+
const int_vec_return_type& inp,
|
| 1250 |
+
float multiplier,
|
| 1251 |
+
int32_t zero_point) {
|
| 1252 |
+
constexpr int elem_per_int_vec = size() / int_num_vecs();
|
| 1253 |
+
constexpr auto min_val = std::numeric_limits<value_type>::min();
|
| 1254 |
+
constexpr auto max_val = std::numeric_limits<value_type>::max();
|
| 1255 |
+
Vectorized<c10::qint8> retval;
|
| 1256 |
+
for (const auto i : c10::irange(int_num_vecs())) {
|
| 1257 |
+
for (const auto j : c10::irange(elem_per_int_vec)) {
|
| 1258 |
+
int32_t rounded =
|
| 1259 |
+
std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
|
| 1260 |
+
zero_point;
|
| 1261 |
+
retval.vals[i * elem_per_int_vec + j] =
|
| 1262 |
+
std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
|
| 1263 |
+
}
|
| 1264 |
+
}
|
| 1265 |
+
return retval;
|
| 1266 |
+
}
|
| 1267 |
+
};
|
| 1268 |
+
|
| 1269 |
+
template <>
|
| 1270 |
+
Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
|
| 1271 |
+
return a.maximum(b);
|
| 1272 |
+
}
|
| 1273 |
+
|
| 1274 |
+
template <>
|
| 1275 |
+
struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
|
| 1276 |
+
c10::quint8,
|
| 1277 |
+
std::array<Vectorized<float>, 4>,
|
| 1278 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1279 |
+
64> {
|
| 1280 |
+
Vectorized()
|
| 1281 |
+
: VectorizedQuantizedConverter<
|
| 1282 |
+
c10::quint8,
|
| 1283 |
+
std::array<Vectorized<float>, 4>,
|
| 1284 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1285 |
+
64>() {}
|
| 1286 |
+
Vectorized(c10::quint8 val)
|
| 1287 |
+
: VectorizedQuantizedConverter<
|
| 1288 |
+
c10::quint8,
|
| 1289 |
+
std::array<Vectorized<float>, 4>,
|
| 1290 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1291 |
+
64>(val) {}
|
| 1292 |
+
Vectorized(const void* ptr)
|
| 1293 |
+
: VectorizedQuantizedConverter<
|
| 1294 |
+
c10::quint8,
|
| 1295 |
+
std::array<Vectorized<float>, 4>,
|
| 1296 |
+
std::array<Vectorized<c10::qint32>, 4>,
|
| 1297 |
+
64>(ptr) {}
|
| 1298 |
+
|
| 1299 |
+
static Vectorized<c10::quint8> loadu(const void* ptr) {
|
| 1300 |
+
return Vectorized<c10::quint8>(ptr);
|
| 1301 |
+
}
|
| 1302 |
+
|
| 1303 |
+
static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
|
| 1304 |
+
__at_align__ value_type tmp_values[size()];
|
| 1305 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
| 1306 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
| 1307 |
+
// instructions while a loop would be compiled to one instruction.
|
| 1308 |
+
for (const auto i : c10::irange(size())) {
|
| 1309 |
+
tmp_values[i] = 0;
|
| 1310 |
+
}
|
| 1311 |
+
std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
|
| 1312 |
+
return loadu(tmp_values);
|
| 1313 |
+
}
|
| 1314 |
+
|
| 1315 |
+
static Vectorized<c10::quint8> quantize(
|
| 1316 |
+
const float_vec_return_type& rhs,
|
| 1317 |
+
float scale,
|
| 1318 |
+
int32_t zero_point,
|
| 1319 |
+
float inverse_scale [[maybe_unused]]) {
|
| 1320 |
+
std::array<value_type, size()> qvals;
|
| 1321 |
+
std::array<float, float_num_vecs() * 16> float_vals;
|
| 1322 |
+
|
| 1323 |
+
for (const auto i : c10::irange(float_num_vecs())) {
|
| 1324 |
+
rhs[i].store(&float_vals[i * 16], 16);
|
| 1325 |
+
}
|
| 1326 |
+
|
| 1327 |
+
at::native::quantize_vec<c10::quint8>(
|
| 1328 |
+
scale,
|
| 1329 |
+
zero_point,
|
| 1330 |
+
float_vals.data(),
|
| 1331 |
+
(c10::quint8*)qvals.data(),
|
| 1332 |
+
16 * float_num_vecs());
|
| 1333 |
+
|
| 1334 |
+
return Vectorized<c10::quint8>::loadu(qvals.data());
|
| 1335 |
+
}
|
| 1336 |
+
|
| 1337 |
+
Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
|
| 1338 |
+
Vectorized<c10::quint8> retval;
|
| 1339 |
+
for (const auto i : c10::irange(size())) {
|
| 1340 |
+
retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
|
| 1341 |
+
}
|
| 1342 |
+
return retval;
|
| 1343 |
+
}
|
| 1344 |
+
|
| 1345 |
+
Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
|
| 1346 |
+
Vectorized<c10::quint8> retval;
|
| 1347 |
+
for (const auto i : c10::irange(size())) {
|
| 1348 |
+
retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
|
| 1349 |
+
}
|
| 1350 |
+
return retval;
|
| 1351 |
+
}
|
| 1352 |
+
|
| 1353 |
+
Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
|
| 1354 |
+
return maximum(zero_point);
|
| 1355 |
+
}
|
| 1356 |
+
|
| 1357 |
+
|
| 1358 |
+
Vectorized<c10::quint8> relu6(
|
| 1359 |
+
Vectorized<c10::quint8> zero_point,
|
| 1360 |
+
Vectorized<c10::quint8> q_six) {
|
| 1361 |
+
Vectorized<c10::quint8> retval;
|
| 1362 |
+
for (const auto i : c10::irange(size())) {
|
| 1363 |
+
retval.vals[i] = std::min<value_type>(
|
| 1364 |
+
std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
|
| 1365 |
+
}
|
| 1366 |
+
return retval;
|
| 1367 |
+
}
|
| 1368 |
+
|
| 1369 |
+
int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
|
| 1370 |
+
int_vec_return_type retval;
|
| 1371 |
+
constexpr int elem_per_int_vec = size() / int_num_vecs();
|
| 1372 |
+
for (const auto i : c10::irange(int_num_vecs())) {
|
| 1373 |
+
for (const auto j : c10::irange(elem_per_int_vec)) {
|
| 1374 |
+
retval[i].vals[j] =
|
| 1375 |
+
static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
|
| 1376 |
+
static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
|
| 1377 |
+
}
|
| 1378 |
+
}
|
| 1379 |
+
return retval;
|
| 1380 |
+
}
|
| 1381 |
+
static Vectorized<c10::quint8> requantize_from_int(
|
| 1382 |
+
const int_vec_return_type& inp,
|
| 1383 |
+
float multiplier,
|
| 1384 |
+
int32_t zero_point) {
|
| 1385 |
+
constexpr int elem_per_int_vec = size() / int_num_vecs();
|
| 1386 |
+
constexpr auto min_val = std::numeric_limits<value_type>::min();
|
| 1387 |
+
constexpr auto max_val = std::numeric_limits<value_type>::max();
|
| 1388 |
+
Vectorized<c10::quint8> retval;
|
| 1389 |
+
for (const auto i : c10::irange(int_num_vecs())) {
|
| 1390 |
+
for (const auto j : c10::irange(elem_per_int_vec)) {
|
| 1391 |
+
int32_t rounded =
|
| 1392 |
+
std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
|
| 1393 |
+
zero_point;
|
| 1394 |
+
retval.vals[i * elem_per_int_vec + j] =
|
| 1395 |
+
std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
|
| 1396 |
+
}
|
| 1397 |
+
}
|
| 1398 |
+
return retval;
|
| 1399 |
+
}
|
| 1400 |
+
};
|
| 1401 |
+
|
| 1402 |
+
template <>
|
| 1403 |
+
Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
|
| 1404 |
+
return a.maximum(b);
|
| 1405 |
+
}
|
| 1406 |
+
|
| 1407 |
+
#endif // defined(CPU_CAPABILITY_AVX512) && !defined(MSVC)
|
| 1408 |
+
|
| 1409 |
+
}}}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API int64_t _cufft_get_plan_cache_max_size(at::DeviceIndex device_index);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API int64_t _debug_has_internal_overlap(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_native.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sinh_slow(at::TensorList self);
|
| 20 |
+
TORCH_API void _foreach_sinh_out(at::TensorList self, at::TensorList out);
|
| 21 |
+
TORCH_API void foreach_tensor_sinh_slow_(at::TensorList self);
|
| 22 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_sinh_cuda(at::TensorList self);
|
| 23 |
+
TORCH_API void foreach_tensor_sinh_cuda_(at::TensorList self);
|
| 24 |
+
} // namespace native
|
| 25 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _fused_dropout {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, double, ::std::optional<at::Generator>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_dropout")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _fused_dropout_out {
|
| 29 |
+
using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, double, ::std::optional<at::Generator>, at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_dropout")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
|
| 35 |
+
static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1);
|
| 36 |
+
static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_is_all_true_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_is_all_true(Tensor self) -> Tensor
|
| 26 |
+
inline at::Tensor _is_all_true(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::_is_all_true::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_is_all_true_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _is_all_true(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautograd
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
} // namespace native
|
| 20 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence(const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_outf(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
|
| 23 |
+
|
| 24 |
+
} // namespace compositeexplicitautograd
|
| 25 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _scaled_dot_product_flash_attention {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool, ::std::optional<double>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_flash_attention")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_apply_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_semi_structured_apply(const at::Tensor & input, const at::Tensor & thread_masks);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _sparse_sum_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim);
|
| 21 |
+
TORCH_API at::Tensor & _sparse_sum_backward_outf(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_test_functorch_fallback.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_test_functorch_fallback_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor
|
| 26 |
+
inline at::Tensor _test_functorch_fallback(const at::Tensor & self, const at::Tensor & other) {
|
| 27 |
+
return at::_ops::_test_functorch_fallback::call(self, other);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _test_functorch_fallback_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
|
| 32 |
+
return at::_ops::_test_functorch_fallback_out::call(self, other, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::_test_functorch_fallback.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _test_functorch_fallback_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
|
| 36 |
+
return at::_ops::_test_functorch_fallback_out::call(self, other, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _unique2 {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool, bool, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unique2")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _unique2_out {
|
| 29 |
+
using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unique2")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
|
| 35 |
+
static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
|
| 36 |
+
static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_meta_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace meta
|
| 28 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim=0);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/aminmax_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
|
| 26 |
+
inline ::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, ::std::optional<int64_t> dim=::std::nullopt, bool keepdim=false) {
|
| 27 |
+
return at::_ops::aminmax::call(self, dim, keepdim);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
|
| 31 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, ::std::optional<int64_t> dim=::std::nullopt, bool keepdim=false) {
|
| 32 |
+
return at::_ops::aminmax_out::call(self, dim, keepdim, min, max);
|
| 33 |
+
}
|
| 34 |
+
// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
|
| 35 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
|
| 36 |
+
return at::_ops::aminmax_out::call(self, dim, keepdim, min, max);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/angle_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API angle {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::angle")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "angle(Tensor self) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API angle_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::angle")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_ops.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API bartlett_window {
|
| 18 |
+
using schema = at::Tensor (int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API bartlett_window_periodic {
|
| 29 |
+
using schema = at::Tensor (int64_t, bool, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 35 |
+
static at::Tensor call(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API bartlett_window_out {
|
| 40 |
+
using schema = at::Tensor & (int64_t, at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(int64_t window_length, at::Tensor & out);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API bartlett_window_periodic_out {
|
| 51 |
+
using schema = at::Tensor & (int64_t, bool, at::Tensor &);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic_out")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")
|
| 57 |
+
static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out);
|
| 58 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & binomial_out(at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor & binomial_outf(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_meta_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & cauchy_(at::Tensor & self, double median=0, double sigma=1, ::std::optional<at::Generator> generator=::std::nullopt);
|
| 21 |
+
|
| 22 |
+
} // namespace meta
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/constant_pad_nd_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
|
| 20 |
+
TORCH_API at::Tensor & constant_pad_nd_out_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/conv_tbc_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
|
| 26 |
+
inline at::Tensor conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) {
|
| 27 |
+
return at::_ops::conv_tbc::call(self, weight, bias, pad);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & conv_tbc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) {
|
| 32 |
+
return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & conv_tbc_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) {
|
| 36 |
+
return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_pack_gemm_matrix_fp16_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API fbgemm_pack_gemm_matrix_fp16 {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_pack_gemm_matrix_fp16")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & input);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/fft_ifft_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
|
| 26 |
+
inline at::Tensor fft_ifft(const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 27 |
+
return at::_ops::fft_ifft::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor fft_ifft(const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 32 |
+
return at::_ops::fft_ifft::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
|
| 37 |
+
inline at::Tensor fft_ifft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 38 |
+
return at::_ops::fft_ifft::call(self, n, dim, norm);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor fft_ifft(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 43 |
+
return at::_ops::fft_ifft::call(self, n, dim, norm);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 49 |
+
return at::_ops::fft_ifft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 53 |
+
at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 54 |
+
return at::_ops::fft_ifft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & fft_ifft_outf(const at::Tensor & self, ::std::optional<int64_t> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
|
| 60 |
+
return at::_ops::fft_ifft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 64 |
+
at::Tensor & fft_ifft_outf(const at::Tensor & self, ::std::optional<int64_t> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
|
| 65 |
+
return at::_ops::fft_ifft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 70 |
+
inline at::Tensor & fft_ifft_symint_out(at::Tensor & out, const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 71 |
+
return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 75 |
+
at::Tensor & fft_ifft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
|
| 76 |
+
return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
|
| 81 |
+
inline at::Tensor & fft_ifft_symint_outf(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
|
| 82 |
+
return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor & fft_ifft_outf(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
|
| 87 |
+
return at::_ops::fft_ifft_out::call(self, n, dim, norm, out);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_native.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/fmod_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Scalar & other);
|
| 20 |
+
TORCH_API at::Tensor & fmod_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 21 |
+
TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Scalar & other);
|
| 22 |
+
struct TORCH_API structured_fmod_out : public at::meta::structured_fmod_Tensor {
|
| 23 |
+
void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
|
| 24 |
+
};
|
| 25 |
+
} // namespace native
|
| 26 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/fractional_max_pool3d_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_fractional_max_pool3d_out_cpu : public at::meta::structured_fractional_max_pool3d {
|
| 20 |
+
void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices);
|
| 21 |
+
};
|
| 22 |
+
struct TORCH_API structured_fractional_max_pool3d_out_cuda : public at::meta::structured_fractional_max_pool3d {
|
| 23 |
+
void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices);
|
| 24 |
+
};
|
| 25 |
+
} // namespace native
|
| 26 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/gt_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other);
|
| 21 |
+
TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
|
| 22 |
+
TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other);
|
| 24 |
+
TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other);
|
| 25 |
+
TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 26 |
+
TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 27 |
+
TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other);
|
| 28 |
+
|
| 29 |
+
} // namespace cuda
|
| 30 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/hamming_window_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, at::TensorOptions options={});
|
| 21 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 22 |
+
TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length);
|
| 23 |
+
TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options={});
|
| 25 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 26 |
+
TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic);
|
| 27 |
+
TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, at::Tensor & out);
|
| 28 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options={});
|
| 29 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 30 |
+
TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha);
|
| 31 |
+
TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, at::Tensor & out);
|
| 32 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={});
|
| 33 |
+
TORCH_API at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 34 |
+
TORCH_API at::Tensor & hamming_window_out(at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta);
|
| 35 |
+
TORCH_API at::Tensor & hamming_window_outf(int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out);
|
| 36 |
+
|
| 37 |
+
} // namespace compositeexplicitautograd
|
| 38 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 23 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_meta.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_index_copy : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
template <bool DIM = false>
|
| 23 |
+
struct TORCH_API precompute_out {
|
| 24 |
+
|
| 25 |
+
precompute_out<true> set_dim(int64_t value) {
|
| 26 |
+
static_assert(DIM == false, "dim already set");
|
| 27 |
+
precompute_out<true> ret;
|
| 28 |
+
ret.dim = value;
|
| 29 |
+
return ret;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
int64_t dim;
|
| 33 |
+
};
|
| 34 |
+
using meta_return_ty = precompute_out <true>;
|
| 35 |
+
meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
|
| 36 |
+
};
|
| 37 |
+
|
| 38 |
+
} // namespace native
|
| 39 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cross_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_linalg_cross : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self, const at::Tensor & other, int64_t dim);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_norm_ops.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API linalg_norm {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_norm")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API linalg_norm_ord_str {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_norm")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ord_str")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API linalg_norm_out {
|
| 40 |
+
using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_norm")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API linalg_norm_ord_str_out {
|
| 51 |
+
using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::OptionalIntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_norm")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ord_str_out")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
|
| 57 |
+
static at::Tensor & call(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
|
| 58 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/logical_and_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/logspace_ops.h
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API logspace {
|
| 18 |
+
using schema = at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API logspace_Tensor_Tensor {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API logspace_Tensor_Scalar {
|
| 40 |
+
using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, double, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 46 |
+
static at::Tensor call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 47 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API logspace_Scalar_Tensor {
|
| 51 |
+
using schema = at::Tensor (const at::Scalar &, const at::Tensor &, int64_t, double, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
|
| 57 |
+
static at::Tensor call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 58 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
struct TORCH_API logspace_out {
|
| 62 |
+
using schema = at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, double, at::Tensor &);
|
| 63 |
+
using ptr_schema = schema*;
|
| 64 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 65 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 66 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 67 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)")
|
| 68 |
+
static at::Tensor & call(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
|
| 69 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
struct TORCH_API logspace_Tensor_Tensor_out {
|
| 73 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &);
|
| 74 |
+
using ptr_schema = schema*;
|
| 75 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 76 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 77 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out")
|
| 78 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)")
|
| 79 |
+
static at::Tensor & call(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
|
| 80 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
struct TORCH_API logspace_Tensor_Scalar_out {
|
| 84 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, double, at::Tensor &);
|
| 85 |
+
using ptr_schema = schema*;
|
| 86 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 87 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 88 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
|
| 89 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)")
|
| 90 |
+
static at::Tensor & call(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
|
| 91 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
struct TORCH_API logspace_Scalar_Tensor_out {
|
| 95 |
+
using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, int64_t, double, at::Tensor &);
|
| 96 |
+
using ptr_schema = schema*;
|
| 97 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 98 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logspace")
|
| 99 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out")
|
| 100 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)")
|
| 101 |
+
static at::Tensor & call(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
|
| 102 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, at::Tensor & out);
|
| 103 |
+
};
|
| 104 |
+
|
| 105 |
+
}} // namespace at::_ops
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_backward_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes);
|
| 21 |
+
TORCH_API at::Tensor masked_scatter_backward_symint(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_H.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/matrix_H_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
}
|
videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/moveaxis_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API moveaxis_intlist {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::moveaxis")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "intlist")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API moveaxis_int {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::moveaxis")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, int64_t source, int64_t destination);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|