ZTWHHH commited on
Commit
4a6f0a5
·
verified ·
1 Parent(s): cc5fe26

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h +29 -0
  2. videochat2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h +29 -0
  3. videochat2/lib/python3.10/site-packages/torch/include/ATen/Config.h +21 -0
  4. videochat2/lib/python3.10/site-packages/torch/include/ATen/Dimname.h +1 -0
  5. videochat2/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h +166 -0
  6. videochat2/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h +46 -0
  7. videochat2/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h +31 -0
  8. videochat2/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h +54 -0
  9. videochat2/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h +190 -0
  10. videochat2/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h +0 -0
  11. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h +176 -0
  12. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h +260 -0
  13. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h +229 -0
  14. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +32 -0
  15. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h +39 -0
  16. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h +395 -0
  17. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h +600 -0
  18. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h +124 -0
  19. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h +90 -0
  20. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h +28 -0
  21. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h +65 -0
  22. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h +242 -0
  23. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h +793 -0
  24. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h +17 -0
  25. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h +313 -0
  26. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h +30 -0
  27. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h +36 -0
  28. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h +21 -0
  29. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h +101 -0
  30. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h +355 -0
  31. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h +83 -0
  32. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h +160 -0
  33. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h +199 -0
  34. videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h +596 -0
  35. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h +14 -0
  36. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h +30 -0
  37. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h +4 -0
  38. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h +358 -0
  39. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h +549 -0
  40. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h +43 -0
  41. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h +47 -0
  42. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h +452 -0
  43. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h +8 -0
  44. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h +330 -0
  45. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h +1182 -0
  46. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h +432 -0
  47. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h +469 -0
  48. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_convert.h +308 -0
  49. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h +447 -0
  50. videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h +656 -0
videochat2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/core/TensorBody.h>
2
+
3
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
4
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
5
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
6
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
7
+ //
8
+ // Note [Avoiding Include Cycles In Static Dispatch]
9
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
10
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
11
+ //
12
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
13
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
14
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
15
+ // directly inlined into TensorBody.h.
16
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
17
+ // which include functions that have defaultable std::optional<Tensor> arguments.
18
+ // That requires knowing the full Tensor class definition.
19
+ //
20
+ // We break the cycle by doing the following:
21
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
22
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
23
+ // - CPUFunctions_inl.h includes everything else
24
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
25
+ // and then it includes CPUFunctions_inl.h.
26
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
27
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
28
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
29
+ #include <ATen/CUDAFunctions_inl.h>
videochat2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/core/TensorBody.h>
2
+
3
+ // TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
4
+ // Code introduced to avoid cyclic dependency in static dispatch is no longer
5
+ // needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
6
+ // to Operators.cpp for supporting multiple backends with multiple kernels.
7
+ //
8
+ // Note [Avoiding Include Cycles In Static Dispatch]
9
+ // In order to avoid #include cycles in the static dispatch build, we've carefully split out
10
+ // the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
11
+ //
12
+ // Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
13
+ // - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
14
+ // all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
15
+ // directly inlined into TensorBody.h.
16
+ // - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
17
+ // which include functions that have defaultable std::optional<Tensor> arguments.
18
+ // That requires knowing the full Tensor class definition.
19
+ //
20
+ // We break the cycle by doing the following:
21
+ // - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h
22
+ // - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.,
23
+ // - CPUFunctions_inl.h includes everything else
24
+ // - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
25
+ // and then it includes CPUFunctions_inl.h.
26
+ // - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
27
+ // - This also means that static dispatch build, CPUFunctions.h only needs to
28
+ // #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
29
+ #include <ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h>
videochat2/lib/python3.10/site-packages/torch/include/ATen/Config.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's
4
+ // obvious if you forgot to include Config.h
5
+ // c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
6
+ //
7
+ // DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h
8
+
9
+ #define AT_MKLDNN_ENABLED() 1
10
+ #define AT_MKLDNN_ACL_ENABLED() 0
11
+ #define AT_MKL_ENABLED() 1
12
+ #define AT_MKL_SEQUENTIAL() 0
13
+ #define AT_POCKETFFT_ENABLED() 0
14
+ #define AT_NNPACK_ENABLED() 1
15
+ #define CAFFE2_STATIC_LINK_CUDA() 0
16
+ #define AT_BUILD_WITH_BLAS() 1
17
+ #define AT_BUILD_WITH_LAPACK() 1
18
+ #define AT_PARALLEL_OPENMP 1
19
+ #define AT_PARALLEL_NATIVE 0
20
+ #define AT_BLAS_F2C() 0
21
+ #define AT_BLAS_USE_CBLAS_DOT() 0
videochat2/lib/python3.10/site-packages/torch/include/ATen/Dimname.h ADDED
@@ -0,0 +1 @@
 
 
1
+ #include <ATen/core/Dimname.h>
videochat2/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/TensorBase.h>
3
+
4
+ namespace at::detail {
5
+
6
+ inline void check_size_nonnegative(ArrayRef<int64_t> size) {
7
+ for (const auto& x : size) {
8
+ TORCH_CHECK(
9
+ x >= 0,
10
+ "Trying to create tensor with negative dimension ",
11
+ x,
12
+ ": ",
13
+ size);
14
+ }
15
+ }
16
+
17
+ inline void check_size_nonnegative(ArrayRef<c10::SymInt> size) {
18
+ for (const auto& x : size) {
19
+ TORCH_CHECK(
20
+ x.expect_size(__FILE__, __LINE__),
21
+ "Trying to create tensor with negative dimension ",
22
+ x,
23
+ ": ",
24
+ size);
25
+ }
26
+ }
27
+
28
+ TORCH_API size_t computeStorageNbytesContiguous(
29
+ IntArrayRef sizes,
30
+ size_t itemsize,
31
+ size_t storage_offset = 0);
32
+ TORCH_API SymInt computeStorageNbytesContiguous(
33
+ SymIntArrayRef sizes,
34
+ const SymInt& itemsize,
35
+ const SymInt& storage_offset = 0);
36
+ TORCH_API size_t computeStorageNbytes(
37
+ IntArrayRef sizes,
38
+ IntArrayRef strides,
39
+ size_t itemsize,
40
+ size_t storage_offset = 0);
41
+ TORCH_API SymInt computeStorageNbytes(
42
+ SymIntArrayRef sizes,
43
+ SymIntArrayRef strides,
44
+ const SymInt& itemsize,
45
+ const SymInt& storage_offset = 0);
46
+
47
+ TORCH_API TensorBase empty_generic(
48
+ IntArrayRef size,
49
+ c10::Allocator* allocator,
50
+ c10::DispatchKeySet ks,
51
+ ScalarType scalar_type,
52
+ std::optional<c10::MemoryFormat> memory_format_opt);
53
+
54
+ TORCH_API TensorBase empty_generic_symint(
55
+ SymIntArrayRef size,
56
+ c10::Allocator* allocator,
57
+ c10::DispatchKeySet ks,
58
+ ScalarType scalar_type,
59
+ std::optional<c10::MemoryFormat> memory_format_opt);
60
+
61
+ TORCH_API TensorBase empty_strided_generic(
62
+ IntArrayRef size,
63
+ IntArrayRef stride,
64
+ c10::Allocator* allocator,
65
+ c10::DispatchKeySet ks,
66
+ ScalarType scalar_type);
67
+
68
+ TORCH_API TensorBase empty_strided_symint_generic(
69
+ SymIntArrayRef size,
70
+ SymIntArrayRef stride,
71
+ c10::Allocator* allocator,
72
+ c10::DispatchKeySet ks,
73
+ ScalarType scalar_type);
74
+
75
+ TORCH_API TensorBase empty_cpu(
76
+ IntArrayRef size,
77
+ ScalarType dtype,
78
+ bool pin_memory = false,
79
+ std::optional<c10::MemoryFormat> memory_format_opt = std::nullopt);
80
+
81
+ TORCH_API TensorBase empty_cpu(
82
+ IntArrayRef size,
83
+ std::optional<ScalarType> dtype_opt,
84
+ std::optional<Layout> layout_opt,
85
+ std::optional<Device> device_opt,
86
+ std::optional<bool> pin_memory_opt,
87
+ std::optional<c10::MemoryFormat> memory_format_opt);
88
+
89
+ TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);
90
+
91
+ TORCH_API TensorBase empty_strided_cpu(
92
+ IntArrayRef size,
93
+ IntArrayRef stride,
94
+ ScalarType dtype,
95
+ bool pin_memory = false);
96
+
97
+ TORCH_API TensorBase empty_strided_cpu(
98
+ IntArrayRef size,
99
+ IntArrayRef stride,
100
+ std::optional<ScalarType> dtype_opt,
101
+ std::optional<Layout> layout_opt,
102
+ std::optional<Device> device_opt,
103
+ std::optional<bool> pin_memory_opt);
104
+
105
+ TORCH_API TensorBase empty_strided_cpu(
106
+ IntArrayRef size,
107
+ IntArrayRef stride,
108
+ const TensorOptions& options);
109
+
110
+ TORCH_API TensorBase empty_meta(
111
+ IntArrayRef size,
112
+ ScalarType dtype,
113
+ std::optional<c10::MemoryFormat> memory_format_opt = std::nullopt);
114
+
115
+ TORCH_API TensorBase empty_meta(
116
+ IntArrayRef size,
117
+ std::optional<ScalarType> dtype_opt,
118
+ std::optional<Layout> layout_opt,
119
+ std::optional<Device> device_opt,
120
+ std::optional<bool> pin_memory_opt,
121
+ std::optional<c10::MemoryFormat> memory_format_opt);
122
+
123
+ TORCH_API TensorBase empty_symint_meta(
124
+ SymIntArrayRef size,
125
+ std::optional<ScalarType> dtype_opt,
126
+ std::optional<Layout> layout_opt,
127
+ std::optional<Device> device_opt,
128
+ std::optional<bool> pin_memory_opt,
129
+ std::optional<c10::MemoryFormat> memory_format_opt);
130
+
131
+ TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
132
+
133
+ TORCH_API TensorBase
134
+ empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);
135
+
136
+ TORCH_API TensorBase empty_strided_meta(
137
+ IntArrayRef size,
138
+ IntArrayRef stride,
139
+ std::optional<ScalarType> dtype_opt,
140
+ std::optional<Layout> layout_opt,
141
+ std::optional<Device> device_opt,
142
+ std::optional<bool> pin_memory_opt);
143
+
144
+ TORCH_API TensorBase empty_strided_meta(
145
+ IntArrayRef size,
146
+ IntArrayRef stride,
147
+ const TensorOptions& options);
148
+
149
+ TORCH_API TensorBase empty_strided_symint_meta(
150
+ SymIntArrayRef size,
151
+ SymIntArrayRef stride,
152
+ ScalarType dtype);
153
+
154
+ TORCH_API TensorBase empty_strided_symint_meta(
155
+ SymIntArrayRef size,
156
+ SymIntArrayRef stride,
157
+ std::optional<ScalarType> dtype_opt,
158
+ std::optional<Layout> layout_opt,
159
+ std::optional<Device> device_opt);
160
+
161
+ TORCH_API TensorBase empty_strided_symint_meta(
162
+ SymIntArrayRef size,
163
+ SymIntArrayRef stride,
164
+ const TensorOptions& options);
165
+
166
+ } // namespace at::detail
videochat2/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+ #include <memory>
5
+
6
+ namespace at::functorch {
7
+
8
+ // NOTE [functorch TLS in pytorch/pytorch]
9
+ //
10
+ // functorch lives out-of-tree. However, it has some TLS that needs to be
11
+ // propagated. The solution for that is we store a pointer to the TLS
12
+ // inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to
13
+ // include whatever functorch needs.
14
+ //
15
+ // We need to store a pointer due to the indirection:
16
+ // inside functorch, we will create a subclass of FunctorchTLSBase called
17
+ // FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack.
18
+ // FuncTorchTLSBase doesn't have any metadata because it hasn't been defined
19
+ // yet.
20
+ //
21
+ // Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside
22
+ // functorch, we will assign a FuncTorchTLSImpl* to the FunctorchTLSBase*.
23
+ // We can't directly pass around FunctorchTLSBase (without a pointer) because
24
+ // FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having
25
+ // more elements.
26
+ struct TORCH_API FuncTorchTLSBase {
27
+ virtual ~FuncTorchTLSBase() = default;
28
+ virtual std::unique_ptr<FuncTorchTLSBase> deepcopy() const = 0;
29
+
30
+ virtual int64_t checkSupportsSingleLevelAutogradFunction() const = 0;
31
+ virtual void checkSupportsCppAutogradFunction() const = 0;
32
+ virtual void checkSupportsInplaceRequiresGrad() const = 0;
33
+ virtual void checkSupportsRetainGrad() const = 0;
34
+ };
35
+
36
+ // returns deepcopy of the functorch tls
37
+ TORCH_API std::unique_ptr<FuncTorchTLSBase> getCopyOfFuncTorchTLS();
38
+
39
+ // sets the functorch tls. always does a deep copy.
40
+ TORCH_API void setFuncTorchTLS(
41
+ const std::shared_ptr<const FuncTorchTLSBase>& state);
42
+
43
+ // get a mutable reference to the functorch tls
44
+ TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor();
45
+
46
+ } // namespace at::functorch
videochat2/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ #include <ostream>
6
+ #include <string>
7
+
8
+ namespace at {
9
+
10
+ enum class LinalgBackend : int8_t { Default, Cusolver, Magma };
11
+
12
+ inline std::string LinalgBackendToString(at::LinalgBackend backend) {
13
+ switch (backend) {
14
+ case LinalgBackend::Default:
15
+ return "at::LinalgBackend::Default";
16
+ case LinalgBackend::Cusolver:
17
+ return "at::LinalgBackend::Cusolver";
18
+ case LinalgBackend::Magma:
19
+ return "at::LinalgBackend::Magma";
20
+ default:
21
+ TORCH_CHECK(false, "Unknown linalg backend");
22
+ }
23
+ }
24
+
25
+ inline std::ostream& operator<<(
26
+ std::ostream& stream,
27
+ at::LinalgBackend backend) {
28
+ return stream << LinalgBackendToString(backend);
29
+ }
30
+
31
+ } // namespace at
videochat2/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <atomic>
5
+ #include <cstddef>
6
+ #include <exception>
7
+
8
+ #ifdef _OPENMP
9
+ #define INTRA_OP_PARALLEL
10
+
11
+ #include <omp.h>
12
+ #endif
13
+
14
+ #ifdef _OPENMP
15
+ namespace at::internal {
16
+ template <typename F>
17
+ inline void invoke_parallel(
18
+ int64_t begin,
19
+ int64_t end,
20
+ int64_t grain_size,
21
+ const F& f) {
22
+ std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
23
+ std::exception_ptr eptr;
24
+
25
+ #pragma omp parallel
26
+ {
27
+ // choose number of tasks based on grain size and number of threads
28
+ // can't use num_threads clause due to bugs in GOMP's thread pool (See
29
+ // #32008)
30
+ int64_t num_threads = omp_get_num_threads();
31
+ if (grain_size > 0) {
32
+ num_threads = std::min(num_threads, divup((end - begin), grain_size));
33
+ }
34
+
35
+ int64_t tid = omp_get_thread_num();
36
+ int64_t chunk_size = divup((end - begin), num_threads);
37
+ int64_t begin_tid = begin + tid * chunk_size;
38
+ if (begin_tid < end) {
39
+ try {
40
+ internal::ThreadIdGuard tid_guard(tid);
41
+ f(begin_tid, std::min(end, chunk_size + begin_tid));
42
+ } catch (...) {
43
+ if (!err_flag.test_and_set()) {
44
+ eptr = std::current_exception();
45
+ }
46
+ }
47
+ }
48
+ }
49
+ if (eptr) {
50
+ std::rethrow_exception(eptr);
51
+ }
52
+ }
53
+ } // namespace at::internal
54
+ #endif // _OPENMP
videochat2/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/DimVector.h>
4
+ #include <ATen/EmptyTensor.h>
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/TensorGeometry.h>
7
+ #include <ATen/Utils.h>
8
+
9
+ #include <utility>
10
+
11
+ // These functions are NOT in Utils.h, because this file has a dep on Tensor.h
12
+
13
+ #define TORCH_CHECK_TENSOR_ALL(cond, ...) \
14
+ TORCH_CHECK((cond)._is_all_true().item<bool>(), __VA_ARGS__);
15
+
16
+ namespace at {
17
+
18
+ // The following are utility functions for checking that arguments
19
+ // make sense. These are particularly useful for native functions,
20
+ // which do NO argument checking by default.
21
+
22
+ struct TORCH_API TensorArg {
23
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
24
+ const Tensor& tensor;
25
+ const char* name;
26
+ int pos; // 1-indexed
27
+ TensorArg(const Tensor& tensor, const char* name, int pos)
28
+ : tensor(tensor), name(name), pos(pos) {}
29
+ // Try to mitigate any possibility of dangling reference to temporaries.
30
+ // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
31
+ TensorArg(Tensor&& tensor, const char* name, int pos) = delete;
32
+ const Tensor* operator->() const {
33
+ return &tensor;
34
+ }
35
+ const Tensor& operator*() const {
36
+ return tensor;
37
+ }
38
+ };
39
+
40
+ struct TORCH_API TensorGeometryArg {
41
+ TensorGeometry tensor;
42
+ const char* name;
43
+ int pos; // 1-indexed
44
+ /* implicit */ TensorGeometryArg(TensorArg arg)
45
+ : tensor(TensorGeometry{arg.tensor}), name(arg.name), pos(arg.pos) {}
46
+ TensorGeometryArg(TensorGeometry tensor, const char* name, int pos)
47
+ : tensor(std::move(tensor)), name(name), pos(pos) {}
48
+ const TensorGeometry* operator->() const {
49
+ return &tensor;
50
+ }
51
+ const TensorGeometry& operator*() const {
52
+ return tensor;
53
+ }
54
+ };
55
+
56
+ // A string describing which function did checks on its input
57
+ // arguments.
58
+ // TODO: Consider generalizing this into a call stack.
59
+ using CheckedFrom = const char*;
60
+
61
+ // The undefined convention: singular operators assume their arguments
62
+ // are defined, but functions which take multiple tensors will
63
+ // implicitly filter out undefined tensors (to make it easier to perform
64
+ // tests which should apply if the tensor is defined, and should not
65
+ // otherwise.)
66
+ //
67
+ // NB: This means that the n-ary operators take lists of TensorArg,
68
+ // not TensorGeometryArg, because the Tensor to TensorGeometry
69
+ // conversion will blow up if you have undefined tensors.
70
+
71
+ TORCH_API std::ostream& operator<<(
72
+ std::ostream& out,
73
+ const TensorGeometryArg& t);
74
+ TORCH_API void checkDim(
75
+ CheckedFrom c,
76
+ const Tensor& tensor,
77
+ const char* name,
78
+ int pos, // 1-indexed
79
+ int64_t dim);
80
+ TORCH_API void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim);
81
+ // NB: this is an inclusive-exclusive range
82
+ TORCH_API void checkDimRange(
83
+ CheckedFrom c,
84
+ const TensorGeometryArg& t,
85
+ int64_t dim_start,
86
+ int64_t dim_end);
87
+ TORCH_API void checkSameDim(
88
+ CheckedFrom c,
89
+ const TensorGeometryArg& t1,
90
+ const TensorGeometryArg& t2);
91
+ TORCH_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
92
+ TORCH_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
93
+ TORCH_API void checkSize(
94
+ CheckedFrom c,
95
+ const TensorGeometryArg& t,
96
+ IntArrayRef sizes);
97
+ TORCH_API void checkSize_symint(
98
+ CheckedFrom c,
99
+ const TensorGeometryArg& t,
100
+ c10::SymIntArrayRef sizes);
101
+ TORCH_API void checkSize(
102
+ CheckedFrom c,
103
+ const TensorGeometryArg& t,
104
+ int64_t dim,
105
+ int64_t size);
106
+ TORCH_API void checkSize_symint(
107
+ CheckedFrom c,
108
+ const TensorGeometryArg& t,
109
+ int64_t dim,
110
+ const c10::SymInt& size);
111
+ TORCH_API void checkNumel(
112
+ CheckedFrom c,
113
+ const TensorGeometryArg& t,
114
+ int64_t numel);
115
+ TORCH_API void checkSameNumel(
116
+ CheckedFrom c,
117
+ const TensorArg& t1,
118
+ const TensorArg& t2);
119
+ TORCH_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
120
+ TORCH_API void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType s);
121
+ TORCH_API void checkScalarTypes(
122
+ CheckedFrom c,
123
+ const TensorArg& t,
124
+ at::ArrayRef<ScalarType> l);
125
+ TORCH_API void checkSameGPU(
126
+ CheckedFrom c,
127
+ const TensorArg& t1,
128
+ const TensorArg& t2);
129
+ TORCH_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
130
+ TORCH_API void checkSameType(
131
+ CheckedFrom c,
132
+ const TensorArg& t1,
133
+ const TensorArg& t2);
134
+ TORCH_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
135
+ TORCH_API void checkSameSize(
136
+ CheckedFrom c,
137
+ const TensorArg& t1,
138
+ const TensorArg& t2);
139
+ TORCH_API void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors);
140
+ TORCH_API void checkDefined(CheckedFrom c, const TensorArg& t);
141
+ TORCH_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);
142
+
143
+ // FixMe: does TensorArg slow things down?
144
+ TORCH_API void checkBackend(
145
+ CheckedFrom c,
146
+ at::ArrayRef<Tensor> t,
147
+ at::Backend backend);
148
+
149
+ TORCH_API void checkDeviceType(
150
+ CheckedFrom c,
151
+ at::ArrayRef<Tensor> tensors,
152
+ at::DeviceType device_type);
153
+
154
+ TORCH_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);
155
+
156
+ TORCH_API void checkLayout(
157
+ CheckedFrom c,
158
+ at::ArrayRef<Tensor> tensors,
159
+ at::Layout layout);
160
+
161
+ // Methods for getting data_ptr if tensor is defined
162
+ TORCH_API void* maybe_data_ptr(const Tensor& tensor);
163
+ TORCH_API void* maybe_data_ptr(const TensorArg& tensor);
164
+
165
+ TORCH_API void check_dim_size(
166
+ const Tensor& tensor,
167
+ int64_t dim,
168
+ int64_t dim_size,
169
+ int64_t size);
170
+
171
+ namespace detail {
172
+ TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
173
+
174
+ TORCH_API std::optional<std::vector<int64_t>> computeStride(
175
+ IntArrayRef oldshape,
176
+ IntArrayRef oldstride,
177
+ IntArrayRef newshape);
178
+
179
+ TORCH_API std::optional<SymDimVector> computeStride(
180
+ c10::SymIntArrayRef oldshape,
181
+ c10::SymIntArrayRef oldstride,
182
+ c10::SymIntArrayRef newshape);
183
+
184
+ TORCH_API std::optional<DimVector> computeStride(
185
+ IntArrayRef oldshape,
186
+ IntArrayRef oldstride,
187
+ const DimVector& newshape);
188
+
189
+ } // namespace detail
190
+ } // namespace at
videochat2/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/BoxedKernel.h ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+
7
+ namespace c10 {
8
+
9
+ struct IValue;
10
+ using Stack = std::vector<IValue>;
11
+
12
+ class OperatorHandle;
13
+ class KernelFunction;
14
+
15
+ // This kernel implements the behavior of falling through to the next available
16
+ // registered dispatch key. The implementation of this function is FAST; it is
17
+ // no overhead to fallthrough to the next key. See cpp file for some more
18
+ // implementation notes; notably, this does NOT actually go through the
19
+ // boxing/unboxing codepath.
20
+ TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
21
+
22
+ // Note [Ambiguity in AutogradOther kernel]
23
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24
+ // This error-reporting kernel is registered to the AutogradOther entry in the
25
+ // dispatch table when there is both a CompositeImplicitAutograd kernel and a
26
+ // backend kernel for ANY backend that maps to AutogradOther. To see why
27
+ // this is necessary in the AutogradOther case, it's helpful to first see
28
+ // why everything works out fine for a backend that has a reserved Autograd
29
+ // entry (see rule 2.2 in [Note] DispatchTable computation):
30
+ //
31
+ // CPU AutogradCPU
32
+ // reg? registers with...
33
+ // -------------------------------------------------
34
+ // y Autograd registration takes precedence
35
+ // over CompositeImplicitAutograd.
36
+ // This is good, because the CPU specific backend
37
+ // implementation is more specialized and typically better;
38
+ // if we used the composite, we would bypass it.
39
+ // (NB: the Autograd key is guaranteed to exist because
40
+ // the autograd codegen requires it!)
41
+ //
42
+ // n CompositeImplicitAutograd takes precedence.
43
+ // This is also good, because the Autograd
44
+ // registration (if it exists) would try to redispatch
45
+ // to the (non-existent) CPU implementation; by
46
+ // using the composite, we ensure the operator
47
+ // actually works.
48
+ //
49
+ // As you can see, when we have a specific Autograd key (AutogradCPU), we can
50
+ // decide whether or not to use the CompositeImplicitAutograd kernel or the
51
+ // Autograd kernel based on whether or not the backend kernel exists.
52
+ //
53
+ // However, for AutogradOther (which is the catchall autograd kernel for
54
+ // everything that doesn't have a specific Autograd key), we can't do this
55
+ // trick because there isn't any unique backend to peek at to disambiguate;
56
+ // if there are some backends that have implementations they prefer Autograd,
57
+ // but unimplemented backends would prefer CompositeImplicitAutograd. Rather
58
+ // than arbitrarily pick one or the other, we just register a kernel that raises
59
+ // an error and let the user decide how to proceed.
60
+ TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
61
+
62
+ // Note [named_not_supported_kernel]
63
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64
+ // This kernel implements reporting an error message saying that named tensor is
65
+ // not supported. This kernel doesn't rely on the Stack, and so it is special
66
+ // cased in the dispatcher to be triggered before we attempt boxing (so we can
67
+ // give a good error message in cases when boxing is not supported). When
68
+ // boxing is universally supported this can be removed.
69
+ [[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
70
+
71
+ /**
72
+ * BoxedKernel is similar to a std::function storing a boxed kernel.
73
+ */
74
+ class TORCH_API BoxedKernel final {
75
+ public:
76
+ // This is how boxed kernels are actually stored
77
+ //
78
+ // Note [Plumbing Keys Through The Dispatcher]
79
+ // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
80
+ // upon every dispatch call into order to compute which kernel to dispatch to.
81
+ //
82
+ // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
83
+ // to have a first argument of type DispatchKeySet.
84
+ //
85
+ // What are the invariants of the DispatchKeySet when it gets passed to a kernel?
86
+ // - All keys to the left of the current dispatch key have been masked out.
87
+ // (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
88
+ // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments
89
+ // are still in the set.
90
+ //
91
+ // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
92
+ // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
93
+ // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
94
+ // upon redispatching.
95
+ //
96
+ // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature
97
+ // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
98
+ //
99
+ // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
100
+ // See Note [Plumbing Keys Through The Dispatcher 2] for details.
101
+ using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
102
+ // This is the public API for how boxed kernels are defined
103
+ using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
104
+ using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
105
+
106
+ BoxedKernel();
107
+
108
+ // Fast path for dispatch to allow not touching the boxed kernel in
109
+ // the common case where unboxed is available.
110
+ bool isValid() const;
111
+ bool isFallthrough() const;
112
+
113
+ /**
114
+ * Call the function with boxed arguments.
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Create a KernelFunction from a boxed function.
120
+ *
121
+ * Example:
122
+ *
123
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
124
+ * > BoxedFunction func = BoxedKernel::makeFromFunction<&boxed_func>();
125
+ */
126
+ template<BoxedKernelFunction* func>
127
+ static BoxedKernel makeFromFunction();
128
+
129
+ /**
130
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
131
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
132
+ */
133
+ template<BoxedKernelFunction_withDispatchKeys* func>
134
+ static BoxedKernel makeFromFunction();
135
+
136
+ /**
137
+ * Create a KernelFunction from a boxed functor.
138
+ *
139
+ * Example:
140
+ *
141
+ * > class MyFunctor final : public c10::OperatorKernel {
142
+ * > public:
143
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
144
+ * > };
145
+ * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>());
146
+ */
147
+ template<class KernelFunctor>
148
+ static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
149
+
150
+
151
+ static BoxedKernel makeFallthrough();
152
+ static BoxedKernel makeAmbiguousAutogradOther();
153
+ static BoxedKernel makeNamedNotSupported();
154
+
155
+ private:
156
+
157
+ friend class KernelFunction;
158
+
159
+ template<BoxedKernelFunction* func>
160
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
161
+
162
+ template<BoxedKernelFunction_withDispatchKeys* func>
163
+ static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
164
+
165
+ explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func);
166
+
167
+ OperatorKernel* getFunctor() const;
168
+ InternalBoxedKernelFunction* getFnPtr() const;
169
+
170
+ c10::intrusive_ptr<OperatorKernel> functor_;
171
+ InternalBoxedKernelFunction* boxed_kernel_func_;
172
+ };
173
+
174
+ } // namespace c10
175
+
176
+ #include <ATen/core/boxing/BoxedKernel_impl.h>
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ATen_fwd.h>
4
+ #include <ATen/core/boxing/BoxedKernel.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/core/DispatchKeySet.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+ #include <c10/util/TypeList.h>
9
+ #include <type_traits>
10
+
11
+ namespace c10 {
12
+
13
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
14
+
15
+ class OperatorHandle;
16
+ struct OperatorKernel;
17
+ class KernelFunction;
18
+
19
+ template <typename T>
20
+ using has_symint =
21
+ std::disjunction<
22
+ std::is_same<c10::SymInt, T>,
23
+ std::is_same<c10::SymIntArrayRef, T>,
24
+ std::is_same<at::OptionalSymIntArrayRef, T>,
25
+ std::is_same<std::optional<c10::SymInt>, T>
26
+ >;
27
+
28
+ template <typename T>
29
+ struct remove_symint {
30
+ using type = T;
31
+ };
32
+
33
+ template <>
34
+ struct remove_symint<c10::SymInt> {
35
+ using type = int64_t;
36
+ };
37
+
38
+ template <>
39
+ struct remove_symint<at::OptionalSymIntArrayRef> {
40
+ using type = OptionalIntArrayRef;
41
+ };
42
+
43
+ template <>
44
+ struct remove_symint<c10::SymIntArrayRef> {
45
+ using type = c10::IntArrayRef;
46
+ };
47
+
48
+ template <>
49
+ struct remove_symint<std::optional<c10::SymInt>> {
50
+ using type = std::optional<int64_t>;
51
+ };
52
+
53
+
54
+ template <bool symint, typename T>
55
+ struct maybe_keep_symint final {};
56
+
57
+ template <typename T>
58
+ struct maybe_keep_symint<true, T> { using type = T; };
59
+
60
+ template <typename T>
61
+ struct maybe_keep_symint<false, T> { using type = typename remove_symint<T>::type; };
62
+
63
+ template <typename T>
64
+ using fn_has_symint = typename guts::typelist::true_for_any_type<
65
+ has_symint,
66
+ typename guts::infer_function_traits<T>::type::parameter_types
67
+ >;
68
+
69
+ template <typename T>
70
+ struct fn_remove_symint;
71
+
72
+ template <typename Ret, typename... Args>
73
+ struct fn_remove_symint<Ret(Args...)> {
74
+ using type = Ret(typename remove_symint<Args>::type...);
75
+ };
76
+
77
+ /**
78
+ * KernelFunction is similar to std::function but stores a kernel function.
79
+ * You can create a KernelFunction from a boxed or unboxed function/functor/lambda
80
+ * and call it in a boxed or unboxed way. If the way it was created doesn't
81
+ * match the way it was called, it will do boxing or unboxing as necessary.
82
+ */
83
+ class TORCH_API KernelFunction final {
84
+ public:
85
+ using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction;
86
+ using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction;
87
+ using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys;
88
+
89
+ KernelFunction();
90
+
91
+ // Fast path for dispatch to allow not touching the boxed kernel in
92
+ // the common case where unboxed is available.
93
+ bool isValidUnboxed() const;
94
+ bool isValidSymUnboxed() const;
95
+ bool isValid() const;
96
+ bool isFallthrough() const;
97
+
98
+ /**
99
+ * Call the function in a boxed way.
100
+ * If the kernel function was created with an unboxed function,
101
+ * this will call an unboxing wrapper which then calls into that
102
+ * unboxed function.
103
+ *
104
+ * Example:
105
+ *
106
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
107
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
108
+ * > Tensor result = func.callBoxed(stack);
109
+ *
110
+ * Or, with an unboxed implementation:
111
+ *
112
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
113
+ * > [] (Tensor a, bool b) -> Tensor {...});
114
+ * > Tensor result = func.callBoxed(stack);
115
+ */
116
+ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
117
+
118
+ /**
119
+ * Call the function in an unboxed way.
120
+ * If the kernel function was created with a boxed function,
121
+ * this will box all inputs and then call into that boxed function.
122
+ *
123
+ * Note that this doesn't work for all types yet.
124
+ *
125
+ * Example:
126
+ *
127
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
128
+ * > [] (Tensor a, bool b) -> Tensor {...});
129
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
130
+ *
131
+ * Or, with a boxed implementation:
132
+ *
133
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
134
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
135
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
136
+ */
137
+ template<class Return, class... Args>
138
+ Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const;
139
+
140
+ /**
141
+ * Create a KernelFunction from a BoxedKernel.
142
+ */
143
+ static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn);
144
+
145
+ /**
146
+ * Create a KernelFunction from a boxed function.
147
+ *
148
+ * Example:
149
+ *
150
+ * > void boxed_func(OperatorKernel*, Stack* stack) {...}
151
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
152
+ */
153
+ template<BoxedKernelFunction* func>
154
+ static KernelFunction makeFromBoxedFunction();
155
+
156
+ /**
157
+ * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
158
+ * See Note [Plumbing Keys Through The Dispatcher] for details.
159
+ */
160
+ template<BoxedKernelFunction_withDispatchKeys* func>
161
+ static KernelFunction makeFromBoxedFunction();
162
+
163
+ /**
164
+ * Create a KernelFunction from an unboxed functor.
165
+ *
166
+ * Example:
167
+ *
168
+ * > class MyFunctor final : public c10::OperatorKernel {
169
+ * > public:
170
+ * > Tensor operator()(Tensor a, Tensor b) {...}
171
+ * > };
172
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor<MyFunctor>(std::make_unique<MyFunctor>());
173
+ */
174
+ template<bool AllowLegacyTypes = false, class KernelFunctor>
175
+ static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
176
+
177
+ /**
178
+ * Create a KernelFunction from a boxed functor.
179
+ *
180
+ * Example:
181
+ *
182
+ * > class MyFunctor final : public c10::OperatorKernel {
183
+ * > public:
184
+ * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
185
+ * > };
186
+ * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique<MyFunctor>());
187
+ */
188
+ template<class KernelFunctor>
189
+ static KernelFunction makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
190
+
191
+ /**
192
+ * Create a KernelFunction from an unboxed function.
193
+ * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction
194
+ * because knowing the function pointer as a template argument (i.e. at
195
+ * compile time) allows the compiler to inline the function into its
196
+ * unboxing wrapper and yields better performance when calling the function.
197
+ *
198
+ * Example:
199
+ *
200
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
201
+ * > KernelFunction func = KernelFunction::makeFromUnboxedFunction<decltype(unboxed_func), &unboxed_func>();
202
+ */
203
+ template<class FuncPtr, bool AllowLegacyTypes = false>
204
+ static KernelFunction makeFromUnboxedFunction(FuncPtr);
205
+
206
+ /**
207
+ * Create a KernelFunction from an unboxed function.
208
+ * KernelFunction::makeFromUnboxedFunction is usually a better choice than
209
+ * this if you know the function pointer at compile time, see doc comment
210
+ * there for an explanation.
211
+ *
212
+ * Example:
213
+ *
214
+ * > Tensor unboxed_func(Tensor a, Tensor b) {...}
215
+ * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func);
216
+ */
217
+ template<bool AllowLegacyTypes = false, class FuncType>
218
+ static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func);
219
+
220
+ static KernelFunction makeFallthrough();
221
+ static KernelFunction makeAmbiguousAutogradOther();
222
+ static KernelFunction makeNamedNotSupported();
223
+
224
+ /**
225
+ * Create a KernelFunction from an unboxed lambda.
226
+ *
227
+ * Example:
228
+ *
229
+ * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
230
+ * > [] (Tensor a, bool b) -> Tensor {...});
231
+ */
232
+ template<bool AllowLegacyTypes = false, class Lambda>
233
+ static std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
234
+ template<bool AllowLegacyTypes = false, class Lambda>
235
+ static std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
236
+
237
+ std::string dumpState() const;
238
+ // For testing internal invariants only
239
+ bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
240
+
241
+ private:
242
+
243
+ explicit KernelFunction(
244
+ std::unique_ptr<OperatorKernel> functor,
245
+ InternalBoxedKernelFunction* boxed_kernel_func,
246
+ void* unboxed_kernel_func,
247
+ void* sym_unboxed_kernel_func);
248
+ explicit KernelFunction(
249
+ BoxedKernel boxed_fn,
250
+ void* unboxed_kernel_func,
251
+ void* sym_unboxed_kernel_func);
252
+
253
+ BoxedKernel boxed_kernel_func_;
254
+ void* unboxed_kernel_func_;
255
+ void* sym_unboxed_kernel_func_;
256
+ };
257
+
258
+ }
259
+
260
+ #include <ATen/core/boxing/KernelFunction_impl.h>
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/core/boxing/impl/boxing.h>
2
+ #include <ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h>
3
+ #include <ATen/core/boxing/impl/WrapFunctionIntoFunctor.h>
4
+ #include <ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h>
5
+
6
+ #include <c10/util/C++17.h>
7
+ #include <type_traits>
8
+
9
+ namespace c10 {
10
+
11
+ inline KernelFunction::KernelFunction()
12
+ : boxed_kernel_func_()
13
+ , unboxed_kernel_func_(nullptr)
14
+ , sym_unboxed_kernel_func_(nullptr)
15
+ {}
16
+
17
+ inline KernelFunction::KernelFunction(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
18
+ : boxed_kernel_func_(std::move(functor), boxed_kernel_func)
19
+ , unboxed_kernel_func_(unboxed_kernel_func)
20
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
21
+ {}
22
+
23
+ inline KernelFunction::KernelFunction(BoxedKernel boxed_fn, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr)
24
+ : boxed_kernel_func_(std::move(boxed_fn))
25
+ , unboxed_kernel_func_(unboxed_kernel_func)
26
+ , sym_unboxed_kernel_func_(sym_unboxed_kernel_func)
27
+ {}
28
+
29
+ inline bool KernelFunction::isValidUnboxed() const {
30
+ return unboxed_kernel_func_ != nullptr;
31
+ }
32
+
33
+ inline bool KernelFunction::isValidSymUnboxed() const {
34
+ return sym_unboxed_kernel_func_ != nullptr;
35
+ }
36
+
37
+ inline bool KernelFunction::isValid() const {
38
+ return boxed_kernel_func_.isValid();
39
+ }
40
+
41
+ inline bool KernelFunction::isFallthrough() const {
42
+ return boxed_kernel_func_.isFallthrough();
43
+ }
44
+
45
+ inline void KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
46
+ boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack);
47
+ }
48
+
49
+ template<class Return, class... Args>
50
+ inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) {
51
+ using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...);
52
+ ActualSignature* func = reinterpret_cast<ActualSignature*>(unboxed_kernel_func);
53
+ return (*func)(functor, dispatchKeySet, std::forward<Args>(args)...);
54
+ }
55
+
56
+ // This template requires you to explicitly specify the argument you want to
57
+ // forward; it doesn't work if you try to deduce it
58
+ // NB: keep this in sync with cloneWithRealTypes in function_schema.cpp
59
+
60
+ template <typename T>
61
+ inline typename remove_symint<T>::type unpackSymInt(T x) { return x; }
62
+
63
+ template <>
64
+ inline typename remove_symint<c10::SymInt>::type unpackSymInt(c10::SymInt x) {
65
+ return x.guard_int(__FILE__, __LINE__);
66
+ }
67
+
68
+ template <>
69
+ inline typename remove_symint<c10::SymIntArrayRef>::type unpackSymInt(c10::SymIntArrayRef x) {
70
+ return C10_AS_INTARRAYREF_SLOW(x);
71
+ }
72
+
73
+ template <>
74
+ inline typename remove_symint<std::optional<c10::SymInt>>::type unpackSymInt(std::optional<c10::SymInt> x) {
75
+ return x.has_value() ? std::make_optional(x->guard_int(__FILE__, __LINE__)) : std::nullopt;
76
+ }
77
+
78
+ template <>
79
+ inline typename remove_symint<at::OptionalSymIntArrayRef>::type unpackSymInt(at::OptionalSymIntArrayRef x) {
80
+ return x.has_value() ? std::make_optional(C10_AS_INTARRAYREF_SLOW(*x)) : std::nullopt;
81
+ }
82
+
83
+ template<class Return, class... Args>
84
+ C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const {
85
+ // note: Args above is intentionally not Args&&. We don't want perfect
86
+ // forwarding, which would require Args to be deduced, but instead we
87
+ // want callers to explicitly specify the Args.
88
+
89
+ if constexpr (std::disjunction_v<has_symint<Args>...>) {
90
+ if (sym_unboxed_kernel_func_ != nullptr) {
91
+ auto *functor = boxed_kernel_func_.getFunctor();
92
+ return callUnboxedKernelFunction<Return, Args...>(
93
+ sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
94
+ }
95
+
96
+ if (unboxed_kernel_func_ != nullptr) {
97
+ auto *functor = boxed_kernel_func_.getFunctor();
98
+ return callUnboxedKernelFunction<Return, typename remove_symint<Args>::type...>(
99
+ unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt<Args>(args)...);
100
+ }
101
+ } else {
102
+ if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) {
103
+ auto *functor = boxed_kernel_func_.getFunctor();
104
+ return callUnboxedKernelFunction<Return, Args...>(
105
+ unboxed_kernel_func_, functor, dispatchKeySet, std::forward<Args>(args)...);
106
+ }
107
+ }
108
+
109
+ return impl::BoxedKernelWrapper<Return(Args...)>::call(
110
+ boxed_kernel_func_,
111
+ opHandle,
112
+ dispatchKeySet,
113
+ std::forward<Args>(args)...
114
+ );
115
+ }
116
+
117
+ inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) {
118
+ return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer
119
+ }
120
+
121
+ template<KernelFunction::BoxedKernelFunction* func>
122
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
123
+ return KernelFunction::makeFromBoxedKernel(
124
+ BoxedKernel::makeFromFunction<func>());
125
+ }
126
+
127
+ template<KernelFunction::BoxedKernelFunction_withDispatchKeys* func>
128
+ inline KernelFunction KernelFunction::makeFromBoxedFunction() {
129
+ return KernelFunction::makeFromBoxedKernel(
130
+ BoxedKernel::makeFromFunction<func>());
131
+ }
132
+
133
+ inline KernelFunction KernelFunction::makeFallthrough() {
134
+ return KernelFunction::makeFromBoxedKernel(
135
+ BoxedKernel::makeFallthrough());
136
+ }
137
+
138
+ inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() {
139
+ return KernelFunction::makeFromBoxedKernel(
140
+ BoxedKernel::makeAmbiguousAutogradOther());
141
+ }
142
+
143
+ inline KernelFunction KernelFunction::makeNamedNotSupported() {
144
+ return KernelFunction::makeFromBoxedKernel(
145
+ BoxedKernel::makeNamedNotSupported());
146
+ }
147
+
148
+ template<bool AllowLegacyTypes, class KernelFunctor>
149
+ inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor) {
150
+ #ifndef NDEBUG
151
+ // This assertion is costly for build time so it's debug-gated.
152
+ static_assert(guts::is_functor<KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor> but the argument is not a functor.");
153
+ #endif
154
+ static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
155
+
156
+ auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed<KernelFunctor>::call;
157
+ void* void_unboxed_fn = reinterpret_cast<void*>(unboxed_fn);
158
+ bool is_symint = fn_has_symint<decltype(unboxed_fn)>::value;
159
+ return KernelFunction(
160
+ std::move(kernelFunctor),
161
+ &impl::make_boxed_from_unboxed_functor<KernelFunctor, AllowLegacyTypes>::call,
162
+ is_symint ? nullptr : void_unboxed_fn,
163
+ is_symint ? void_unboxed_fn : nullptr
164
+ );
165
+ }
166
+
167
+ template<class KernelFunctor>
168
+ inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
169
+ return KernelFunction::makeFromBoxedKernel(
170
+ BoxedKernel::makeFromFunctor(std::move(kernelFunctor)));
171
+ }
172
+
173
+ template<class FuncPtr, bool AllowLegacyTypes>
174
+ inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) {
175
+ static_assert(is_compile_time_function_pointer<FuncPtr>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN.");
176
+ static_assert(!std::is_same<typename FuncPtr::FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
177
+ static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr");
178
+
179
+ #if !defined(C10_MOBILE)
180
+ (void)func_ptr; // Suppress unused variable warning
181
+ return makeFromUnboxedFunctor<AllowLegacyTypes, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>(
182
+ guts::make_unique_base<OperatorKernel, typename impl::WrapFunctionIntoFunctor<FuncPtr>::type>()
183
+ );
184
+ #else
185
+ // On mobile, we rather want to optimize for binary size than for performance,
186
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
187
+ // instead.
188
+ return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr());
189
+ #endif
190
+ }
191
+
192
+ template<bool AllowLegacyTypes, class FuncType>
193
+ inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) {
194
+ static_assert(guts::is_function_type<FuncType>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type.");
195
+ static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
196
+ TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
197
+
198
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(
199
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(func)
200
+ );
201
+ }
202
+
203
+ template<bool AllowLegacyTypes, class Lambda>
204
+ inline std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
205
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
206
+
207
+ #if !defined(C10_MOBILE)
208
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
209
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
210
+ );
211
+ #else
212
+ // On mobile, we rather want to optimize for binary size than for performance,
213
+ // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction
214
+ // instead.
215
+ using FuncType = typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type;
216
+ return makeFromUnboxedRuntimeFunction<AllowLegacyTypes, FuncType>(lambda);
217
+ #endif
218
+ }
219
+
220
+ template<bool AllowLegacyTypes, class Lambda>
221
+ inline std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) {
222
+ static_assert(guts::is_functor<std::decay_t<Lambda>>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type.");
223
+
224
+ return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(
225
+ guts::make_unique_base<OperatorKernel, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>(std::forward<Lambda>(lambda))
226
+ );
227
+ }
228
+
229
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/CompileTimeFunctionPointer.h>
4
+
5
+ namespace c10 {
6
+ namespace impl {
7
+ namespace detail {
8
+ template<class FuncPtr, class ReturnType, class ParameterList> class WrapFunctionIntoFunctor_ {};
9
+ template<class FuncPtr, class ReturnType, class... Parameters>
10
+ class WrapFunctionIntoFunctor_<FuncPtr, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
11
+ public:
12
+ C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) {
13
+ return (*FuncPtr::func_ptr())(std::forward<Parameters>(args)...);
14
+ }
15
+ };
16
+ }
17
+
18
+ // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
19
+ // Since it is a compile time function pointer, many compilers can inline it
20
+ // into the wrapper and you don't get any performance overhead for wrapping.
21
+ template<class FuncPtr>
22
+ struct WrapFunctionIntoFunctor final {
23
+ static_assert(c10::is_compile_time_function_pointer<FuncPtr>::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN.");
24
+ using type = detail::WrapFunctionIntoFunctor_<
25
+ FuncPtr,
26
+ typename guts::function_traits<typename FuncPtr::FuncType>::return_type,
27
+ typename guts::function_traits<typename FuncPtr::FuncType>::parameter_types
28
+ >;
29
+ };
30
+ }
31
+
32
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/TypeTraits.h>
4
+
5
+ namespace c10 {
6
+
7
+ namespace impl {
8
+ namespace detail {
9
+ template<class FuncType, class ReturnType, class ParameterList> class WrapFunctionIntoRuntimeFunctor_ {};
10
+ template<class FuncType, class ReturnType, class... Parameters>
11
+ class WrapFunctionIntoRuntimeFunctor_<FuncType, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
12
+ public:
13
+ template<class FuncType_>
14
+ explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func)
15
+ : kernel_func_(std::forward<FuncType_>(kernel_func)) {}
16
+
17
+ decltype(auto) operator()(Parameters... args) {
18
+ return kernel_func_(std::forward<Parameters>(args)...);
19
+ }
20
+
21
+ private:
22
+ FuncType kernel_func_;
23
+ };
24
+ }
25
+
26
+ // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that
27
+ // inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
28
+ // This can, for example, be used for lambdas, functors or even function pointers.
29
+ // In the case of function pointers, since it is a runtime function pointer,
30
+ // there is an overhead for calling it whenever the kernel is invoked.
31
+ template<class FuncType>
32
+ using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_<
33
+ FuncType,
34
+ typename guts::infer_function_traits_t<FuncType>::return_type,
35
+ typename guts::infer_function_traits_t<FuncType>::parameter_types
36
+ >;
37
+ }
38
+
39
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // This file contains boxing (not unboxing) logic,
4
+ // i.e. how to make a vector<IValue> from a set of concrete arguments.
5
+
6
+ #include <ATen/core/ivalue.h>
7
+ #include <ATen/core/stack.h>
8
+ #include <c10/core/TensorOptions.h>
9
+
10
+ #include <ATen/core/boxing/BoxedKernel.h>
11
+
12
+ #include <c10/util/Metaprogramming.h>
13
+ #include <type_traits>
14
+
15
+ namespace c10 {
16
+ namespace impl {
17
+
18
+ //
19
+ // utils
20
+ //
21
+
22
+ // is_mutable_tensor_ref
23
+ template <class T> struct is_mutable_tensor_ref : std::false_type {};
24
+ template <> struct is_mutable_tensor_ref<at::Tensor&> : std::true_type {};
25
+
26
+ // is_tuple_of_mutable_tensor_refs
27
+ //
28
+ template <class T, class Enable = void>
29
+ struct is_tuple_of_mutable_tensor_refs : std::false_type {};
30
+
31
+ template <class T>
32
+ struct is_tuple_of_mutable_tensor_refs<T, std::enable_if_t<guts::is_instantiation_of<std::tuple, T>::value, void>>
33
+ : guts::typelist::all<is_mutable_tensor_ref, guts::typelist::from_tuple_t<T>>
34
+ {};
35
+
36
+ // has_ivalue_to<T> tests the presence/absence of instance method IValue::to<T>()
37
+ //
38
+ template <class T, class Enable = void>
39
+ struct has_ivalue_to : std::false_type {};
40
+
41
+ template <class T>
42
+ struct ivalue_to_helper
43
+ {
44
+ using type = decltype(std::declval<IValue>().template to<T>());
45
+ };
46
+ template <class T>
47
+ using ivalue_to_helper_t = typename ivalue_to_helper<T>::type;
48
+
49
+ template <class T>
50
+ struct has_ivalue_to<T, std::void_t<ivalue_to_helper_t<T>>>
51
+ : std::true_type
52
+ {};
53
+
54
+ //
55
+ // boxing predicates
56
+ //
57
+
58
+ // A boxable arg type is one that IValue has a constructor for.
59
+ template <typename T>
60
+ using can_box =
61
+ std::disjunction<
62
+ std::is_constructible<IValue, std::decay_t<T>>,
63
+ // TensorOptions are not directly constructible into IValue,
64
+ // but torch::jit::push knows how to handle them
65
+ std::is_same<TensorOptions, std::decay_t<T>>
66
+ >;
67
+
68
+ template <typename... Ts>
69
+ using can_box_all = std::conjunction<can_box<Ts>...>;
70
+
71
+ // an unboxable result is one that can be extracted from an IValue
72
+ template <typename T>
73
+ using can_unbox =
74
+ std::conjunction<
75
+ std::disjunction<
76
+ has_ivalue_to<T>,
77
+ // void returns are ok
78
+ std::is_same<void, T>
79
+ >,
80
+ std::negation<std::is_lvalue_reference<T>>
81
+ >;
82
+
83
+ //
84
+ // boxArgs - utility for pushing unboxed args onto IValue stack
85
+ //
86
+ template <class... Args>
87
+ torch::jit::Stack boxArgs(Args... args) {
88
+ // TODO Reuse stack vector instead of allocating?
89
+ torch::jit::Stack stack;
90
+ stack.reserve(sizeof...(Args));
91
+ torch::jit::push(stack, std::forward<Args>(args)...);
92
+ return stack;
93
+ }
94
+
95
+ template <class T>
96
+ inline constexpr size_t boxed_size_one() {
97
+ static_assert(!std::is_same<std::decay_t<T>, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference");
98
+ return 1;
99
+ }
100
+
101
+ // torch::jit::push pushes 4 values for a TensorOptions; this needs to
102
+ // be kept in sync.
103
+ template <>
104
+ inline constexpr size_t boxed_size_one<c10::TensorOptions>() {
105
+ return 4;
106
+ }
107
+
108
+ // NOTE: this could probably be simplified with C++17 fold expressions.
109
+ template <typename...>
110
+ struct BoxedSize : std::integral_constant<size_t, 0> {};
111
+ template <class T, class... Args>
112
+ struct BoxedSize<T, Args...> : std::integral_constant<size_t, boxed_size_one<T>() + BoxedSize<Args...>::value> {};
113
+
114
+ template <class... Args>
115
+ static inline constexpr size_t boxed_size() {
116
+ return BoxedSize<Args...>::value;
117
+ }
118
+
119
+ using IValueAlignedStorage = std::aligned_storage_t<sizeof(IValue), alignof(IValue)>;
120
+
121
+ template <typename T>
122
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) {
123
+ new (&dest[lastIdx]) IValue(arg);
124
+ lastIdx++;
125
+ }
126
+
127
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) {
128
+ new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype()));
129
+ new (&dest[lastIdx++]) IValue(options.layout());
130
+ new (&dest[lastIdx++]) IValue(options.device());
131
+ new (&dest[lastIdx++]) IValue(options.pinned_memory());
132
+ }
133
+
134
+ inline void boxArgsToStack(IValueAlignedStorage*, int&) {}
135
+
136
+ template<typename T, typename... Args>
137
+ C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) {
138
+ boxToStack(dest, arg, lastIdx);
139
+ boxArgsToStack(dest, lastIdx, args...);
140
+ }
141
+
142
+ //
143
+ // PopResult is a helper class whose specializations handle popping single and
144
+ // multiple return values, respectively.
145
+ //
146
+ template <class Result>
147
+ struct PopResult final {
148
+ static Result call(Stack& stack) {
149
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
150
+ stack.size() == 1,
151
+ "Boxed kernel was expected to return one value on the stack, ",
152
+ "but instead pushed ", stack.size(), " values."
153
+ );
154
+ return std::move(stack[0]).to<Result>();
155
+ }
156
+ };
157
+
158
+ template <class... Types>
159
+ struct PopResult<std::tuple<Types...>> final {
160
+ using Result = std::tuple<Types...>;
161
+
162
+ static Result call(Stack& stack) {
163
+ // for tuple return types, boxed kernel has pushed multiple values onto the stack
164
+ constexpr int RetCount = sizeof...(Types);
165
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
166
+ stack.size() == RetCount,
167
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
168
+ "but instead pushed ", stack.size(), " values."
169
+ );
170
+ return pop_to_tuple_impl(stack, std::make_index_sequence<RetCount>());
171
+ }
172
+ private:
173
+ // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise.
174
+ // I'm sure there's an incantation that slips it past the parser but eh
175
+ template <size_t... indices>
176
+ static Result pop_to_tuple_impl(Stack& stack, std::index_sequence<indices...>) {
177
+ return std::make_tuple((std::move(stack[indices]).to<Types>())...);
178
+ }
179
+ };
180
+
181
+ //
182
+ // BoxedKernelWrapper
183
+ //
184
+ // For a given function type FT, BoxedKernelWrapper<FT> implements
185
+ // a `call` method that
186
+ // - takes a boxed kernel and unboxed arguments as specified by FT,
187
+ // - calls `boxArgs` to box the arguments
188
+ // - calls the boxed kernel
189
+ // - unboxes and returns the result
190
+ //
191
+ // The partial specializations below handle various cases: in
192
+ // particular, not all types appearing in op signatures are supported,
193
+ // and ops returning references have nonstandard wrapper implementations.
194
+ //
195
+
196
+ // 1. The base specialization of BoxedKernelWrapper should never be instantiated.
197
+ // A "no call method defined on BoxedKernelWrapper" compile error means that
198
+ // an op signature has failed to trigger any of the partial specializations
199
+ // that follow this one.
200
+ //
201
+ template <class FuncType, class Enable = void>
202
+ struct BoxedKernelWrapper {
203
+ // The reason we're not just doing straight up static_assert(false, ...) here:
204
+ // Basically, the way to make sure a static_assert only fires if a template
205
+ // is actually instantiated (rather than every time the file is parsed) is to use
206
+ // template parameters in the expression, e.g. FuncType here. However, since
207
+ // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same
208
+ // effect.
209
+ static_assert(sizeof(FuncType) != sizeof(FuncType),
210
+ "Function signature contains one or more unsupported parameter and/or return types. "
211
+ "Look for a nearby error like "
212
+ "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" "
213
+ "- (your function type) is the unsupported signature.");
214
+ };
215
+
216
+ //
217
+ // 2. Supported signatures, other than those involving non-const Tensor refs -
218
+ // i.e., "functional" ops.
219
+ //
220
+
221
+ template <class Result, class... Args>
222
+ struct BoxedKernelWrapper<
223
+ Result(Args...),
224
+ std::enable_if_t<
225
+ can_box_all<Args...>::value && can_unbox<Result>::value && !is_tuple_of_mutable_tensor_refs<Result>::value,
226
+ void
227
+ >
228
+ > {
229
+ static Result call(
230
+ const BoxedKernel& boxed_kernel_func,
231
+ const OperatorHandle& opHandle,
232
+ DispatchKeySet dispatchKeySet,
233
+ Args... args
234
+ ) {
235
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
236
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
237
+
238
+ if constexpr (!std::is_same_v<void, Result>) {
239
+ // op has pushed one or more values onto the stack.
240
+ return PopResult<Result>::call(stack);
241
+ } else {
242
+ // op returns void, boxed kernel has pushed nothing onto stack.
243
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
244
+ stack.empty(),
245
+ "Boxed kernel was expected to return no values on the stack, ",
246
+ "but instead returned ", stack.size(), " values."
247
+ );
248
+ }
249
+ }
250
+ };
251
+
252
+ //
253
+ // 3. in-place ops take a single non-const Tensor reference
254
+ // as their first argument, and return it.
255
+ //
256
+ // Note: all signatures matching this pattern are assumed to be for such ops.
257
+ // Because of this, the generated BoxedKernelWrapper specializations simply
258
+ // return the in-place argument.
259
+ //
260
+
261
+ template <class... OtherArgs>
262
+ struct BoxedKernelWrapper<
263
+ at::Tensor&(at::Tensor&, OtherArgs...),
264
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
265
+ > {
266
+ static at::Tensor& call(
267
+ const BoxedKernel& boxed_kernel_func,
268
+ const OperatorHandle& opHandle,
269
+ DispatchKeySet dispatchKeySet,
270
+ at::Tensor& outArg, OtherArgs... otherArgs
271
+ ) {
272
+ torch::jit::Stack stack = boxArgs<at::Tensor&, OtherArgs...>(outArg, std::forward<OtherArgs>(otherArgs)...);
273
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
274
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
275
+ stack.size() == 1,
276
+ "Boxed kernel was expected to return a single value on the stack, ",
277
+ "but instead returned ", stack.size(), " values."
278
+ );
279
+
280
+ return outArg;
281
+ }
282
+ };
283
+
284
+ //
285
+ // 3.5. In-process migration to make in-place ops take and return
286
+ // const references instead.
287
+ template <class... OtherArgs>
288
+ struct BoxedKernelWrapper<
289
+ const at::Tensor&(const at::Tensor&, OtherArgs...),
290
+ std::enable_if_t<can_box_all<OtherArgs...>::value, void>
291
+ > {
292
+ static const at::Tensor& call(
293
+ const BoxedKernel& boxed_kernel_func,
294
+ const OperatorHandle& opHandle,
295
+ DispatchKeySet dispatchKeySet,
296
+ const at::Tensor& outArg, OtherArgs... otherArgs
297
+ ) {
298
+ torch::jit::Stack stack = boxArgs(outArg, otherArgs...);
299
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
300
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
301
+ stack.size() == 1,
302
+ "Boxed kernel was expected to return a single value on the stack, ",
303
+ "but instead returned ", stack.size(), " values."
304
+ );
305
+
306
+ return outArg;
307
+ }
308
+ };
309
+
310
+ //
311
+ // 4. out of place ops that take a single non-const Tensor reference as their
312
+ // final argument, and also return it.
313
+ //
314
+ // Note: all signatures matching this pattern are assumed to be for such ops.
315
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
316
+ // return out arguments.
317
+ //
318
+ template <class FirstArg, class... RestArgs>
319
+ struct BoxedKernelWrapper<
320
+ at::Tensor&(FirstArg, RestArgs...),
321
+ std::enable_if_t<
322
+ can_box_all<FirstArg, RestArgs...>::value
323
+ // this skips over in-place kernels with a non-const Tensor
324
+ // arg at the front, so those can unambiguously trigger the preceding specialization.
325
+ && !is_mutable_tensor_ref<FirstArg>::value,
326
+ void
327
+ >
328
+ > {
329
+ static at::Tensor& call(
330
+ const BoxedKernel& boxed_kernel_func,
331
+ const OperatorHandle& opHandle,
332
+ DispatchKeySet dispatchKeySet,
333
+ FirstArg firstArg, RestArgs... restArgs
334
+ ) {
335
+ torch::jit::Stack stack = boxArgs<FirstArg, RestArgs...>(std::forward<FirstArg>(firstArg), std::forward<RestArgs>(restArgs)...);
336
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
337
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
338
+ stack.size() == 1,
339
+ "Boxed kernel was expected to return a single value on the stack, ",
340
+ "but instead returned ", stack.size(), " values."
341
+ );
342
+
343
+ // reusing restArgs after it has been forwarded here is ok because we know
344
+ // that the last element is of type `Tensor&`.
345
+ return std::get<sizeof...(RestArgs) - 1>(std::tuple<RestArgs...>{restArgs...});
346
+ }
347
+ };
348
+
349
+ //
350
+ // 5. out of place ops that take multiple non-const Tensor references as their
351
+ // final arguments, and return them in a std::tuple.
352
+ //
353
+ // Note: all signatures matching this pattern are assumed to be for such ops.
354
+ // This assumption permits the generated BoxedKernelWrapper specializations to simply
355
+ // return the out arguments.
356
+ //
357
+ template <class Result, class... Args>
358
+ struct BoxedKernelWrapper<
359
+ Result(Args...),
360
+ std::enable_if_t<
361
+ can_box_all<Args...>::value && is_tuple_of_mutable_tensor_refs<Result>::value,
362
+ void
363
+ >
364
+ > {
365
+ static Result call(
366
+ const BoxedKernel& boxed_kernel_func,
367
+ const OperatorHandle& opHandle,
368
+ DispatchKeySet dispatchKeySet,
369
+ Args... args
370
+ ) {
371
+ using ArgTuple = std::tuple<Args...>;
372
+ constexpr int RetCount = std::tuple_size<Result>();
373
+
374
+ torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
375
+ boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
376
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
377
+ stack.size() == RetCount,
378
+ "Boxed kernel was expected to return ", RetCount, " values on the stack, ",
379
+ "but instead returned ", stack.size(), " values."
380
+ );
381
+
382
+ // reusing args after it has been forwarded here is ok because we know
383
+ // that the last RetCount elements are of type `Tensor&`.
384
+ auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
385
+ static_assert(
386
+ std::is_same<Result, decltype(result)>::value,
387
+ "The parameter list of an op returning a tuple of Tensor references "
388
+ "must end with an equal number of Tensor reference parameters."
389
+ );
390
+ return result;
391
+ }
392
+ };
393
+
394
+ } // impl
395
+ } // c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/boxing/OperatorKernel.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/stack.h>
6
+ #include <c10/util/TypeList.h>
7
+ #include <ATen/core/IListRef.h>
8
+ #include <c10/util/intrusive_ptr.h>
9
+ #include <c10/util/Metaprogramming.h>
10
+
11
+ #include <utility>
12
+
13
+ namespace c10 {
14
+
15
+ using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
16
+ class OperatorHandle;
17
+
18
+ /*
19
+ * [Note: Argument forwarding in the dispatcher]
20
+ *
21
+ * The dispatcher uses a somewhat unusual way to forward arguments through several layers of
22
+ * wrapper functions. This can be confusing because an experienced C++ programmer would look at this
23
+ * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.".
24
+ * It is not a bug. The common way in C++ to forward arguments is to use universal references:
25
+ *
26
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
27
+ *
28
+ * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument.
29
+ * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a
30
+ * different reference type than was used in the kernel function. The correct reference type
31
+ * is dictated by the kernel signature and must be identical since we cast function pointers
32
+ * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines
33
+ * the reference type to use by looking at the explicitly supplied operator signature, not by looking at
34
+ * the argument we're calling it with.
35
+ *
36
+ * What does std::forward do, exactly?
37
+ * ------------------------------------
38
+ * std::forward<T>(t) is a way to cast t to the reference type supplied in T.
39
+ * Let's assume decay_t<T> == U and T is either U or some reference of U.
40
+ * - std::forward<T&>(t) will return U&, no matter what kind of reference t is.
41
+ * - std::forward<T&&>(t) will return U&&, no matter what kind of reference t is.
42
+ * - std::forward<T>(t) will return U&& (not U!), no matter what kind of reference t is.
43
+ *
44
+ * For universal references, that means that in the following function
45
+ * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); }
46
+ *
47
+ * - when called with arg being a rvalue reference or non-reference value, T gets inferred to be
48
+ * a non-reference U, and std::forward<T>(t) will return U&&, correctly moving the argument.
49
+ * - when called with arg behind a lvalue reference, T gets inferred to be U& because that's the only
50
+ * way to match the signature (in C++, a type that is (T&)&& will collapse to T&).
51
+ * That means std::forward<T>(t) will return U& and the value will not be moved but passed on as
52
+ * a lvalue reference.
53
+ *
54
+ * How do we use that?
55
+ * ------------------------------------
56
+ * But std::forward can also be used outside of the common "universal forwarding" pattern to change
57
+ * reference types. So instead of following the common C++ pattern, we notice what
58
+ * std::forward<T>() actually does, and that is it takes a value and changes its reference to the
59
+ * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this
60
+ * to forward based on an explicitly specified reference type instead of the inferred argument type.
61
+ *
62
+ * This is why many of the dispatcher functions look like
63
+ * > template<class T> func(T t) { func2<T>(std::forward<T>(t)); }
64
+ * instead of the common
65
+ * > template<class T> func(T&& t) { func2(std::forward<T>(t)); }
66
+ *
67
+ * and are expected to be called by explicitly specifying the template parameters in a way that matches
68
+ * the expected operator signature at each call site.
69
+ */
70
+
71
+ namespace impl {
72
+ // supported_primitive_arg_types defines which primitive types we allow in
73
+ // kernel functions as arguments or returns.
74
+ // Additionally, we support lists, dicts and optionals containing these types.
75
+ using supported_primitive_arg_types = guts::typelist::typelist<
76
+ int64_t,
77
+ double,
78
+ bool,
79
+ c10::string_view,
80
+ at::Tensor,
81
+ at::Scalar,
82
+ c10::QScheme,
83
+ c10::ScalarType,
84
+ c10::Device,
85
+ c10::DeviceIndex,
86
+ c10::Layout,
87
+ c10::MemoryFormat,
88
+ at::Dimname
89
+ >;
90
+
91
+ // We have an unboxed functor in hand that takes C++ arguments, and
92
+ // we're building a boxed functor wrapper for it that takes IValues.
93
+ // So "outside" is boxed and "inside" is unboxed.
94
+ //
95
+ // So a valid input type is one that our boxed functor wrapper can
96
+ // unbox from an IValue into a C++ value.
97
+ //
98
+ // Whereas a valid output type is one that our wrapper can recieve
99
+ // as a C++ value from the unboxed functor, and box into an IValue.
100
+
101
+ //
102
+ // assert_is_valid_input_type
103
+ // checks that T can be unboxed from an IValue into a C++ value.
104
+ //
105
+
106
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
107
+ struct assert_is_valid_input_type {
108
+ assert_is_valid_input_type() {
109
+ if constexpr (guts::typelist::contains<supported_primitive_arg_types, T>::value) {
110
+ /* everything is ok, this is a primitive type */
111
+ } else {
112
+ /* otherwise this must be an instance of a valid custom class, since it can only
113
+ have been created via IValue(x), which ensures this. */
114
+ }
115
+ }
116
+ };
117
+
118
+ template<class T, bool AllowDeprecatedTypes>
119
+ struct assert_is_valid_input_type<std::optional<T>, AllowDeprecatedTypes>
120
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
121
+
122
+ template <bool AllowDeprecatedTypes, class... Args>
123
+ struct TypeCheckHelper;
124
+
125
+ template <bool AllowDeprecatedTypes>
126
+ struct TypeCheckHelper<AllowDeprecatedTypes> {};
127
+
128
+ template <bool AllowDeprecatedTypes, class Head, class... Rest>
129
+ struct TypeCheckHelper<AllowDeprecatedTypes, Head, Rest...>
130
+ : TypeCheckHelper<AllowDeprecatedTypes, Rest...> {
131
+ assert_is_valid_input_type<Head, AllowDeprecatedTypes> check;
132
+ };
133
+
134
+ template<class... Contained, bool AllowDeprecatedTypes>
135
+ struct assert_is_valid_input_type<std::tuple<Contained...>, AllowDeprecatedTypes>
136
+ : TypeCheckHelper<AllowDeprecatedTypes, Contained...> {};
137
+
138
+ template<class Key, class Value, bool AllowDeprecatedTypes>
139
+ struct assert_is_valid_input_type<Dict<Key, Value>, AllowDeprecatedTypes>
140
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
141
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
142
+ "You tried to register a kernel with an unsupported input type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
143
+ };
144
+
145
+ template<class Key, class Value, bool AllowDeprecatedTypes>
146
+ struct assert_is_valid_input_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
147
+ : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
148
+ static_assert(AllowDeprecatedTypes,
149
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
150
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
151
+ "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
152
+ };
153
+
154
+ template<class T, bool AllowDeprecatedTypes>
155
+ struct assert_is_valid_input_type<List<T>, AllowDeprecatedTypes>
156
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
157
+ static_assert(!std::is_same<T, at::Scalar>::value,
158
+ "You tried to register a kernel with an unsupported input type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
159
+ };
160
+
161
+ template<class T, bool AllowDeprecatedTypes>
162
+ struct assert_is_valid_input_type<c10::ArrayRef<T>, AllowDeprecatedTypes>
163
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
164
+ static_assert(!std::is_same<T, at::Scalar>::value,
165
+ "You tried to register a kernel with an unsupported input type: ArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
166
+ };
167
+
168
+ template<class T, bool AllowDeprecatedTypes>
169
+ struct assert_is_valid_input_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
170
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
171
+ static_assert(!std::is_same<T, at::Scalar>::value,
172
+ "You tried to register a kernel with an unsupported input type: OptionalArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
173
+ };
174
+
175
+ template<class T, size_t N, bool AllowDeprecatedTypes>
176
+ struct assert_is_valid_input_type<std::array<T, N>, AllowDeprecatedTypes>
177
+ : assert_is_valid_input_type<T, AllowDeprecatedTypes> {
178
+ static_assert(!std::is_same<T, at::Scalar>::value,
179
+ "You tried to register a kernel with an unsupported input type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
180
+ };
181
+
182
+ template<class T, bool AllowDeprecatedTypes>
183
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
184
+ // There is no reason to support float when we have double. Keep the API lean.
185
+ static_assert(guts::false_t<T>::value,
186
+ "You tried to register a kernel with an unsupported input type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string.");
187
+ };
188
+ template<class T, bool AllowDeprecatedTypes>
189
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
190
+ static_assert(guts::false_t<T>::value,
191
+ "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead.");
192
+ };
193
+ template<class T, bool AllowDeprecatedTypes>
194
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
195
+ static_assert(guts::false_t<T>::value,
196
+ "You tried to register a kernel with an unsupported input type: vector<bool>. Please use List<bool> instead.");
197
+ };
198
+ template<class T, bool AllowDeprecatedTypes>
199
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
200
+ static_assert(guts::false_t<T>::value,
201
+ "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string.");
202
+ };
203
+ template<class T, bool AllowDeprecatedTypes>
204
+ struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const c10::SymInt&, T>::value>> {
205
+ static_assert(guts::false_t<T>::value,
206
+ "You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead.");
207
+ };
208
+
209
+ // TODO: it probably would be good to tighten this up quite a bit more with
210
+ // an explicit list for everything
211
+
212
+ //
213
+ // assert_is_valid_output_type
214
+ //
215
+
216
+ template<class T, bool AllowDeprecatedTypes, class Enable = void>
217
+ struct assert_is_valid_output_type {
218
+ assert_is_valid_output_type() {
219
+ if constexpr(guts::typelist::contains<supported_primitive_arg_types, T>::value) {
220
+ /* everything is ok, this is a primitive type */
221
+ } else {
222
+ /* otherwise T is verified to be a registered custom class in the IValue
223
+ constructor, so no benefit in double-checking here */
224
+ }
225
+ }
226
+ };
227
+
228
+ template<class T, bool AllowDeprecatedTypes>
229
+ struct assert_is_valid_output_type<std::optional<T>, AllowDeprecatedTypes>
230
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
231
+
232
+ template<class T, bool AllowDeprecatedTypes>
233
+ struct assert_is_valid_output_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
234
+ : assert_is_valid_output_type<T, AllowDeprecatedTypes> {};
235
+
236
+ template<class Key, class Value, bool AllowDeprecatedTypes>
237
+ struct assert_is_valid_output_type<Dict<Key, Value>, AllowDeprecatedTypes>
238
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
239
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
240
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
241
+ static_assert(!std::is_same<Value, at::Scalar>::value,
242
+ "You tried to register a kernel with an unsupported output type: Dict<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
243
+ };
244
+
245
+ template<class Key, class Value, bool AllowDeprecatedTypes>
246
+ struct assert_is_valid_output_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
247
+ : assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
248
+ static_assert(AllowDeprecatedTypes,
249
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
250
+ static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
251
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
252
+ static_assert(!std::is_same<Value, at::Scalar>::value,
253
+ "You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
254
+ };
255
+
256
// List<T> is a valid output iff T itself is a valid output type (checked via
// the inherited base), except List<Scalar>, which has no schema representation.
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<List<T>, AllowDeprecatedTypes>
  : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
  static_assert(!std::is_same<T, at::Scalar>::value,
    "You tried to register a kernel with an unsupported output type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
};

// std::vector<T> outputs are still accepted (deprecated path); the element type
// is validated exactly like List<T>.
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<std::vector<T>, AllowDeprecatedTypes>
  : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
  static_assert(!std::is_same<T, at::Scalar>::value,
    "You tried to register a kernel with an unsupported output type: std::vector<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
  // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector<T>. Please use List<T> instead.");
};

// std::array<T, N> outputs are validated element-wise like the other containers.
template<class T, size_t N, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<std::array<T, N>, AllowDeprecatedTypes>
  : assert_is_valid_output_type<T, AllowDeprecatedTypes> {
  static_assert(!std::is_same<T, at::Scalar>::value,
    "You tried to register a kernel with an unsupported output type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
};
277
+
278
// The following specialisations of assert_is_valid_output_type are technically not
// necessary since we would hit the base case and show an error message
// there if they didn't exist, but we can show a better error message
// in some common error scenarios.
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
  // There is no reason to support float when we have double. Keep the API lean.
  static_assert(guts::false_t<T>::value,
    "You tried to register a kernel with an unsupported output type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
  static_assert(guts::false_t<T>::value,
    "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<std::vector<bool>, T>::value>> {
  // std::vector<bool> is a packed-bit proxy container and cannot round-trip
  // through IValue; reject it explicitly.
  static_assert(guts::false_t<T>::value,
    "You tried to register a kernel with an unsupported output type: vector<bool>. Please use List<bool> instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
  // Catches int, short, char, etc. — only int64_t (schema `int`) is supported.
  static_assert(guts::false_t<T>::value,
    "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string.");
};
303
+
304
+ // ivalue_to_arg
305
+
306
// decay_if_not_tensor: maps a kernel parameter type to the type that
// ivalue_to_arg should be instantiated with. Everything decays to its value
// type, except Tensor references, which are preserved so that the Tensor
// specializations of ivalue_to_arg below can avoid copies.
template<class T>
struct decay_if_not_tensor final {
  using type = std::decay_t<T>;
};

template<>
struct decay_if_not_tensor<at::Tensor&> final {
  using type = at::Tensor&;
};

template<>
struct decay_if_not_tensor<const at::Tensor&> final {
  using type = const at::Tensor&;
};
320
+
321
// ivalue_to_arg<T>: converts an IValue popped from the JIT stack into the
// argument type T expected by an unboxed kernel. The base case validates T at
// compile time and moves out of the IValue.
template<class T, bool AllowDeprecatedTypes>
struct ivalue_to_arg final {
  static decltype(auto) call(IValue& v) {
    assert_is_valid_input_type<T, AllowDeprecatedTypes>();
    return std::move(v).to<T>();
  }
};

// The following two specializations take advantage of specialized
// `toTensor()` overloads on IValue to avoid copying.
template<bool AllowDeprecatedTypes>
struct ivalue_to_arg<at::Tensor&, AllowDeprecatedTypes> final {
  // We cannot use the default implementation if they asked for a
  // `at::Tensor&` because it moves from the IValue, so it can't get
  // an lvalue reference.
  static at::Tensor& call(IValue& v) {
    // Tensor& is valid, don't bother asserting
    return v.toTensor();
  }
};

template<bool AllowDeprecatedTypes>
struct ivalue_to_arg<const at::Tensor&, AllowDeprecatedTypes> final {
  // We should not use the default implementation if they asked for
  // a `const at::Tensor&` because it moves from the IValue and they
  // didn't ask for that.
  static const at::Tensor& call(IValue& v) {
    // const Tensor& is valid, don't bother asserting
    return v.toTensor();
  }
};

// ITensorListRef arguments are materialized as a List<at::Tensor>, which is
// implicitly convertible to ITensorListRef.
template<bool AllowDeprecatedTypes>
struct ivalue_to_arg<at::ITensorListRef, AllowDeprecatedTypes> final {
  static List<at::Tensor> call(IValue& v) {
    return v.toTensorList();
  }
};

template<class T, bool AllowDeprecatedTypes>
struct ivalue_to_arg<ArrayRef<T>, AllowDeprecatedTypes> final {
  // If an argument is ArrayRef<T>, convert the IValue to a std::vector<T> and pass that
  // to the operator. std::vector<T> is implicitly convertible to ArrayRef<T>.
  static std::vector<T> call(IValue& v) {
    return ivalue_to_arg<std::vector<T>, AllowDeprecatedTypes>::call(v);
  }
};
// A SymIntArrayRef argument may arrive on the stack as a plain int list (the
// common, non-symbolic case); widen each element to SymInt in that case.
template<bool AllowDeprecatedTypes>
struct ivalue_to_arg<c10::SymIntArrayRef, AllowDeprecatedTypes> final {
  static std::vector<c10::SymInt> call(IValue& v) {
    if (v.isIntList()) {
      std::vector<c10::SymInt> r;
      auto src = v.toIntList();
      std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
      return r;
    } else {
      return ivalue_to_arg<std::vector<c10::SymInt>, AllowDeprecatedTypes>::call(v);
    }
  }
};
// Same int-list widening for the optional variant.
template<bool AllowDeprecatedTypes>
struct ivalue_to_arg<c10::OptionalArray<c10::SymInt>, AllowDeprecatedTypes> final {
  static OptionalArray<c10::SymInt> call(IValue& v) {
    if (v.isIntList()) {
      std::vector<c10::SymInt> r;
      auto src = v.toIntList();
      std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); });
      return OptionalArray<c10::SymInt>(std::move(r));
    } else {
      return std::move(v).to<OptionalArray<c10::SymInt>>();
    }
  }
};
template<class T, bool AllowDeprecatedTypes>
struct ivalue_to_arg<std::optional<ArrayRef<T>>, AllowDeprecatedTypes> final {
  // If an argument is std::optional<ArrayRef<T>>, convert the IValue to an std::optional<std::vector<T>> and pass that
  // to the operator. OptionalArray<T> is basically a std::optional<std::vector<T>> but implicitly convertible
  // to std::optional<ArrayRef<T>>.
  static OptionalArray<T> call(IValue& v) {
    return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
  }
};

template<class T, bool AllowDeprecatedTypes>
struct ivalue_to_arg<OptionalArrayRef<T>, AllowDeprecatedTypes> final {
  // If an argument is OptionalArrayRef<T>, convert the IValue to an
  // std::optional<std::vector<T>> and pass that to the operator. OptionalArray<T>
  // is basically a std::optional<std::vector<T>> but implicitly convertible to
  // OptionalArrayRef<T>
  static OptionalArray<T> call(IValue& v) {
    return ivalue_to_arg<OptionalArray<T>, AllowDeprecatedTypes>::call(v);
  }
};
414
+
415
// return_to_ivalue<T>: converts a kernel's return value back into an IValue to
// be pushed onto the JIT stack. `call` consumes (moves) the value, `copy`
// leaves the source intact.
template<class T, bool AllowDeprecatedTypes, class Enable = void>
struct return_to_ivalue final {};

template<class T, bool AllowDeprecatedTypes>
struct return_to_ivalue<T, AllowDeprecatedTypes, std::enable_if_t<!std::is_same<at::Tensor&, T>::value>> final {
  static IValue call(T&& v) {
    assert_is_valid_output_type<T, AllowDeprecatedTypes>();
    return c10::ivalue::from(std::move(v));
  }
  static IValue copy(const T& v) {
    assert_is_valid_output_type<T, AllowDeprecatedTypes>();
    return IValue(v);
  }
};

// Special case to allow kernels to return `Tensor&`.
// TODO Delete this once kernels don't do that anymore
template<bool AllowDeprecatedTypes>
struct return_to_ivalue<at::Tensor&, AllowDeprecatedTypes, void> final {
  static IValue call(at::Tensor& v) {
    return c10::ivalue::from(v);
  }
  static IValue copy(at::Tensor& v) {
    return IValue(v);
  }
};
442
+
443
// wrap_kernel_functor_unboxed_
//
// Adapts a kernel functor to the dispatcher's internal unboxed calling
// convention, in which every kernel's first argument is a DispatchKeySet.
// Two specializations exist: one for kernels that do NOT take a
// DispatchKeySet (the keyset is dropped) and one for kernels that do
// (the keyset is forwarded).

template<class KernelFunctor, class OpSignature>
struct wrap_kernel_functor_unboxed_ final {};

// This specialization is for kernels with a first argument that is NOT of type DispatchKeySet
// This includes kernels with 0 arguments.
template<class KernelFunctor, class ReturnType, class... ParameterTypes>
struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(ParameterTypes...)> final {
  static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
    "Return type mismatch");
  static_assert(std::is_same<guts::typelist::typelist<ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
    "Parameter types mismatch");

  // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
  static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) {
    KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
    // Note [Plumbing Keys Through The Dispatcher 2]
    // See Note [Plumbing Keys Through The Dispatcher] for the background.
    // This functor explicitly takes in a dispatchKeySet and drops it on the floor- it does not forward it to the registered kernel.
    //
    // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type
    // DispatchKeySet.
    // This is not the case for pretty much all manually written kernels, however- this functor serves to separate the calling convention
    // of the dispatcher from the calling convention of manually written kernels.
    return (*functor_)(std::forward<ParameterTypes>(args)...);
  }
};

// This specialization is for kernels with a first argument of type DispatchKeySet
template<class KernelFunctor, class ReturnType, class... ParameterTypes>
struct wrap_kernel_functor_unboxed_<KernelFunctor, ReturnType(DispatchKeySet, ParameterTypes...)> final {
  static_assert(std::is_same<ReturnType, typename guts::infer_function_traits_t<KernelFunctor>::return_type>::value,
    "Return type mismatch");
  static_assert(std::is_same<guts::typelist::typelist<DispatchKeySet, ParameterTypes...>, typename guts::infer_function_traits_t<KernelFunctor>::parameter_types>::value,
    "Parameter types mismatch");

  // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use &&
  static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) {
    KernelFunctor* functor_ = static_cast<KernelFunctor*>(functor);
    // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel.
    // See Note [Plumbing Keys Through The Dispatcher 2] for details.
    return (*functor_)(dispatchKeySet, std::forward<ParameterTypes>(args)...);
  }
};

// Convenience alias that selects the right specialization from the functor's
// inferred function type.
template<class KernelFunctor>
using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_<KernelFunctor, typename guts::infer_function_traits_t<KernelFunctor>::func_type>;
491
+
492
+ // call_functor_with_args_from_stack
493
+
494
// Implementation helper: peeks each argument off the JIT stack (without
// popping), converts it via ivalue_to_arg, and invokes the kernel through
// wrap_kernel_functor_unboxed. The index_sequence / typelist parameters pair
// each stack slot with its expected C++ argument type.
template<class Functor, bool AllowDeprecatedTypes, size_t... ivalue_arg_indices, typename... ArgTypes>
std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence<ivalue_arg_indices...>, guts::typelist::typelist<ArgTypes...>*) {
  (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning.

  // We're explicitly filtering out DispatchKeySet from the argument list.
  // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
  // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
  // See Note [Plumbing Keys Through The Dispatcher] for the background.
  return wrap_kernel_functor_unboxed<Functor>::call(functor, dispatchKeySet,
    ivalue_to_arg<typename decay_if_not_tensor<ArgTypes>::type, AllowDeprecatedTypes>::call(
      torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices))
    )...);
}

// Public entry: derives the kernel's (DispatchKeySet-stripped) parameter list
// and delegates to the indexed helper above.
template<class Functor, bool AllowDeprecatedTypes>
std::decay_t<typename guts::infer_function_traits_t<Functor>::return_type>
call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) {
  // We're explicitly filtering out DispatchKeySet from the argument list.
  // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
  // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
  // See Note [Plumbing Keys Through The Dispatcher] for the background.
  using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<Functor>::parameter_types;
  constexpr size_t num_ivalue_args = guts::typelist::size<ArgTypes>::value;
  return call_functor_with_args_from_stack_<Functor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack, std::make_index_sequence<num_ivalue_args>(), static_cast<ArgTypes*>(nullptr));
}
520
+
521
// push_outputs<OutputType>: pushes a kernel's return value(s) onto the JIT
// stack as IValues. A std::tuple return is unpacked into one stack entry per
// element; a void return pushes nothing.

template<class OutputType, bool AllowDeprecatedTypes>
struct push_outputs final {
  // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here
  // to avoid one extra call to the move constructor in this case. This is still not a
  // universal reference though because OutputType is an explicitly specified class
  // template parameter.
  static void call(OutputType&& output, Stack* stack) {
    torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::call(std::forward<OutputType>(output)));
  }
  static void copy(const OutputType& output, Stack* stack) {
    torch::jit::push(*stack, return_to_ivalue<OutputType, AllowDeprecatedTypes>::copy(output));
  }
};
template<class... OutputTypes, bool AllowDeprecatedTypes>
struct push_outputs<std::tuple<OutputTypes...>, AllowDeprecatedTypes> final {
  static void call(std::tuple<OutputTypes...>&& output, Stack* stack) {
    call_(std::move(output), stack, std::make_index_sequence<sizeof...(OutputTypes)>());
  }
  static void copy(const std::tuple<OutputTypes...>& output, Stack* stack) {
    copy_(output, stack, std::make_index_sequence<sizeof...(OutputTypes)>());
  }

private:
  // Expands the tuple elements in order onto the stack.
  template<size_t... indices>
  static void call_(std::tuple<OutputTypes...>&& output, Stack* stack, std::index_sequence<indices...>) {
    torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::call(std::forward<OutputTypes>(std::get<indices>(output)))...);
  }
  template<size_t... indices>
  static void copy_(const std::tuple<OutputTypes...>& output, Stack* stack, std::index_sequence<indices...>) {
    torch::jit::push(*stack, return_to_ivalue<OutputTypes, AllowDeprecatedTypes>::copy(std::get<indices>(output))...);
  }
};
// void-returning kernels: nothing to push. The dummy int parameter keeps the
// call sites uniform.
template<bool AllowDeprecatedTypes>
struct push_outputs<void, AllowDeprecatedTypes> final {
  static void call(int /*dummy*/, Stack* /*stack*/) {
  }
  static void copy(int /*dummy*/, Stack* /*stack*/) {
  }
};
562
+
563
// make_boxed_from_unboxed_functor
//
// The boxed entry point for an unboxed kernel functor: pulls arguments off the
// JIT stack, calls the functor, drops the consumed inputs, and pushes the
// outputs back onto the stack.
template<class KernelFunctor, bool AllowDeprecatedTypes>
struct make_boxed_from_unboxed_functor final {
  static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value,
    "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");

  static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) {
    using ReturnType = typename guts::infer_function_traits_t<KernelFunctor>::return_type;
    // We're explicitly filtering out DispatchKeySet from the argument list.
    // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher.
    // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
    // See Note [Plumbing Keys Through The Dispatcher] for the background.
    using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::parameter_types;
    constexpr bool has_outputs = !std::is_same<void, ReturnType>::value;
    constexpr size_t num_inputs = guts::typelist::size<ArgTypes>::value;
    if constexpr (has_outputs) {
      // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value
      // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`.
      // [Note: VC++ and 'std': ambiguous symbol]
      using ReturnType_ = ::std::decay_t<ReturnType>;
      // The output must be computed BEFORE dropping the inputs, since the
      // kernel reads its arguments from the (still-populated) stack.
      ReturnType_ output = call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
      torch::jit::drop(*stack, num_inputs);
      // See note [ VC++ and 'std': ambiguous symbol]
      push_outputs<ReturnType_, AllowDeprecatedTypes>::call(::std::move(output), stack);
    } else {
      call_functor_with_args_from_stack<KernelFunctor, AllowDeprecatedTypes>(functor, dispatchKeySet, stack);
      torch::jit::drop(*stack, num_inputs);
    }
  }
};
594
+ } // namespace impl
595
+
596
+ } // namespace c10
597
+
598
namespace torch {
  // Re-export c10::OperatorKernel under the torch:: namespace for convenience.
  using OperatorKernel = c10::OperatorKernel;
}
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <gtest/gtest.h>
4
+ #include <gmock/gmock.h>
5
+
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/core/dispatch/Dispatcher.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <c10/core/CPUAllocator.h>
10
+ #include <c10/util/irange.h>
11
+
12
// Builds a JIT stack (vector of IValues) from an arbitrary argument pack.
// Each input must be convertible to c10::IValue.
template<class... Inputs>
inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) {
  return {std::forward<Inputs>(inputs)...};
}
16
+
17
// Creates a minimal one-element float CPU-allocated tensor carrying the given
// dispatch key set, for exercising dispatcher behavior in tests.
// If requires_grad is false, the Autograd keys that TensorImpl adds by default
// are stripped off again (see TODO below).
inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) {
  auto* allocator = c10::GetCPUAllocator();
  int64_t nelements = 1;
  auto dtype = caffe2::TypeMeta::Make<float>();
  int64_t size_bytes = nelements * dtype.itemsize();
  auto storage_impl = c10::make_intrusive<c10::StorageImpl>(
    c10::StorageImpl::use_byte_size_t(),
    size_bytes,
    allocator->allocate(size_bytes),
    allocator,
    /*resizable=*/true);
  at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype);
  // TODO: We add this to simulate the ideal case where we only have Autograd backend keys
  // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl
  // constructor by default.
  if (!requires_grad) {
    t.unsafeGetTensorImpl()->remove_autograd_key();
  }
  return t;
}
37
+
38
// Convenience overload: wraps a single DispatchKey into a singleton key set.
inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) {
  return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad);
}
41
+
42
// Invokes an operator through the boxed calling convention: packs the
// arguments into a stack, calls the op, and returns the result stack
// (outputs replace inputs in place).
template<class... Args>
inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) {
  // Args are taken by value, so std::forward here moves them into the stack.
  auto stack = makeStack(std::forward<Args>(args)...);
  op.callBoxed(&stack);
  return stack;
}
48
+
49
// Invokes an operator through the unboxed (typed) calling convention.
// Result and Args must match the operator's registered C++ signature.
template<class Result, class... Args>
inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
  return op.typed<Result(Args...)>().call(std::forward<Args>(args)...);
}

// Same as callOpUnboxed, but forces dispatch to a specific DispatchKey.
template<class Result, class... Args>
inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
  return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...);
}

// Same as callOpUnboxed, but redispatches with an already-computed key set,
// bypassing key extraction from the arguments.
template<class Result, class... Args>
inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) {
  return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...);
}
63
+
64
// Asserts that calling op_name with a tensor carrying dispatch_key throws
// (i.e. no kernel is registered for that key). Assumes the schema exists and
// takes (Tensor, int) — TODO confirm against the test fixtures that use this.
inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
  auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
  EXPECT_ANY_THROW(
    callOp(*op, dummyTensor(dispatch_key), 5);
  );
}

// Asserts that no schema is registered at all under op_name.
inline void expectDoesntFindOperator(const char* op_name) {
  auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
  EXPECT_FALSE(op.has_value());
}
75
+
76
+ template<class Exception, class Functor>
77
+ inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
78
+ try {
79
+ std::forward<Functor>(functor)();
80
+ } catch (const Exception& e) {
81
+ EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains));
82
+ return;
83
+ }
84
+ ADD_FAILURE() << "Expected to throw exception containing \""
85
+ << expectMessageContains << "\" but didn't throw";
86
+ }
87
+
88
// expectListEquals overloads: element-wise equality checks between an expected
// ArrayRef and various actual container types, reporting size and per-element
// mismatches through gtest.

template<class T, size_t N>
void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual[i]);
  }
}

template<class T>
void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual[i]);
  }
}

template<class T>
void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    // c10::List uses get(i) rather than operator[] to return by value.
    EXPECT_EQ(expected[i], actual.get(i));
  }
}

template<class T>
void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) {
  EXPECT_EQ(expected.size(), actual.size());
  for (const auto i : c10::irange(expected.size())) {
    EXPECT_EQ(expected[i], actual[i]);
  }
}
119
+
120
// NB: This is not really sound, but all of the type sets constructed here
// are singletons so it's fine
// Returns the single dispatch key of a test tensor created by dummyTensor().
static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) {
  return legacyExtractDispatchKey(t.key_set());
}
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/intrusive_ptr.h>
7
+ #include <functional>
8
+ #include <utility>
9
+
10
+ namespace torch::jit {
11
+
12
// A torch::jit::Function implemented by a plain C++ callable operating on a
// Stack, rather than by interpreted bytecode. The schema must declare exactly
// one return value (asserted in the constructor), which runAsync relies on
// when it wraps stack.front() into a completed Future.
struct BuiltinOpFunction : public Function {
  BuiltinOpFunction(
      c10::QualifiedName qualname,
      c10::FunctionSchema schema,
      std::function<void(Stack&)> callable,
      std::string doc_string = "")
      : name_(std::move(qualname)),
        callable_(std::move(callable)),
        schema_(std::move(schema)),
        doc_string_(std::move(doc_string)) {
    TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1);
  }

  c10::string_view doc_string() const override {
    return doc_string_;
  }

  // Executes the callable; inputs are consumed from and outputs left on `stack`.
  void run(Stack& stack) override {
    callable_(stack);
  }

  // "Async" execution is actually synchronous here: run() is called inline and
  // the single result is returned as an already-completed Future.
  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& stack,
      TaskLauncher /* not used */) override {
    run(stack);
    auto res = c10::make_intrusive<c10::ivalue::Future>(stack.front().type());
    res->markCompleted(std::move(stack.front()));
    return res;
  }

  const c10::QualifiedName& qualname() const override {
    return name_;
  }

  // if this isn't yet defined, run its method_creator function
  void ensure_defined() override {
    // nop
  }

  const c10::FunctionSchema& getSchema() const override {
    return schema_;
  }

  size_t num_inputs() const override {
    return schema_.arguments().size();
  }

  Function& setSchema(c10::FunctionSchema schema) override {
    schema_ = std::move(schema);
    return *this;
  }

  // Both call() overloads execute eagerly and return false, signalling to the
  // caller that there is no Code object to switch into.
  bool call(
      Stack& stack,
      std::optional<size_t>,
      c10::function_ref<void(const Code&)>) override {
    run(stack);
    return false;
  }

  bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)>)
      override {
    run(stack);
    return false;
  }

  ~BuiltinOpFunction() override = default;

 private:
  c10::QualifiedName name_;

  std::function<void(Stack&)> callable_;

  c10::FunctionSchema schema_;

  std::string doc_string_;
};
89
+
90
+ } // namespace torch::jit
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <memory>
5
+
6
+ #include <c10/macros/Export.h>
7
+ #include <c10/macros/Macros.h>
8
+ #include <c10/util/Exception.h>
9
+
10
+ namespace c10 {
11
+
12
+ struct ClassType;
13
+ using ClassTypePtr = std::shared_ptr<ClassType>;
14
+
15
+ TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex);
16
+
17
// Returns the registered ClassType for custom class T, caching the RTTI-keyed
// lookup in a function-local static.
template <typename T>
const c10::ClassTypePtr& getCustomClassType() {
  // Classes are never unregistered from getCustomClassTypeMap and the
  // hash lookup can be a hot path, so just cache.
  // For the same reason, it's fine If this ends up getting duplicated across
  // DSO boundaries for whatever reason.
  static c10::ClassTypePtr cache = getCustomClassTypeImpl(
      std::type_index(typeid(T)));
  return cache;
}
+ }
27
+
28
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <typeindex>
4
+ #include <c10/core/DispatchKeySet.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Metaprogramming.h>
7
+ #include <c10/util/Type.h>
8
+
9
+ namespace c10 {
10
+ namespace impl {
11
+
12
+ // A CppSignature object holds RTTI information about a C++ function signature at runtime
13
+ // and can compare them or get a debug-printable name.
14
+ class TORCH_API CppSignature final {
15
+ public:
16
+ CppSignature(const CppSignature&) = default;
17
+ CppSignature(CppSignature&&) noexcept = default;
18
+ CppSignature& operator=(const CppSignature&) = default;
19
+ CppSignature& operator=(CppSignature&&) noexcept = default;
20
+
21
+ template<class FuncType>
22
+ static CppSignature make() {
23
+ // Normalize functors, lambdas, function pointers, etc. into the plain function type
24
+ // The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
25
+ // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered
26
+ // with different calling conventions.
27
+ // See Note [Plumbing Keys Through The Dispatcher]
28
+ using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type;
29
+
30
+ return CppSignature(std::type_index(typeid(decayed_function_type)));
31
+ }
32
+
33
+ std::string name() const {
34
+ return c10::demangle(signature_.name());
35
+ }
36
+
37
+ friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) {
38
+ if (lhs.signature_ == rhs.signature_) {
39
+ return true;
40
+ }
41
+ // Without RTLD_GLOBAL, the type_index comparison could yield false because
42
+ // they point to different instances of the RTTI data, but the types would
43
+ // still be the same. Let's check for that case too.
44
+ // Note that there still is a case where this might not work, i.e. when
45
+ // linking libraries of different compilers together, they might have
46
+ // different ways to serialize a type name. That, together with a missing
47
+ // RTLD_GLOBAL, would still fail this.
48
+ if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) {
49
+ return true;
50
+ }
51
+
52
+ return false;
53
+ }
54
+
55
+ private:
56
+ explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {}
57
+ std::type_index signature_;
58
+ };
59
+
60
+ inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) {
61
+ return !(lhs == rhs );
62
+ }
63
+
64
+ }
65
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <ATen/core/function_schema.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <c10/util/Bitset.h>
7
+ #include <c10/core/DispatchKeySet.h>
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/core/Variadic.h>
10
+ #include <ATen/core/stack.h>
11
+
12
+ namespace c10 {
13
+
14
+ namespace impl {
15
+
16
+ // Take a DispatchKeySet for a Tensor and determine what the actual dispatch
17
+ // DispatchKey should be, taking into account TLS, and skipping backends which
18
+ // fall through.
19
+ //
20
+ // Unlike Tensor::key_set(), the value of this on a tensor can change depending
21
+ // on TLS.
22
+ //
23
+ // NB: If there is no valid dispatch key, this will return Undefined
24
+ inline DispatchKeySet computeDispatchKeySet(
25
+ DispatchKeySet ks,
26
+ // The key mask lets us eliminate (by zero entries) keys which should not
27
+ // be considered for dispatch. There are two cases when we use this:
28
+ //
29
+ // - If an operator's dispatch table contains a fallthrough entry, we
30
+ // should bypass it entirely when finding the key
31
+ // - If a user invokes with redispatch, the mask lets us
32
+ // zero out the key the user asked us to stop.
33
+ //
34
+ // These excluded backends are NOT tracked in the TLS, but must be applied
35
+ // AFTER TLS (since the backend may have been introduced for consideration
36
+ // by the included TLS), which is why you have to pass them in to this
37
+ // function (as opposed to just applying it to the input 'ks').
38
+ DispatchKeySet key_mask
39
+ ) {
40
+ c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
41
+ // TODO: It's a bit irritating that we have to do logical ORs here, it would
42
+ // be nice to only do one. Can always_included be folded into the TLS? Well,
43
+ // it's a bit troublesome, because fastpath TLS access requires the type of
44
+ // the TLS in question to be zero-initialized, so you don't actually win
45
+ // anything in that case.
46
+ return (((ks | local.included_) - local.excluded_) & key_mask);
47
+ }
48
+
49
+ }
50
+
51
+ namespace detail {
52
// A small gadget to extract the DispatchKeySet from types which are known
// to have it. Used to extract dispatch keys from unboxed calls.
struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
  // Running union of the key sets of every dispatch-relevant argument
  // visited so far.
  DispatchKeySet ts;
  void operator()(const at::Tensor& x) {
    ts = ts | x.key_set();
  }
  void operator()(const std::optional<at::Tensor>& x) {
    if (x.has_value()) {
      ts = ts | x->key_set();
    }
  }
  void operator()(at::ArrayRef<at::Tensor> xs) {
    for (const auto& x : xs) {
      ts = ts | x.key_set();
    }
  }
  // Tensor?[] translates to this case.
  void operator()(const c10::List<std::optional<at::Tensor>>& xs) {
    for (std::optional<at::Tensor> x : xs) {
      if (x.has_value()) {
        ts = ts | x.value().key_set();
      }
    }
  }
  // Structured Tensor[] translates to this case
  void operator()(const at::ITensorListRef& xs) {
    for (const auto& x : xs) {
      ts = ts | x.key_set();
    }
  }
  [[noreturn]] void operator()(at::ArrayRef<std::optional<at::Tensor>>) {
    // Just checking that the handling of Tensor?[] didn't change.
    TORCH_INTERNAL_ASSERT(false);
  }
  void operator()(const at::Generator& gen) {
    if (gen.defined()) {
      ts = ts | gen.key_set();
    }
  }
  void operator()(const std::optional<at::Generator>& gen) {
    if (gen.has_value() && gen->defined()) {
      ts = ts | gen->key_set();
    }
  }
  // Catch-all overload: any other argument type carries no dispatch keys.
  template <typename T>
  void operator()(const T&) {
    // do nothing
  }
};
102
+
103
+ // NB: take by const reference (Don't do universal forwarding here! You
104
+ // don't want to move into this function!)
105
+ template <typename... Args>
106
+ DispatchKeySet multi_dispatch_key_set(const Args&... args) {
107
+ return MultiDispatchKeySet().apply(args...).ts;
108
+ }
109
+ }
110
+
111
/**
 * An instance of DispatchKeyExtractor knows how to get a dispatch key given
 * a list of arguments for an operator call.
 *
 * The instance is specific for a certain operator as:
 *  - In boxed dispatch, different operators have different ways to extract
 *    the dispatch key (e.g. different numbers of arguments), and we precompute
 *    the stack locations we should look at; and
 *  - In all dispatch, some backends should be excluded from dispatch because
 *    they have been registered as fallthrough. The set of excluded backends
 *    varies from operator, as some operators may have overridden the
 *    fallthrough with custom behavior.
 *
 *   Note - this should maintain identical impl to the py dispatcher key extraction logic
 *   at pytorch/torch/dispatcher.py
 */
struct TORCH_API DispatchKeyExtractor final {
public:
  static DispatchKeyExtractor make(const FunctionSchema& schema) {
    return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
  }

  // Extractor with an empty argument bitset; the real bitset is installed
  // later via registerSchema() once the schema is known.
  static DispatchKeyExtractor makeUninitialized() {
    return DispatchKeyExtractor(c10::utils::bitset());
  }

  void registerSchema(const FunctionSchema& schema) {
    // May only be called on an uninitialized/deregistered extractor.
    TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
    dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
  }
  void deregisterSchema() {
    dispatch_arg_indices_reverse_ = c10::utils::bitset();
  }

  // Computes the dispatch key set for a boxed call: unions the key sets of
  // every Tensor / Tensor[] / Tensor?[] argument flagged in the precomputed
  // bitset, then masks out keys the operator falls through for.
  DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
    DispatchKeySet ks;
    dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
      const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
      if (C10_LIKELY(ivalue.isTensor())) {
        // NB: Take care not to introduce a refcount bump (there's
        // no safe toTensorRef method, alas)
        ks = ks | ivalue.unsafeToTensorImpl()->key_set();
      } else if (C10_UNLIKELY(ivalue.isTensorList())) {
        for (const at::Tensor& tensor : ivalue.toTensorList()) {
          ks = ks | tensor.key_set();
        }
      }
      // Tensor?[] translates to a c10::List<IValue> so we need to peek inside
      else if (C10_UNLIKELY(ivalue.isList())) {
        for (const auto& elt : ivalue.toListRef()) {
          if (elt.isTensor()) {
            ks = ks | elt.toTensor().key_set();
          }
        }
      }
    });
    // Keys that are fallthrough should be skipped
    if (requiresBitsetPerBackend_) {
      auto backend_idx = ks.getBackendIndex();
      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
    } else {
      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
    }
  }

  // Unboxed counterpart of getDispatchKeySetBoxed: gathers keys directly
  // from the typed arguments via detail::multi_dispatch_key_set.
  template<class... Args>
  DispatchKeySet getDispatchKeySetUnboxed(const Args&... args) const {
    auto ks = detail::multi_dispatch_key_set(args...);
    // Keys that are fallthrough should be skipped
    if (requiresBitsetPerBackend_) {
      auto backend_idx = ks.getBackendIndex();
      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
    } else {
      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
    }
  }

  void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);

  std::string dumpState() const;
  void checkInvariants(const FunctionSchema& schema) const;

private:
  // Builds the reverse-indexed bitset of arguments whose static type can
  // carry dispatch keys (Tensor, Tensor[], Tensor?[], Tensor?).
  static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
    TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
        "The function schema has ", schema.arguments().size(),
        " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
    c10::utils::bitset dispatch_arg_indices_reverse;
    for (const auto index : c10::irange(schema.arguments().size())) {
      if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *ListType::ofTensors()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *ListType::ofOptionalTensors()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *OptionalType::ofTensor())) {
        // Bits are stored from the top of the stack, hence the reversal.
        dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
      }
    }
    return dispatch_arg_indices_reverse;
  }

  explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
  : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
  , nonFallthroughKeys_(DispatchKeySet::FULL)
  , requiresBitsetPerBackend_(false) {
    // Start with no fallthroughs anywhere; setOperatorHasFallthroughForKey
    // carves keys out of these sets later.
    for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
      nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
    }
  }

  // this is a bitset that has ones for each argument index which has to be
  // considered for dispatch. This avoids having to iterate over the stack
  // to find all the tensors. The bits are stored in reverse order, i.e.
  // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
  // the top of the stack (i.e. the i-th last argument of the function)
  // is relevant for dispatch.
  // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
  // fallthrough
  c10::utils::bitset dispatch_arg_indices_reverse_;

  // Set of functionality keys for which the operator does NOT have fallthrough kernel.
  DispatchKeySet nonFallthroughKeys_;
  // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND.
  // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
  std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_;
  // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path),
  // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_
  bool requiresBitsetPerBackend_;
};
241
+
242
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h ADDED
@@ -0,0 +1,793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/SequenceNumber.h>
4
+ #include <ATen/core/boxing/KernelFunction.h>
5
+ #include <ATen/core/boxing/impl/boxing.h>
6
+ #include <ATen/core/dispatch/OperatorEntry.h>
7
+ #include <ATen/core/dispatch/CppSignature.h>
8
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
9
+ #include <ATen/record_function.h>
10
+ #include <c10/util/Exception.h>
11
+ #include <c10/util/LeftRight.h>
12
+ #include <list>
13
+ #include <mutex>
14
+ #include <condition_variable>
15
+ #include <type_traits>
16
+ #include <c10/core/SafePyObject.h>
17
+
18
+ #include <ATen/core/grad_mode.h>
19
+ #include <ATen/core/enum_tag.h>
20
+
21
+ #ifndef NDEBUG
22
+ #include <iostream>
23
+ #endif
24
+
25
+ namespace c10 {
26
+
27
// Runtime toggle and nesting-depth bookkeeping for dispatch tracing; the
// incr/decr pair is driven by DispatchTraceNestingGuard below.
TORCH_API bool show_dispatch_trace();
TORCH_API void dispatch_trace_nesting_incr();
TORCH_API void dispatch_trace_nesting_decr();
TORCH_API int64_t dispatch_trace_nesting_value();
31
+
32
+ struct DispatchTraceNestingGuard {
33
+ DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); }
34
+ ~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); }
35
+ };
36
+
37
+ class TORCH_API OperatorHandle;
38
+ template<class FuncType> class TypedOperatorHandle;
39
+
40
/**
 * Implement this interface and register your instance with the dispatcher
 * to get notified when operators are registered or deregistered with
 * the dispatcher.
 *
 * NB: registration events only occur when a 'def' occurs; we don't trigger
 * on 'impl' or 'fallback' calls.
 */
class TORCH_API OpRegistrationListener {
public:
  virtual ~OpRegistrationListener();

  // Fired once per operator def; newly-added listeners are also replayed
  // for all previously registered ops (see Dispatcher::addRegistrationListener).
  virtual void onOperatorRegistered(const OperatorHandle& op) = 0;
  virtual void onOperatorDeregistered(const OperatorHandle& op) = 0;
};
55
+
56
+ namespace detail {
57
+ class RegistrationListenerList;
58
+ }
59
+ class SchemaRegistrationHandleRAII;
60
+
61
/**
 * Top-level dispatch interface for dispatching via the dynamic dispatcher.
 * Most end users shouldn't use this directly; if you're trying to register
 * ops look in op_registration
 */
class TORCH_API Dispatcher final {
private:
  // For direct access to backend fallback information
  friend class impl::OperatorEntry;

  // Per-operator bookkeeping: the dispatch entry itself plus registration
  // reference counts that decide when listeners fire and when the entry
  // may really be deleted.
  struct OperatorDef final {
    explicit OperatorDef(OperatorName&& op_name)
    : op(std::move(op_name)) {}

    impl::OperatorEntry op;

    // These refer to the number of outstanding RegistrationHandleRAII
    // for this operator. def_count reflects only def() registrations
    // (in the new world, this should only ever be 1, but old style
    // registrations may register the schema multiple times, which
    // will increase this count). def_and_impl_count reflects the number
    // of combined def() and impl() registrations. When the last def() gets
    // unregistered, we must immediately call the Deregistered listeners, but we
    // must not actually delete the handle as there are other outstanding RAII
    // destructors which will try to destruct and they had better still have a
    // working operator handle in this case
    size_t def_count = 0;
    size_t def_and_impl_count = 0;
  };
  friend class OperatorHandle;
  template<class> friend class TypedOperatorHandle;

  // Liveness flag + mutex shared (via shared_ptr) with callbacks that may
  // outlive this Dispatcher; see the comment on guard_ below.
  struct Guard final {
    Guard() : alive(true), mutex() {}
    std::atomic<bool> alive;
    std::mutex mutex;
  };

public:
  ~Dispatcher();

  // Implementation note: this class abstracts over the fact that we have per-operator
  // dispatch tables. This could be easily adjusted to have a single global hash
  // table.
  static Dispatcher& realSingleton();

  C10_ALWAYS_INLINE static Dispatcher& singleton() {
#if !defined C10_MOBILE
    // Implemented inline so that steady-state code needn't incur
    // function-call overhead. We can't just inline `realSingleton`
    // because the function-local static would get duplicated across
    // all DSOs that include & use this header, leading to multiple
    // singleton instances.
    static Dispatcher& s = realSingleton();
    return s;
#else
    // For C10_MOBILE, we should never inline a static function that
    // has a static member, since the generated code calls
    // __cxa_guard_acquire and __cxa_guard_release which help
    // implement exactly once semantics for the initialization of the
    // static Dispatcher& s above (for the non-mobile case). That
    // additional code when duplicated across all operator stubs
    // for every backend results in a lot of additional code
    // being generated by the compiler.
    return realSingleton();
#endif
  }

  // ------------------------------------------------------------------------
  //
  // Accessing operators by schema
  //
  // ------------------------------------------------------------------------

  /**
   * Looks for an operator schema with the given name and overload name
   * and returns it if it is registered WITH A SCHEMA.
   * Returns nullopt otherwise.
   */
  std::optional<OperatorHandle> findSchema(const OperatorName& operator_name);

  /**
   * Variant of findSchema that results in less code generated at the call site.
   * It (1) takes const char* pointer rather than OperatorName (so we skip
   * generating std::string constructor calls at the call site), and (2)
   * it raises an exception if the operator is not found (so we skip
   * generating exception raising code at the call site)
   *
   * Irritatingly, we still have to generate the handful of instructions
   * for dealing with an exception being thrown during static initialization
   * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
   * could avoid this code too, but as the name of the function suggests,
   * it does throw exceptions.
   */
  OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);

  // Like findSchema, but also returns OperatorHandle even if there is no schema
  std::optional<OperatorHandle> findOp(const OperatorName& operator_name);

  // Returns a list of all operator names present in the operatorLookupTable_
  const std::vector<OperatorName> getAllOpNames();

  // ------------------------------------------------------------------------
  //
  // Invoking operators
  //
  // ------------------------------------------------------------------------

  template<class Return, class... Args>
  Return call(const TypedOperatorHandle<Return (Args...)>& op, Args... args) const;


  // Out-of-line slow path taken by call() when profiling/record-function
  // step callbacks are active.
  template<class Return, class... Args>
  static Return callWithDispatchKeySlowPath(const TypedOperatorHandle<Return (Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);

  // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculatulation.
  // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set.
  // Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key.
  // See Note [Plumbing Keys Through The Dispatcher]
  template<class Return, class... Args>
  Return redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const;

  // Invoke an operator via the boxed calling convention using an IValue stack
  void callBoxed(const OperatorHandle& op, Stack* stack) const;
  void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const;

  // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
  // See Note [Plumbing Keys Through The Dispatcher]
  void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const;

  // True if a valid backend-fallback kernel is installed for this key.
  bool hasBackendFallbackForDispatchKey(DispatchKey dk) {
    auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk);
    if (dispatch_ix < 0) return false;
    return backendFallbackKernels_[dispatch_ix].kernel.isValid();
  }

  // Used by torchdeploy/multipy for multiple interpreters racing.
  void waitForDef(const FunctionSchema& schema);
  void waitForImpl(const OperatorName& op_name, std::optional<DispatchKey> dispatch_key);

  // ------------------------------------------------------------------------
  //
  // Performing registrations (NON user public; use op_registration)
  //
  // ------------------------------------------------------------------------

  /**
   * Register a new operator schema.
   *
   * If a schema with the same operator name and overload name already exists,
   * this function will check that both schemas are exactly identical.
   */
  RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector<at::Tag> tags = {});

  /**
   * Register a kernel to the dispatch table for an operator.
   * If dispatch_key is nullopt, then this registers a fallback kernel.
   *
   * @return A RAII object that manages the lifetime of the registration.
   *         Once that object is destructed, the kernel will be deregistered.
   */
  // NB: steals the inferred function schema, as we may need to hold on to
  // it for a bit until the real schema turns up
  RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional<DispatchKey> dispatch_key, KernelFunction kernel, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);

  /**
   * Given an operator, tells the Dispatcher that we have implemented a fake impl
   * for this op in the given Python module. Call this a "pystub".
   */
  RegistrationHandleRAII registerPythonModule(const OperatorName& op_name, const char* pymodule, const char* context);

  /**
   * Given an operator, throws if we have a pystub.
   */
  void throwIfHasPythonModule(OperatorName op_name);

  std::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name);

  /**
   * Register a new operator by name.
   */
  RegistrationHandleRAII registerName(OperatorName op_name);

  /**
   * Register a fallback kernel for a backend.
   * If an operator is called but there is no concrete kernel for the dispatch
   * key of the given operator arguments, it will check if there is such a
   * fallback kernel for the given dispatch key and, if yes, call that one.
   */
  RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug);

  /**
   * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend
   * API. These invocations are only permitted once per program, so we raise
   * an error if this is called again for the same namespace.
   */
  RegistrationHandleRAII registerLibrary(std::string ns, std::string debug);

  // ------------------------------------------------------------------------
  //
  // Listeners on registrations
  //
  // ------------------------------------------------------------------------

  /**
   * Add a listener that gets called whenever a new op is registered or an existing
   * op is deregistered. Immediately after registering, this listener gets called
   * for all previously registered ops, so it can be used to keep track of ops
   * registered with this dispatcher.
   */
  RegistrationHandleRAII addRegistrationListener(std::unique_ptr<OpRegistrationListener> listener);

  void checkInvariants() const;

  //
  // ------------------------------------------------------------------------
  //
  // Assertions
  //
  // ------------------------------------------------------------------------

  /**
   * For testing purposes.
   * Returns a list of all operators that were created through calls to registerImpl(),
   * without any corresponding calls to registerDef(). After static initialization
   * is done this is almost certainly a bug, as the created OperatorHandle won't have
   * any schema associated with it and users calling the op through the dispatcher
   * won't be able to access it
   *
   * Note that we cannot enforce this invariant "as we go" during static initialization,
   * due to undefined static initialization order- we have no guarantees over the order
   * in which .def() and .impl() calls are registered in the dispatcher at static
   * initialization time. So this function should only be called after static initialization.
   */
  std::vector<OperatorHandle> findDanglingImpls() const;

  /**
   * Useful for inspecting global Dispatcher registration state.
   * Returns the names of all operators with a kernel registered for the specified DispatchKey.
   * If no DispatchKey is specified, it returns all registered operators.
   */
  std::vector<OperatorName> getRegistrationsForDispatchKey(std::optional<DispatchKey> k) const;

private:
  Dispatcher();

  // Helpers for emitting profiler/record-function events around a dispatch.
  static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey, DispatchKeySet dispatchKeySet);
  static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet);
  static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet, c10::ArrayRef<const c10::IValue> args);

#ifdef FBCODE_CAFFE2
  static bool profilingOperatorEvents();
  static void fireOpStartUSDT(at::RecordFunction::schema_ref_t schema_ref);
  static void fireOpEndUSDT(at::RecordFunction::schema_ref_t schema_ref);
#endif // FBCODE_CAFFE2

  OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema);
  OperatorHandle findOrRegisterName_(const OperatorName& op_name);

  void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name);
  void deregisterImpl_(
    const OperatorHandle& op,
    const OperatorName& op_name,
    std::optional<DispatchKey> dispatch_key,
    impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
  void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
  void deregisterFallback_(DispatchKey dispatchKey);
  void deregisterLibrary_(const std::string& ns);
  void cleanup(const OperatorHandle& op, const OperatorName& op_name);
  void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug);

  std::list<OperatorDef> operators_;
#if !defined(C10_MOBILE)
  LeftRight<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
#else
  RWSafeLeftRightWrapper<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
#endif
  // Map from namespace to debug string (saying, e.g., where the library was defined)
  ska::flat_hash_map<std::string, std::string> libraries_;

  std::array<impl::AnnotatedKernel, num_runtime_entries> backendFallbackKernels_;

  std::unique_ptr<detail::RegistrationListenerList> listeners_;

  // This condition variable gets notified whenever we add a new def/impl to the
  // dispatch table. This is primarily used by multipy/torchdeploy, when
  // we have multiple interpreters trying to register to the dispatch table.
  // In this situation, whenever the non-primary interpreter would have tried
  // to register to the dispatch table, instead it will check to see if the
  // expected registration has already been made, and if it hasn't, wait on
  // this condition variable to see if it was just racing with the primary
  // interpreter.
  //
  // We expect it to be rare for there to be any waiters on this condition
  // variable. This is mostly just to help give better diagnostics if
  // something goes horribly wrong
  std::condition_variable cond_var_;

  // Protect concurrent access to the dispatcher. We store this in a
  // `shared_ptr` as we return callbacks that call back into dispatcher methods,
  // and we need to be able to handle and guard against the event when the
  // `Dispatcher` has been destroyed before the callbacks fire.
  std::shared_ptr<Guard> guard_;
};
365
+
366
+ /**
367
+ * This is a handle to an operator schema registered with the dispatcher.
368
+ * This handle can be used to register kernels with the dispatcher or
369
+ * to lookup a kernel for a certain set of arguments.
370
+ */
371
+ class TORCH_API OperatorHandle {
372
+ template <typename T> friend struct std::hash;
373
+
374
+ public:
375
+ OperatorHandle(OperatorHandle&&) noexcept = default;
376
+ OperatorHandle& operator=(OperatorHandle&&) noexcept = default;
377
+ OperatorHandle(const OperatorHandle&) = default;
378
+ OperatorHandle& operator=(const OperatorHandle&) = default;
379
+ // NOLINTNEXTLINE(performance-trivially-destructible)
380
+ ~OperatorHandle();
381
+
382
+ const OperatorName& operator_name() const {
383
+ return operatorDef_->op.operator_name();
384
+ }
385
+
386
+ bool hasSchema() const {
387
+ return operatorDef_->op.hasSchema();
388
+ }
389
+
390
+ const FunctionSchema& schema() const {
391
+ return operatorDef_->op.schema();
392
+ }
393
+
394
+ const std::string& debug() const {
395
+ return operatorDef_->op.debug();
396
+ }
397
+
398
+ std::string dumpState() const {
399
+ return operatorDef_->op.dumpState();
400
+ }
401
+
402
+ bool hasKernelForDispatchKey(DispatchKey k) const {
403
+ return operatorDef_->op.hasKernelForDispatchKey(k);
404
+ }
405
+
406
+ bool isKernelFallthroughKernel(DispatchKey k) const {
407
+ return operatorDef_->op.kernelForDispatchKey(k).isFallthrough();
408
+ }
409
+
410
+ bool hasKernelForAnyDispatchKey(DispatchKeySet k) const {
411
+ return operatorDef_->op.hasKernelForAnyDispatchKey(k);
412
+ }
413
+
414
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const {
415
+ return operatorDef_->op.hasComputedKernelForDispatchKey(k);
416
+ }
417
+
418
+ std::string dumpComputedTable() const {
419
+ return operatorDef_->op.dumpComputedTable();
420
+ }
421
+
422
+ void checkInvariants() const {
423
+ return operatorDef_->op.checkInvariants();
424
+ }
425
+
426
+ c10::ArrayRef<at::Tag> getTags() const {
427
+ return operatorDef_->op.getTags();
428
+ }
429
+
430
+ void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback) {
431
+ operatorDef_->op.setReportErrorCallback_(std::move(callback));
432
+ }
433
+
434
+ bool hasTag(const at::Tag& tag) const {
435
+ for(const auto& tag_: getTags()) {
436
+ if (tag == tag_) {
437
+ return true;
438
+ }
439
+ }
440
+ return false;
441
+ }
442
+
443
+ template<class FuncType>
444
+ TypedOperatorHandle<FuncType> typed() const {
445
+ // NB: This assert is not 100% sound: you can retrieve a typed() operator
446
+ // handle prior to ANY C++ signature being registered on the operator
447
+ // and the check will say everything is OK (at which point you can then
448
+ // smuggle in a kernel that is typed incorrectly). For everything
449
+ // in core library this won't happen, because all the static registrations
450
+ // will be done by the time a typed() handle is acquired.
451
+ #if !defined C10_MOBILE
452
+ operatorDef_->op.assertSignatureIsCorrect<FuncType>();
453
+ if (fn_has_symint<FuncType>::value) {
454
+ operatorDef_->op.assertSignatureIsCorrect<typename fn_remove_symint<FuncType>::type>();
455
+ }
456
+ #endif
457
+ return TypedOperatorHandle<FuncType>(operatorIterator_);
458
+ }
459
+
460
+ void callBoxed(Stack* stack) const {
461
+ c10::Dispatcher::singleton().callBoxed(*this, stack);
462
+ }
463
+
464
+ void callBoxed(Stack& stack) const {
465
+ callBoxed(&stack);
466
+ }
467
+
468
+ void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const {
469
+ c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack);
470
+ }
471
+
472
+ void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
473
+ c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
474
+ }
475
+
476
+ template <typename F>
477
+ PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const {
478
+ return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor);
479
+ }
480
+
481
+ bool operator==(const OperatorHandle& other) const {
482
+ return operatorDef_ == other.operatorDef_;
483
+ }
484
+
485
+ bool operator!=(const OperatorHandle& other) const {
486
+ return operatorDef_ != other.operatorDef_;
487
+ }
488
+
489
+ private:
490
+ explicit OperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
491
+ : operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {}
492
+ friend class Dispatcher;
493
+ template<class> friend class TypedOperatorHandle;
494
+
495
+ // Storing a direct pointer to the OperatorDef even though we
496
+ // already have the iterator saves an instruction in the critical
497
+ // dispatch path. The iterator is effectively a
498
+ // pointer-to-std::list-node, and (at least in libstdc++'s
499
+ // implementation) the element is at an offset 16 bytes from that,
500
+ // because the prev/next pointers come first in the list node
501
+ // struct. So, an add instruction would be necessary to convert from the
502
+ // iterator to an OperatorDef*.
503
+ Dispatcher::OperatorDef* operatorDef_;
504
+
505
+ // We need to store this iterator in order to make
506
+ // Dispatcher::cleanup() fast -- it runs a lot on program
507
+ // termination (and presuambly library unloading).
508
+ std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
509
+ };
510
+
511
+ /**
512
+ * This is a handle to an operator schema registered with the dispatcher.
513
+ * It holds the same information as an OperatorHandle, but it is templated
514
+ * on the operator arguments and allows calling the operator in an
515
+ * unboxed way.
516
+ */
517
+ template<class FuncType>
518
+ class TypedOperatorHandle final {
519
+ static_assert(guts::false_t<FuncType>(), "FuncType in OperatorHandle::typed<FuncType> was not a valid function type");
520
+ };
521
+ template<class Return, class... Args>
522
+ class TypedOperatorHandle<Return (Args...)> final : public OperatorHandle {
523
+ public:
524
+ TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
525
+ TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
526
+ TypedOperatorHandle(const TypedOperatorHandle&) = default;
527
+ TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
528
+
529
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
530
+ C10_ALWAYS_INLINE Return call(Args... args) const {
531
+ return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
532
+ }
533
+
534
+ // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
535
+ C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
536
+ return c10::Dispatcher::singleton().redispatch<Return, Args...>(*this, currentDispatchKeySet, std::forward<Args>(args)...);
537
+ }
538
+
539
+ private:
540
+ explicit TypedOperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
541
+ : OperatorHandle(operatorIterator) {}
542
+ friend class OperatorHandle;
543
+ };
544
+
545
namespace detail {
// Silences "unused parameter" warnings for parameters that only exist to
// drive overload resolution / template deduction.
template <class... Args> inline void unused_arg_(const Args&...) {}

// CaptureKernelCall is intended to capture return values from Dispatcher
// unboxed kernel calls. A record function may request to get outputs from the
// kernel calls. For boxed kernels, it's straightforward, the returned values
// are in the stack object. The stack can be passed to record functions. For
// unboxed kernels, we need to handle different kinds of return values, cache
// them temporarily, then release the values for the actual function call
// return.
template <typename ReturnType>
struct CaptureKernelCall {
  template <typename F, typename... Args>
  CaptureKernelCall(
      const F& kernel,
      const TypedOperatorHandle<ReturnType(Args...)>& op,
      const DispatchKeySet& dispatchKeySet,
      Args&&... args)
      // Calls the kernel and capture the result in output_.
      : output_{kernel.template call<ReturnType, Args...>(
            op,
            dispatchKeySet,
            std::forward<Args>(args)...)} {}
  // Wraps the return values in a Stack (for RecordFunction consumption).
  Stack getOutputs() {
    Stack stack;
    impl::push_outputs<ReturnType, false>::copy(output_, &stack);
    return stack;
  }
  // Since we are returning the output_, we don't expect the output_ to be used
  // afterward. Copy elision and RVO do not apply to class data members. Using
  // move semantic to avoid copies when possible.
  ReturnType release() && {
    return std::move(output_);
  }

 private:
  ReturnType output_;
};

// Handle the lvalue reference differently since it should not be moved.
template <>
inline at::Tensor& CaptureKernelCall<at::Tensor&>::release() && {
  return output_;
}

// Handle case where the kernel returns void.
template <>
struct CaptureKernelCall<void> {
  template <typename F, typename... Args>
  CaptureKernelCall(
      const F& kernel,
      const TypedOperatorHandle<void(Args...)>& op,
      const DispatchKeySet& dispatchKeySet,
      Args&&... args) {
    // Calling the kernel and no need to capture void.
    kernel.template call<void, Args...>(
        op, dispatchKeySet, std::forward<Args>(args)...);
  }
  Stack getOutputs() {
    return Stack();
  }
  void release() && {}
};

// Helper used by the NDEBUG-only dispatch-trace printing below.
TORCH_API void _print_dispatch_trace(const std::string& label, const std::string& op_name, const DispatchKeySet& dispatchKeySet);

} // namespace detail
613
+
614
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
//
// Slow path of Dispatcher::call, taken only when the operator is observed and
// profiling step callbacks are active. It wraps the kernel invocation in a
// RecordFunction, boxing the arguments only if the callbacks ask for inputs
// and capturing outputs only if they ask for outputs.
template<class Return, class... Args>
inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle<Return(Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) {
  // If callbacks need inputs, we box the arguments and pass them to the guard.
  // Note: For perf reasons we wouldn't want to prematurely box the arguments.
  at::RecordFunction guard(std::move(stepCallbacks));
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved());
  auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
  auto& schema = op.schema();
  auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
  constexpr auto num_boxed_args = impl::boxed_size<Args...>();
  if constexpr (num_boxed_args != 0) {
    if (guard.needsInputs()) {
      // If we used std::array<IValue, num_boxed_args> here, we would
      // have to spend time default constructing the IValues in
      // boxedArgs. aligned_storage has no such requirement.
      impl::IValueAlignedStorage boxedArgs[num_boxed_args];
      // For debugging only; could be removed (but the compiler will do
      // that for us and it's nice to have the extra assurance of
      // correctness from our debug builds).
      int lastArgIdx = 0;
      impl::boxArgsToStack(boxedArgs, lastArgIdx, args...);
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args);
      // I don't *think* we need std::launder here, because IValue has
      // no subclasses and no const or reference fields.
      runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
      // Manually run the destructors of the IValues that were
      // placement-constructed into the raw aligned storage above.
      for (size_t ii = 0; ii < num_boxed_args; ++ii) {
        reinterpret_cast<IValue *>(&boxedArgs[ii])->~IValue();
      }
    } else {
      runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
    }
  } else {
    runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
  }

  if (C10_UNLIKELY(guard.needsOutputs())) {
    // Calls the kernel and capture the output temporarily to pass to
    // RecordFunction.
    detail::CaptureKernelCall<Return> captureKernelCall(
      kernel, op, dispatchKeySet, std::forward<Args>(args)...);
    guard.setOutputs(captureKernelCall.getOutputs());
    // Releases the captured output to return to caller.
    return std::move(captureKernelCall).release();
  }

  // keeping the guard alive while executing the kernel
  return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
}
663
+
664
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
//
// Hot path for unboxed operator calls: extract the dispatch key set from the
// arguments, look up the kernel, invoke it. Only diverts to
// callWithDispatchKeySlowPath when profiling step callbacks are active AND
// this operator is observed.
template<class Return, class... Args>
C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle<Return(Args...)>& op, Args... args) const {
  detail::unused_arg_(args...);  // workaround for a false-positive warning about unused parameters in gcc 5
  auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor()
    .template getDispatchKeySetUnboxed<Args...>(args...);
#ifndef NDEBUG
  DispatchTraceNestingGuard debug_guard;
  if (show_dispatch_trace()) {
    detail::_print_dispatch_trace("[call]", toString(op.operator_name()), dispatchKeySet);
  }
#endif
  const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet);
#ifndef PYTORCH_DISABLE_PER_OP_PROFILING
  auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
  if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) {
    return callWithDispatchKeySlowPath<Return, Args...>(op, *step_callbacks, dispatchKeySet, kernel, std::forward<Args>(args)...);
  }
#endif  // PYTORCH_DISABLE_PER_OP_PROFILING

#ifdef FBCODE_CAFFE2
  if(profilingOperatorEvents()) {
    // RAII scope firing USDT (static tracepoint) events around the call.
    struct FireOpRAII {
      FireOpRAII(at::RecordFunction::schema_ref_t schema_ref) : schema_ref_(schema_ref) {
        fireOpStartUSDT(schema_ref);
      }
      ~FireOpRAII() { fireOpEndUSDT(schema_ref_); }
      at::RecordFunction::schema_ref_t schema_ref_;
    } event(op.schema());
    return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
  } else {
    return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
  }
#else
  return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
#endif // FBCODE_CAFFE2
}
701
+
702
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
//
// Like call(), but the caller supplies the (already narrowed) dispatch key
// set: no key extraction from the arguments and no RecordFunction profiling.
template<class Return, class... Args>
inline Return Dispatcher::redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const {
  detail::unused_arg_(args...);  // workaround for a false-positive warning about unused parameters in gcc 5
  // do not use RecordFunction on redispatch
#ifndef NDEBUG
  DispatchTraceNestingGuard debug_guard;
  if (show_dispatch_trace()) {
    detail::_print_dispatch_trace("[redispatch]", toString(op.operator_name()), currentDispatchKeySet);
  }
#endif
  const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet);
  return kernel.template call<Return, Args...>(op, currentDispatchKeySet, std::forward<Args>(args)...);
}
716
+
717
// Boxed counterpart of Dispatcher::call: arguments and results travel on the
// IValue stack. Mirrors the unboxed path, including the RecordFunction
// detour for observed operators (inputs here are already boxed, so no extra
// boxing work is needed).
inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
  // note: this doesn't need the mutex because write operations on the list keep iterators intact.
  const auto& entry = op.operatorDef_->op;
  auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
#ifndef NDEBUG
  DispatchTraceNestingGuard debug_guard;
  if (show_dispatch_trace()) {
    detail::_print_dispatch_trace("[callBoxed]", toString(op.operator_name()), dispatchKeySet);
  }
#endif
  const auto& kernel = entry.lookup(dispatchKeySet);
#ifndef PYTORCH_DISABLE_PER_OP_PROFILING
  auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
  if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) {
    at::RecordFunction guard(std::move(*step_callbacks));
    auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
    auto& schema = op.schema();
    auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
    guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
                        : runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);

    // keeping the guard alive while executing the kernel
    kernel.callBoxed(op, dispatchKeySet, stack);

    // After the call, the stack holds the kernel's outputs.
    if (C10_UNLIKELY(guard.needsOutputs())) {
      guard.setOutputs(*stack);
    }
    return;
  }
#endif // PYTORCH_DISABLE_PER_OP_PROFILING
  kernel.callBoxed(op, dispatchKeySet, stack);
}
749
+
750
+ // NB: this doesn't count as a "true" dispatcher jump, so no instrumentation
751
+ inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const {
752
+ // note: this doesn't need the mutex because write operations on the list keep iterators intact.
753
+ const auto& entry = op.operatorDef_->op;
754
+ // We still compute this as we're obligated to pass it on to the internal
755
+ // kernel, if it is a boxed fallback
756
+ auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
757
+ const auto& kernel = ([&]() {
758
+ if (op.hasKernelForDispatchKey(dk)) {
759
+ return entry.kernelForDispatchKey(dk);
760
+ } else {
761
+ auto idx = getDispatchTableIndexForDispatchKey(dk);
762
+ TORCH_INTERNAL_ASSERT(idx >= 0);
763
+ return backendFallbackKernels_[idx].kernel;
764
+ }
765
+ })();
766
+ kernel.callBoxed(op, dispatchKeySet, stack);
767
+ }
768
+
769
// Boxed counterpart of redispatch(): continue dispatch on a caller-provided
// key set, with no RecordFunction instrumentation.
inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const {
  // note: this doesn't need the mutex because write operations on the list keep iterators intact.
  const auto& entry = op.operatorDef_->op;
#ifndef NDEBUG
  DispatchTraceNestingGuard debug_guard;
  if (show_dispatch_trace()) {
    detail::_print_dispatch_trace("[redispatchBoxed]", toString(op.operator_name()), dispatchKeySet);
  }
#endif
  const auto& kernel = entry.lookup(dispatchKeySet);
  return kernel.callBoxed(op, dispatchKeySet, stack);
}
781
+
782
+ } // namespace c10
783
+
784
namespace std {

// Hash an OperatorHandle by the identity of its underlying OperatorDef
// pointer -- consistent with OperatorHandle::operator==, which compares
// the same pointer.
template <>
struct hash<c10::OperatorHandle> {
  size_t operator()(const c10::OperatorHandle& op) const noexcept {
    return std::hash<void*>{}(static_cast<void*>(op.operatorDef_));
  }
};

} // namespace std
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <ATen/core/operator_name.h>
#include <string>
#include <unordered_set>

namespace c10 {

// Static registry deciding which operators are "observed", i.e. eligible
// for RecordFunction-based profiling (see OperatorEntry::isObserved).
struct TORCH_API ObservedOperators {
  ObservedOperators() = delete; // purely static interface

  // True if the operator with this name should be observed.
  static bool isObserved(const OperatorName& name);

  // Mutable set of operator names that are explicitly NOT observed.
  // NOTE(review): entries are plain strings -- presumably stringified
  // OperatorNames; confirm against the .cpp definition.
  static std::unordered_set<std::string>& getUnobservedOperatorList();
};

} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/function_schema.h>
4
+ #include <c10/util/Metaprogramming.h>
5
+ #include <c10/util/flat_hash_map.h>
6
+ #include <c10/core/DispatchKey.h>
7
+ #include <c10/core/PyHandleCache.h>
8
+ #include <c10/core/SafePyObject.h>
9
+ #include <ATen/core/ivalue.h>
10
+ #include <ATen/core/boxing/KernelFunction.h>
11
+ #include <ATen/core/dispatch/DispatchKeyExtractor.h>
12
+
13
+ #include <ATen/core/dispatch/OperatorOptions.h>
14
+ #include <ATen/core/dispatch/CppSignature.h>
15
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
16
+ #include <ATen/core/enum_tag.h>
17
+
18
+ #include <optional>
19
+ #include <array>
20
+ #include <list>
21
+
22
+ #ifdef C10_MOBILE
23
+ #define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
24
+ #endif
25
+
26
+ namespace c10 {
27
+
28
+ class Dispatcher;
29
+
30
+ namespace impl {
31
+
32
+ // This data structure represents a kernel that was registered to us from a
33
+ // user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
34
+ // about the kernel that isn't necessary for actual dispatching (this is why
35
+ // we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
36
+ // giving good error messages.
37
+ struct AnnotatedKernel final {
38
+ AnnotatedKernel(KernelFunction k, std::unique_ptr<FunctionSchema> s, std::string d)
39
+ : kernel(std::move(k))
40
+ , inferred_function_schema(std::move(s))
41
+ , debug(std::move(d))
42
+ {}
43
+ AnnotatedKernel() = default;
44
+ KernelFunction kernel;
45
+ std::unique_ptr<FunctionSchema> inferred_function_schema;
46
+ // A little debug string to help us identify the kernel in question.
47
+ // Most importantly it records the TORCH_LIBRARY block that did the
48
+ // registration.
49
+ std::string debug;
50
+ };
51
+
52
+ // This data structure represents operator schema, with metadata specifying
53
+ // where the registration of this schema occurred
54
+ struct AnnotatedSchema final {
55
+ AnnotatedSchema(FunctionSchema s, std::string d)
56
+ : schema(std::move(s))
57
+ , debug(std::move(d))
58
+ {}
59
+ FunctionSchema schema;
60
+ std::string debug;
61
+ };
62
+
63
// Internal data structure that records information about a specific operator.
// It's not part of the public API; typically, users will interact with
// OperatorHandle instead.
//
// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
// lock (this is important because some methods in OperatorEntry access
// dispatcher state)
class TORCH_API OperatorEntry final {
public:
  explicit OperatorEntry(OperatorName&& operator_name);

  OperatorEntry(const OperatorEntry&) = delete;
  OperatorEntry(OperatorEntry&&) noexcept = delete;
  OperatorEntry& operator=(const OperatorEntry&) = delete;
  OperatorEntry& operator=(OperatorEntry&&) noexcept = delete;

  // Asserts (fatally) if no schema has been registered yet.
  const FunctionSchema& schema() const {
    TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet");
    return schema_->schema;
  }
  // Debug string recorded together with the schema registration.
  const std::string& debug() const {
    TORCH_INTERNAL_ASSERT(schema_.has_value());
    return schema_->debug;
  }
  bool hasSchema() const {
    return schema_.has_value();
  }

  // Whether calls to this operator are reported to RecordFunction.
  bool isObserved() const {
    return is_observed_;
  }

  // We may allocate an OperatorEntry for an operator even when we don't
  // have a schema. When we receive the schema registration, we post
  // facto register a schema.
  //
  // NB: registerSchema/deregisterSchema are not idempotent; if you
  // attempt to register a schema when one is already present or vice
  // versa that is an error. (Refcounting for the registrations is
  // handled in the OperatorHandle in Dispatcher)
  void registerSchema(FunctionSchema&&, std::string&& debug, std::vector<at::Tag> tags = {});
  void deregisterSchema();

  const OperatorName& operator_name() const {
    return name_;
  }

#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
  // On mobile only a single kernel per dispatch key is retained.
  using AnnotatedKernelContainer = std::array<AnnotatedKernel, 1>;
#else
  using AnnotatedKernelContainer = std::list<AnnotatedKernel>;
#endif
  using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator;

  // Why are kernels and fallback asymmetric? It has to do with ownership.
  // Kernels and the computed dispatch tables for them are canonically
  // owned by OperatorEntry, but backend fallbacks are specified once
  // and apply for all operators, so they should be owned by Dispatcher.
  // However, the registration of a backend fallback affects the
  // state of the computed dispatch table, so when a backend fallback
  // is updated, we need to update the operator tables too. Thus,
  // registerKernel is the mechanism by which we give kernels to
  // operator entry to own (and update dispatch table), but we only
  // need a non-owning mechanism to update fallback.

  // Precondition: Dispatcher::mutex_ is held
  // Postcondition: caller is responsible for disposing of the kernel
  AnnotatedKernelContainerIterator registerKernel(
    const Dispatcher& dispatcher,
    std::optional<DispatchKey> dispatch_key,
    KernelFunction kernel,
    std::optional<CppSignature> cpp_signature,
    std::unique_ptr<FunctionSchema> inferred_function_schema,
    std::string debug
  );

  // Precondition: Dispatcher::mutex_ is held
  void deregisterKernel_(
    const Dispatcher& dispatcher,
    std::optional<DispatchKey> dispatch_key,
    AnnotatedKernelContainerIterator kernel
  );

  // Precondition: Dispatcher::mutex_ is held
  void updateFallback(
    const Dispatcher& dispatcher,
    DispatchKey dispatch_key
  );

  // Precondition: Dispatcher::mutex_ is held
  void updateSchemaAliasAnalysis(AliasAnalysisKind a) {
    TORCH_INTERNAL_ASSERT(schema_.has_value());
    schema_->schema.setAliasAnalysis(a);
  }

  std::string dumpComputedTable() const;
  std::string dumpState() const;
  void checkInvariants() const;

  const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; }

  // Asserts that the given FuncType is correct for calling this operator in an unboxed way.
  template<class FuncType>
  inline void assertSignatureIsCorrect() {
    assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
  }

  void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;

  [[noreturn]] void reportError(DispatchKey dispatchKey) const;

  // Hot-path kernel lookup for a dispatch key set; raises a dispatch error
  // (via reportError, which does not return) if no valid kernel exists.
  const KernelFunction& lookup(DispatchKeySet ks) const {
    const auto idx = ks.getDispatchTableIndexForDispatchKeySet();
    if (C10_UNLIKELY(idx == -1)) {
      reportError(ks.highestPriorityTypeId());
    }
    const auto& kernel = dispatchTable_[idx];
    // A valid kernel *always* has a boxed kernel and *may* have an
    // unboxed kernel. However, we typically do unboxed calls in at::
    // APIs, where the kernel 1) will very likely be valid and 2)
    // should have an unboxed kernel. Checking the unboxed kernel
    // first will allow us to avoid touching the boxed kernel at all
    // in the common case.
    if (C10_UNLIKELY(!kernel.isValidUnboxed())) {
      if (!kernel.isValid()) {
        reportError(ks.highestPriorityTypeId());
      }
    }
    return kernel;
  }

  std::string listAllDispatchKeys() const;

  // Returns true if kernel_ has entry for any key in ks.
  //
  // Invariant: There are no alias keys in the passed-in dispatch key set.
  // Note [No Alias Keys in DispatchKeySet]
  // Alias keys should be checked using `hasKernelForDispatchKey`
  // Alias keys shouldn't go inside of a DispatchKeySet, since they can technically
  // have a value > 63 (causing overflow).
  bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const;
  // Returns true if kernel_ has entry for a particular key.
  bool hasKernelForDispatchKey(DispatchKey k) const;
  // Retrieves the kernel entry at a particular key. Symmetric with
  // hasKernelForDispatchKey. To get the AnnotatedKernel, see
  // getKernelForDispatchKey (private)
  const KernelFunction& kernelForDispatchKey(DispatchKey k) const;
  // Returns true if the "computed table" has an entry for a particular key.
  bool hasComputedKernelForDispatchKey(DispatchKey k) const;
  // Returns all the operator tags added at the time of registration
  const std::vector<at::Tag>& getTags() const;
  void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback);

  // Cached per-interpreter Python object for this op; slow_accessor is
  // presumably consulted on a cache miss (see PyHandleCache::ptr_or).
  template <typename F>
  PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const {
    return py_cache_.ptr_or(self_interpreter, slow_accessor);
  }

private:

  OperatorName name_;
  std::optional<AnnotatedSchema> schema_;
#ifndef C10_MOBILE
  std::vector<at::Tag> tags_;
#endif
  std::array<KernelFunction, c10::num_runtime_entries> dispatchTable_;
  DispatchKeyExtractor dispatchKeyExtractor_;
  // Pointer to the torch.ops.ns.op.overload object for speed
  c10::PyHandleCache py_cache_;

  // kernels_ stores all registered kernels for the corresponding dispatch key
  // and catchAllKernels_ stores the catch-all kernels.
  // If an operator library gets loaded that overwrites an already existing kernel,
  // both kernels will be in that list but only the newer one will be in
  // dispatchTable. If any of the kernels go away (say the library gets
  // unloaded), we remove the kernel from this list and update the
  // dispatchTable if necessary.
  // Kernels in the list are ordered by registration time descendingly,
  // newer registrations are before older registrations.
  // We do not combine dispatchTable and kernels into one hash map because
  // kernels is a larger data structure and accessed quite infrequently
  // while dispatchTable is accessed often and should be kept small to fit
  // into CPU caches.
  // Invariants:
  // - dispatchTable[dispatch_key] == kernels_[dispatch_key].front()
  // - dispatchTable[dispatch_key] does not exist if and only if
  //   kernels_[dispatch_key] does not exist
  // - If kernels_[dispatch_key] exists, then it has elements.
  //   It is never an empty list.
  //
  // Why do we do that?
  // -----
  // We mostly do this to enable Jupyter notebooks where a cell registering
  // a kernel could be executed multiple times and the later execution
  // should overwrite the earlier one. Note that this still fails when the
  // function schema changed between the executions, but it works as long
  // as the function schema didn't change. A better solution would be to
  // unload the old extension library from the Jupyter cell when the cell is
  // re-executed and then only allow one kernel here, i.e. error if a kernel
  // is already registered, but that's a lot of effort to implement and
  // currently not high-pri.
  ska::flat_hash_map<DispatchKey,
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
    // On mobile, we needn't worry about Jupyter notebooks.
    std::array<AnnotatedKernel, 1>
#else
    std::list<AnnotatedKernel>
#endif
  > kernels_;

  const AnnotatedKernel& missingKernel() const;
  const AnnotatedKernel& ambiguousAutogradOtherKernel() const;

  // cpp_signature_ stores function signature if any of
  // the kernels was created in a way that allowed us to know the function
  // signature (i.e. by supplying an unboxed C++ kernel function).
  // If this is set, it will be used to check that future kernel
  // registrations match and it will be used in unboxed function calls
  // to verify their arguments against the known function signature.
  struct CppSignatureWithDebug {
    CppSignature signature;
    std::string debug;
    std::optional<DispatchKey> dispatch_key;
  };
  std::optional<CppSignatureWithDebug> cpp_signature_;
  std::optional<CppSignatureWithDebug> sym_cpp_signature_;

  // A Python custom error handler for OperatorEntry::reportError
  std::unique_ptr<c10::SafePyObject> report_error_callback_;

  // Whether this operator needs to be observed with RecordFunction
  const bool is_observed_;

  [[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const;
  const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const;
  std::pair<const AnnotatedKernel&, const char*> computeDispatchTableEntryWithDebug(
    const c10::Dispatcher& dispatcher, DispatchKey dispatch_key
  ) const;
  // This function re-establishes the invariant that dispatchTable
  // contains the front element from the kernels list for a given runtime dispatch key.
  void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
  // Like above, but also handles alias dispatch keys.
  void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
  // Like above, but for ALL entries in the dispatch table.
  void updateDispatchTableFull_(const c10::Dispatcher& dispatcher);
  // Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front().
  const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const;
};
311
+
312
+ } // namespace impl
313
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
namespace c10 {

// How the JIT alias analysis should treat an operator's aliasing and
// side effects.
enum class AliasAnalysisKind : uint8_t {
  INTERNAL_SPECIAL_CASE,
  CONSERVATIVE, // The most conservative alias analysis type, assumes
                // side-effects. This is the default analysis.
  FROM_SCHEMA,
  PURE_FUNCTION
};

// Human-readable name of an AliasAnalysisKind; "UNKNOWN" for any value
// outside the enumerators.
#if !defined(_MSC_VER)
constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
#endif
inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
  switch (aliasAnalysisKind) {
    case AliasAnalysisKind::CONSERVATIVE:
      return "CONSERVATIVE";
    case AliasAnalysisKind::FROM_SCHEMA:
      return "FROM_SCHEMA";
    case AliasAnalysisKind::PURE_FUNCTION:
      return "PURE_FUNCTION";
    case AliasAnalysisKind::INTERNAL_SPECIAL_CASE:
      return "INTERNAL_SPECIAL_CASE";
    default:
      return "UNKNOWN";
  }
}

} // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <functional>
4
+
5
+ namespace c10 {
6
+
7
+ class RegistrationHandleRAII final {
8
+ public:
9
+ explicit RegistrationHandleRAII(std::function<void()> onDestruction)
10
+ : onDestruction_(std::move(onDestruction)) {}
11
+
12
+ ~RegistrationHandleRAII() {
13
+ if (onDestruction_) {
14
+ onDestruction_();
15
+ }
16
+ }
17
+
18
+ RegistrationHandleRAII(const RegistrationHandleRAII&) = delete;
19
+ RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete;
20
+
21
+ RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept
22
+ : onDestruction_(std::move(rhs.onDestruction_)) {
23
+ rhs.onDestruction_ = nullptr;
24
+ }
25
+
26
+ RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept {
27
+ onDestruction_ = std::move(rhs.onDestruction_);
28
+ rhs.onDestruction_ = nullptr;
29
+ return *this;
30
+ }
31
+
32
+ private:
33
+ std::function<void()> onDestruction_;
34
+ };
35
+
36
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from enum_tag.h
4
+
5
+ namespace at {
6
+ // Enum of valid tags obtained from the entries in tags.yaml
7
+ enum class Tag {
8
+ core,
9
+ data_dependent_output,
10
+ dynamic_output_shape,
11
+ flexible_layout,
12
+ generated,
13
+ inplace_view,
14
+ needs_fixed_stride_order,
15
+ nondeterministic_bitwise,
16
+ nondeterministic_seeded,
17
+ pointwise,
18
+ pt2_compliant_tag,
19
+ view_copy
20
+ };
21
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+
5
+ #include <utility>
6
+
7
+ namespace c10 {
8
+
9
+ struct EnumType;
10
+ using EnumTypePtr = std::shared_ptr<EnumType>;
11
+ using EnumNameValue = std::pair<std::string, IValue>;
12
+ struct TORCH_API EnumType : public NamedType {
13
+ friend struct Type;
14
+ static const TypeKind Kind = TypeKind::EnumType;
15
+
16
+ static EnumTypePtr create(
17
+ const c10::QualifiedName& qualified_class_name,
18
+ TypePtr value,
19
+ std::vector<EnumNameValue> enum_names_values,
20
+ std::weak_ptr<::torch::jit::CompilationUnit> cu) {
21
+ switch (value->kind()) {
22
+ case TypeKind::IntType:
23
+ case TypeKind::FloatType:
24
+ case TypeKind::StringType:
25
+ return EnumTypePtr(new EnumType(
26
+ qualified_class_name,
27
+ std::move(value),
28
+ std::move(enum_names_values),
29
+ std::move(cu)));
30
+ default:
31
+ AT_ERROR(
32
+ "Cannot create Enum with value type '",
33
+ value->str(),
34
+ "', only int, float and string are supported");
35
+ }
36
+ }
37
+
38
+ std::string str() const override {
39
+ return "Enum<" + annotation_str() + ">";
40
+ }
41
+
42
+ std::string repr_str() const override {
43
+ return str();
44
+ }
45
+
46
+ const TypePtr& getValueType() const {
47
+ return value_type_;
48
+ }
49
+
50
+ bool equals(const Type& rhs) const override {
51
+ if (auto* enum_rhs = rhs.castRaw<EnumType>()) {
52
+ return name().value() == enum_rhs->name().value() &&
53
+ *getValueType() == *(enum_rhs->getValueType()) &&
54
+ this->compilation_unit() == enum_rhs->compilation_unit();
55
+ }
56
+ return false;
57
+ }
58
+
59
+ bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
60
+
61
+ std::shared_ptr<const ::torch::jit::CompilationUnit> compilation_unit()
62
+ const {
63
+ auto cu = cu_.lock();
64
+ return cu;
65
+ }
66
+
67
+ const QualifiedName& qualifiedClassName() const {
68
+ return name().value();
69
+ }
70
+
71
+ at::ArrayRef<TypePtr> containedTypes() const override {
72
+ return value_type_;
73
+ }
74
+
75
+ const at::ArrayRef<EnumNameValue> enumNamesValues() const {
76
+ return enum_names_values_;
77
+ }
78
+
79
+ private:
80
+ EnumType(
81
+ c10::QualifiedName qualified_class_name,
82
+ TypePtr value_type,
83
+ std::vector<EnumNameValue> enum_names_values,
84
+ std::weak_ptr<torch::jit::CompilationUnit> cu)
85
+ : NamedType(TypeKind::EnumType, std::move(qualified_class_name)),
86
+ value_type_(std::move(value_type)),
87
+ enum_names_values_(std::move(enum_names_values)),
88
+ cu_(std::move(cu)) {}
89
+
90
+ std::string annotation_str_impl(
91
+ C10_UNUSED const TypePrinter& printer = nullptr) const override {
92
+ const auto& n = name().value();
93
+ return n.qualifiedName();
94
+ }
95
+
96
+ TypePtr value_type_;
97
+ std::vector<EnumNameValue> enum_names_values_;
98
+ std::weak_ptr<::torch::jit::CompilationUnit> cu_;
99
+ };
100
+
101
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/macros/Macros.h>
4
+
5
+ #include <ATen/core/aten_interned_strings.h>
6
+ #include <ATen/core/symbol.h>
7
+
8
+ namespace c10 {
9
+
10
+ #define FORALL_NS_SYMBOLS(_) \
11
+ _(namespaces, prim) \
12
+ _(namespaces, prims) \
13
+ _(namespaces, nvprims) \
14
+ _(namespaces, aten) \
15
+ _(namespaces, cuda) \
16
+ _(namespaces, onnx) \
17
+ _(namespaces, attr) \
18
+ _(namespaces, scope) \
19
+ _(namespaces, user) \
20
+ _(namespaces, _caffe2) \
21
+ _(namespaces, dimname) \
22
+ _(namespaces, namespaces) \
23
+ _(prim, Assign) \
24
+ _(prim, BroadcastingChunk) \
25
+ _(prim, BroadcastSizes) \
26
+ _(prim, ReductionSizes) \
27
+ _(prim, Constant) \
28
+ _(prim, ChunkSizes) \
29
+ _(prim, ConstantMKLDNNTensor) \
30
+ _(prim, BroadcastMKLDNNTensors) \
31
+ _(prim, MKLDNNGroup) \
32
+ _(prim, MKLDNNHardSwish) \
33
+ _(prim, MKLDNNHardSigmoid) \
34
+ _(prim, MKLDNNHardTanh) \
35
+ _(prim, MKLDNNClamp) \
36
+ _(prim, StaticRuntimeCopyOuts) \
37
+ _(prim, Drop) \
38
+ _(prim, Eval) \
39
+ _(prim, Expand) /* onnx */ \
40
+ _(prim, FusionGroup) \
41
+ _(prim, CudaFusionGroup) \
42
+ _(prim, CudaFusionGuard) \
43
+ _(prim, oneDNNFusionGroup) \
44
+ _(prim, oneDNNFusionGuard) \
45
+ _(prim, FunctionalGraph) \
46
+ _(prim, add_optional) \
47
+ _(prim, view_copy) \
48
+ _(prim, permute_copy) \
49
+ _(prim, reshape_copy) \
50
+ _(prim, squeeze_copy) \
51
+ _(prim, t_copy) \
52
+ _(prim, transpose_copy) \
53
+ _(prim, unsqueeze_copy) \
54
+ _(prim, flatten_copy) \
55
+ _(prim, expand_copy) \
56
+ _(prim, expand_as_copy) \
57
+ _(prim, DifferentiableGraph) \
58
+ _(prim, TensorExprGroup) \
59
+ _(prim, TensorExprDynamicGroup) \
60
+ _(prim, StaticSubgraph) \
61
+ _(prim, If) \
62
+ _(prim, Jump) /* debug */ \
63
+ _(prim, JumpNZ) /* debug */ \
64
+ _(prim, JumpZ) /* debug */ \
65
+ _(prim, Load) \
66
+ _(prim, Loop) \
67
+ _(prim, Param) \
68
+ _(prim, PackPadded) /* onnx */ \
69
+ _(prim, PadPacked) /* onnx */ \
70
+ _(prim, Placeholder) /* debug */ \
71
+ _(prim, Print) \
72
+ _(prim, EmptyListLiteral) \
73
+ _(prim, LegacyTypedConstructor) \
74
+ _(prim, PythonOp) \
75
+ _(prim, IgnoredPythonOp) \
76
+ _(prim, Reverse) \
77
+ _(prim, Return) \
78
+ _(prim, ReturnStmt) \
79
+ _(prim, BreakStmt) \
80
+ _(prim, ContinueStmt) \
81
+ _(prim, ComprehensionScope) \
82
+ _(prim, Store) \
83
+ _(prim, AutogradZero) \
84
+ _(prim, AutogradAnyNonZero) \
85
+ _(prim, AutogradAllNonZero) \
86
+ _(prim, AutogradAllZero) \
87
+ _(prim, Starred) \
88
+ _(prim, TupleConstruct) \
89
+ _(prim, TupleUnpack) \
90
+ _(prim, TupleIndex) \
91
+ _(prim, TupleSlice) \
92
+ _(prim, ListConstruct) \
93
+ _(prim, ListUnpack) \
94
+ _(prim, DictConstruct) \
95
+ _(prim, ModuleContainerIndex) \
96
+ _(prim, EnumName) \
97
+ _(prim, EnumValue) \
98
+ _(prim, StringIndex) \
99
+ _(prim, NumToTensor) \
100
+ _(prim, Uninitialized) \
101
+ _(prim, VarConcat) \
102
+ _(prim, VarStack) \
103
+ _(prim, With) \
104
+ _(prim, Enter) \
105
+ _(prim, Exit) \
106
+ _(prim, IfThenElse) \
107
+ _(aten, Bool) \
108
+ _(aten, Int) \
109
+ _(aten, FloatImplicit) \
110
+ _(aten, ComplexImplicit) \
111
+ _(aten, IntImplicit) \
112
+ _(aten, ScalarImplicit) \
113
+ _(aten, Float) \
114
+ _(aten, Complex) \
115
+ _(aten, str) \
116
+ _(aten, Delete) \
117
+ _(prim, device) \
118
+ _(prim, dtype) \
119
+ _(prim, layout) \
120
+ _(prim, id) \
121
+ _(prim, requires_grad) \
122
+ _(prim, MakeTestTensor) /* test */ \
123
+ _(prim, AutogradAdd) \
124
+ _(prim, GradOf) \
125
+ _(aten, grad) \
126
+ _(aten, backward) \
127
+ _(prim, Guard) \
128
+ _(prim, BailOut) \
129
+ _(prim, TypeCheck) \
130
+ _(prim, RequiresGradCheck) \
131
+ _(prim, FallbackGraph) \
132
+ _(prim, FusedConcat) \
133
+ _(prim, ConstantChunk) \
134
+ _(prim, MMTreeReduce) \
135
+ _(prim, MMBatchSide) \
136
+ _(prim, list) \
137
+ _(prim, dict) \
138
+ _(prim, min) \
139
+ _(prim, max) \
140
+ _(prim, abs) \
141
+ _(aten, divmod) \
142
+ _(prim, zip) \
143
+ _(prim, enumerate) \
144
+ _(prim, range) \
145
+ _(prim, rangelist) \
146
+ _(prim, isinstance) \
147
+ _(prim, tolist) \
148
+ _(prim, unchecked_cast) \
149
+ _(aten, _grad_sum_to_size) \
150
+ _(aten, _size_if_not_equal) \
151
+ _(aten, _ncf_unsqueeze) \
152
+ _(aten, warn) \
153
+ _(aten, sorted) \
154
+ _(aten, floordiv) \
155
+ _(aten, __range_length) \
156
+ _(aten, __derive_index) \
157
+ _(aten, __round_to_zero_floordiv) \
158
+ _(aten, is_scripting) \
159
+ _(aten, _unwrap_optional) \
160
+ _(prim, fork) \
161
+ _(prim, awaitable) \
162
+ _(prim, forkClosure) \
163
+ _(prim, awaitableClosure) \
164
+ _(prim, awaitable_nowait) \
165
+ _(prim, awaitable_wait) \
166
+ _(prim, RaiseException) \
167
+ _(prim, Closure) \
168
+ _(prim, CreateObject) \
169
+ _(prim, SetAttr) \
170
+ _(prim, GetAttr) \
171
+ _(prim, HasAttr) \
172
+ _(prim, profile) \
173
+ _(prim, profile_ivalue) \
174
+ _(prim, AddStatValue) \
175
+ _(prim, TimePoint) \
176
+ _(prim, CallFunction) \
177
+ _(prim, CallMethod) \
178
+ _(prim, LoopContinuation) \
179
+ _(prim, annotate) \
180
+ _(prim, TracedModuleForward) \
181
+ _(prim, TracedFork) \
182
+ _(prim, TracedAttr) \
183
+ _(prim, rpc_async) \
184
+ _(prim, rpc_sync) \
185
+ _(prim, rpc_remote) \
186
+ _(prim, is_cuda) \
187
+ _(aten, append) \
188
+ _(aten, as_tensor) \
189
+ _(aten, adaptive_avg_pool2d_backward) \
190
+ _(aten, dim) \
191
+ _(aten, format) \
192
+ _(aten, percentFormat) \
193
+ _(aten, __not__) \
194
+ _(aten, __is__) \
195
+ _(aten, __isnot__) \
196
+ _(aten, _ger) \
197
+ _(aten, __getitem__) \
198
+ _(aten, _set_item) \
199
+ _(aten, manual_seed) \
200
+ _(aten, device) \
201
+ _(aten, hash) \
202
+ _(aten, len) \
203
+ _(aten, list) \
204
+ _(aten, dict) \
205
+ _(aten, wait) \
206
+ _(aten, save) \
207
+ _(aten, keys) \
208
+ _(aten, ord) \
209
+ _(aten, chr) \
210
+ _(aten, hex) \
211
+ _(aten, oct) \
212
+ _(aten, clear) \
213
+ _(aten, setdefault) \
214
+ _(aten, bin) \
215
+ _(aten, pop) \
216
+ _(aten, insert) \
217
+ _(aten, tensor) \
218
+ _(prim, unchecked_unwrap_optional) \
219
+ _(aten, __contains__) \
220
+ _(prim, BailoutTemplate) \
221
+ _(prim, grad) \
222
+ _(cuda, _set_device) \
223
+ _(cuda, set_stream) \
224
+ _(cuda, _current_device) \
225
+ _(cuda, synchronize) \
226
+ _(aten, has_torch_function) \
227
+ _(aten, is_autocast_enabled) \
228
+ _(aten, is_autocast_cpu_enabled) \
229
+ _(aten, is_autocast_xla_enabled) \
230
+ _(aten, get_autocast_dtype) \
231
+ _(aten, is_autocast_mps_enabled) \
232
+ FORALL_ATEN_BASE_SYMBOLS(_) \
233
+ _(onnx, Add) \
234
+ _(onnx, Concat) \
235
+ _(onnx, Constant) \
236
+ _(onnx, ConstantFill) \
237
+ _(onnx, Div) \
238
+ _(onnx, GRU) \
239
+ _(onnx, Gather) \
240
+ _(onnx, Gemm) \
241
+ _(onnx, LSTM) \
242
+ _(onnx, MatMul) \
243
+ _(onnx, Min) \
244
+ _(onnx, Max) \
245
+ _(onnx, Mul) \
246
+ _(onnx, Pow) \
247
+ _(onnx, RNN) \
248
+ _(onnx, Shape) \
249
+ _(onnx, Size) \
250
+ _(onnx, Slice) \
251
+ _(onnx, Softmax) \
252
+ _(onnx, Squeeze) \
253
+ _(onnx, Sub) \
254
+ _(onnx, Transpose) \
255
+ _(onnx, Unsqueeze) \
256
+ _(onnx, Loop) \
257
+ _(onnx, If) \
258
+ _(onnx, Reshape) \
259
+ _(onnx, Expand) \
260
+ _(onnx, Equal) \
261
+ _(onnx, Greater) \
262
+ _(onnx, GreaterOrEqual) \
263
+ _(onnx, Less) \
264
+ _(onnx, LessOrEqual) \
265
+ _(onnx, Not) \
266
+ _(aten, ATen) \
267
+ _(onnx, Split) \
268
+ _(onnx, ConstantOfShape) \
269
+ _(onnx, Cast) \
270
+ _(onnx, Mod) \
271
+ _(onnx, Sqrt) \
272
+ _(onnx, SplitToSequence) \
273
+ _(onnx, SequenceAt) \
274
+ _(onnx, SequenceConstruct) \
275
+ _(onnx, SequenceEmpty) \
276
+ _(onnx, SequenceInsert) \
277
+ _(onnx, SequenceErase) \
278
+ _(onnx, ConcatFromSequence) \
279
+ _(onnx, Identity) \
280
+ _(onnx, SoftmaxCrossEntropyLoss) \
281
+ _(onnx, NegativeLogLikelihoodLoss) \
282
+ _(onnx, LogSoftmax) \
283
+ _(onnx, ReduceL1) \
284
+ _(onnx, ReduceL2) \
285
+ _(onnx, Conv) \
286
+ _(onnx, BatchNormalization) \
287
+ _(onnx, ReduceMean) \
288
+ _(onnx, ReduceProd) \
289
+ _(onnx, Relu) \
290
+ _(onnx, Neg) \
291
+ _(onnx, NonZero) \
292
+ _(onnx, Range) \
293
+ _(onnx, Tile) \
294
+ _(onnx, Where) \
295
+ _(onnx, Optional) \
296
+ _(onnx, OptionalGetElement) \
297
+ _(onnx, OptionalHasElement) \
298
+ FORALL_ATTR_BASE_SYMBOLS(_) \
299
+ _(attr, Subgraph) \
300
+ _(attr, ReverseSubgraph) \
301
+ _(attr, f_real_outputs) \
302
+ _(attr, df_input_vjps) \
303
+ _(attr, df_input_captured_inputs) \
304
+ _(attr, df_input_captured_outputs) \
305
+ _(attr, df_output_vjps) \
306
+ _(attr, axes) \
307
+ _(attr, symbolic_shape_inputs) \
308
+ _(attr, allow_stack_outputs) \
309
+ _(attr, striding_inputs_desc) \
310
+ _(attr, striding_outputs_desc) \
311
+ _(attr, broadcast) \
312
+ _(attr, direction) \
313
+ _(attr, ends) \
314
+ _(attr, inplace) \
315
+ _(attr, input_as_shape) \
316
+ _(attr, is_zero) \
317
+ _(attr, num_none) \
318
+ _(attr, num_present) \
319
+ _(attr, perm) \
320
+ _(attr, starts) \
321
+ _(attr, profiled_type) \
322
+ _(attr, transA) \
323
+ _(attr, transB) \
324
+ _(attr, name) \
325
+ _(attr, module) \
326
+ _(attr, beg) \
327
+ _(attr, idx) \
328
+ _(attr, split) \
329
+ _(attr, slot) \
330
+ _(attr, kinds) \
331
+ _(attr, types) \
332
+ _(attr, scope) \
333
+ _(attr, keepdims) \
334
+ _(attr, cache_id) \
335
+ _(attr, new_axis) \
336
+ _(attr, warn_id) \
337
+ _(attr, output_layouts) \
338
+ _(attr, allowzero) \
339
+ _(attr, seen_none) \
340
+ _(attr, overload_name) \
341
+ _(attr, node_stack_idx)
342
+
343
+ enum class _keys : unique_t {
344
+ #define DEFINE_KEY(ns, s) ns##_##s,
345
+ FORALL_NS_SYMBOLS(DEFINE_KEY)
346
+ #undef DEFINE_KEY
347
+ num_symbols
348
+ };
349
+
350
+ #define DEFINE_SYMBOL(ns, s) \
351
+ namespace ns { constexpr Symbol s(static_cast<unique_t>(_keys::ns##_##s)); }
352
+ FORALL_NS_SYMBOLS(DEFINE_SYMBOL)
353
+ #undef DEFINE_SYMBOL
354
+
355
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <ATen/TensorUtils.h>
5
+ #include <ATen/core/List.h>
6
+ #include <c10/core/TensorOptions.h>
7
+
8
+ /*
9
+ * [Note: hacky wrapper removal for optional tensor]
10
+ *
11
+ * The kernel implementation takes an optional tensor marked in the schema as
12
+ * Tensor? but the C++ function takes Tensor instead of the std::optional<Tensor>
13
+ * expected by the dispatcher.
14
+ *
15
+ * To remove the hacky wrapper, the C++ function is changed to take
16
+ * std::optional<Tensor> and unwrap the Tensor value at the beginning of
17
+ * the function, e.g.:
18
+ * > c10::MaybeOwned<Tensor> weight_maybe_owned =
19
+ * > at::borrow_from_optional_tensor(weight_opt);
20
+ * > const Tensor& weight = *weight_maybe_owned;
21
+ *
22
+ * We may want to make the kernel handle optional directly without
23
+ * going through the creation of a default-constructed Tensor in
24
+ * at::borrow_from_optional_tensor.
25
+ */
26
+
27
+ /*
28
+ * [Note: hacky wrapper removal for TensorOptions]
29
+ *
30
+ * The kernel implementation takes a TensorOptions argument but the dispatcher
31
+ * expects separate arguments for dtype, layout, device, pin_memory.
32
+ *
33
+ * To remove the hacky wrapper, the kernel implementation is changed to take
34
+ * the 4 arguments (dtype, layout, device, pin_memory), and assemble the
35
+ * TensorOptions value at the beginning of the function, e.g.:
36
+ * > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
37
+ * > .device(device).pinned_memory(pin_memory);
38
+ *
39
+ * We may want make the kernel handle these parameters directly without going
40
+ * through the creation of a TensorOptions value.
41
+ */
42
+
43
+ namespace c10 {
44
+ namespace impl {
45
+
46
+ TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
47
+
48
+ inline void check_and_update_common_device(std::optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
49
+ // TODO: Remove this once the following issue is addressed:
50
+ // https://github.com/pytorch/pytorch/issues/57380
51
+ if (!tensor.defined()) {
52
+ return;
53
+ }
54
+
55
+ if (!common_device.has_value()) {
56
+ common_device = tensor.device();
57
+ return;
58
+ }
59
+
60
+ if (C10_UNLIKELY(common_device != tensor.device())) {
61
+ common_device_check_failure(*common_device, tensor, methodName, argName);
62
+ }
63
+ }
64
+
65
+ inline void check_and_update_common_device(std::optional<Device>& common_device, const std::optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
66
+ if (tensor.has_value()) {
67
+ check_and_update_common_device(common_device, tensor.value(), methodName, argName);
68
+ }
69
+ }
70
+
71
+ inline void check_and_update_common_device(std::optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
72
+ for (const auto& tensor : tensors) {
73
+ check_and_update_common_device(common_device, tensor, methodName, argName);
74
+ }
75
+ }
76
+
77
+ inline void check_and_update_common_device(std::optional<Device>& common_device, const List<std::optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
78
+ for (const auto& tensor : tensors) {
79
+ check_and_update_common_device(common_device, tensor, methodName, argName);
80
+ }
81
+ }
82
+ } // namespace impl
83
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /**
4
+ * This file contains functionality to take a C++ function and infer its
5
+ * c10::FunctionSchema.
6
+ */
7
+
8
+ #include <ATen/core/function_schema.h>
9
+ #include <c10/util/Metaprogramming.h>
10
+
11
+ namespace c10 {
12
+ namespace detail {
13
+
14
+ namespace infer_schema {
15
+
16
+ /// The templated inference code creates `ArgumentDef` instead of `Argument`,
17
+ /// because that can be constructed at compile time and has a much smaller
18
+ /// binary size than having calls to `Argument` constructors in the template.
19
+ /// Creating `Argument` objects from `ArgumentDef` can then be done at
20
+ /// runtime in a non-templated way.
21
+ struct ArgumentDef final {
22
+ using GetTypeFn = TypePtr();
23
+ GetTypeFn* getTypeFn;
24
+ GetTypeFn* getFakeTypeFn;
25
+ constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
26
+ explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
27
+ };
28
+
29
+ template<bool V>
30
+ struct bool_t {};
31
+ template<> struct bool_t<true> : std::true_type {};
32
+ template<> struct bool_t<false> : std::false_type {};
33
+
34
+ /// Checks the static C++ types `Types` for correctness to catch common error cases.
35
+ template <class... Types>
36
+ constexpr int checkStaticTypes() {
37
+ // Give nice error messages for some of the common error cases.
38
+ // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
39
+ static_assert(std::conjunction<
40
+ bool_t<!std::is_integral<Types>::value || std::is_same<Types, int8_t>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
41
+ >::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
42
+ static_assert(std::conjunction<
43
+ bool_t<!std::is_same<Types, float>::value>...
44
+ >::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
45
+ return 0;
46
+ }
47
+
48
+ template <typename... Ts, size_t... Is>
49
+ constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
50
+ return (
51
+ // Check types for common errors
52
+ checkStaticTypes<Ts...>(),
53
+
54
+ // Create the return value
55
+ std::array<ArgumentDef, sizeof...(Ts)>{
56
+ ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
57
+ );
58
+ }
59
+
60
+ /// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
61
+ /// as template arguments.
62
+ template<class ParameterTypes> struct createArguments final {};
63
+ template<class... ParameterTypes>
64
+ struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
65
+ static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
66
+ return createArgumentVectorFromTypes<ParameterTypes...>(
67
+ std::make_index_sequence<sizeof...(ParameterTypes)>()
68
+ );
69
+ }
70
+ };
71
+
72
+ /// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
73
+ /// as a tuple (i.e. in the way c10 kernels return values).
74
+ /// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
75
+ /// It can be an empty tuple<>, or void for kernels that don't return anything.
76
+ /// It can be a single type A (i.e. no tuple) for the case where a kernel just
77
+ /// returns one value.
78
+ template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};
79
+
80
+ template<class... ReturnTypes>
81
+ struct createReturns<std::tuple<ReturnTypes...>, void> final {
82
+ static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
83
+ return createArgumentVectorFromTypes<ReturnTypes...>(
84
+ std::make_index_sequence<sizeof...(ReturnTypes)>()
85
+ );
86
+ }
87
+ };
88
+
89
+ template<class ReturnType>
90
+ struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
91
+ static constexpr std::array<ArgumentDef, 1> call() {
92
+ return createReturns<std::tuple<ReturnType>>::call();
93
+ }
94
+ };
95
+
96
+ template<>
97
+ struct createReturns<void, void> final {
98
+ static constexpr std::array<ArgumentDef, 0> call() {
99
+ return createReturns<std::tuple<>>::call();
100
+ }
101
+ };
102
+
103
+ template <typename ReturnType>
104
+ struct createSingleReturn {
105
+ static constexpr std::array<ArgumentDef, 1> call() {
106
+ return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
107
+ }
108
+ };
109
+
110
+ TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
111
+ TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
112
+
113
+ /// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
114
+ /// function. Flattens std::tuple returns into multiple return types
115
+ template <typename FunctionTraits>
116
+ FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() {
117
+ using ReturnType = typename FunctionTraits::return_type;
118
+ using ParameterTypes = typename FunctionTraits::parameter_types;
119
+
120
+ // arguments and returns are computed into a std::array at compile time and embedded into the binary.
121
+ // The only code executed at runtime here is the one that creates a std::vector
122
+ // of the arguments/returns from the std::array.
123
+ constexpr auto arguments = createArguments<ParameterTypes>::call();
124
+ constexpr auto returns = createReturns<ReturnType>::call();
125
+
126
+ return make_function_schema(arguments, returns);
127
+ }
128
+
129
+ /// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
130
+ /// function. Preserves std::tuple returns as a Tuple return type
131
+ template <typename FunctionTraits>
132
+ FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) {
133
+ using ReturnType = typename FunctionTraits::return_type;
134
+ using ParameterTypes = typename FunctionTraits::parameter_types;
135
+
136
+ // arguments and returns are computed into a std::array at compile time and embedded into the binary.
137
+ // The only code executed at runtime here is the one that creates a std::vector
138
+ // of the arguments/returns from the std::array.
139
+ constexpr auto arguments = createArguments<ParameterTypes>::call();
140
+ constexpr auto returns = createSingleReturn<ReturnType>::call();
141
+
142
+ return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
143
+ }
144
+
145
+ }
146
+ }
147
+
148
+ template<class FuncType>
149
+ FunctionSchema inferFunctionSchemaFlattenedReturns() {
150
+ return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns<guts::infer_function_traits_t<FuncType>>();
151
+ }
152
+
153
+ template<class FuncType>
154
+ FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) {
155
+ return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
156
+ }
157
+
158
+ TORCH_API std::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
159
+
160
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // TODO: unify to C10_MOBILE. In theory this header could be used in OSS.
4
+ #ifdef TEMPLATE_SELECTIVE_BUILD
5
+ #include <ATen/selected_mobile_ops.h>
6
+ #endif
7
+
8
+ /**
9
+ * This header implements functionality to build PyTorch with only a certain
10
+ * set of operators (+ dependencies) included.
11
+ *
12
+ * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these
13
+ * two ops will be included in your build. The allowlist records operators
14
+ * only, no overloads; if you include aten::add, all overloads of aten::add
15
+ * will be included.
16
+ *
17
+ * Internally, this is done by removing the operator registration calls
18
+ * using compile time programming, and the linker will then prune all
19
+ * operator functions that weren't registered.
20
+ * See Note [Selective build] for more details
21
+ *
22
+ * WARNING: The allowlist mechanism doesn't work for all ways you could go about
23
+ * registering an operator. If the dispatch key / operator name is not
24
+ * sufficiently obvious at compile time, then the allowlisting mechanism
25
+ * will fail (and the operator will be included in the binary anyway).
26
+ */
27
+
28
+ #include <c10/util/string_view.h>
29
+ #include <c10/core/DispatchKey.h>
30
+ #include <c10/macros/Macros.h>
31
+
32
+
33
+ #if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
34
+ #include <ATen/record_function.h>
35
+ #endif
36
+
37
+ namespace c10 {
38
+
39
+ namespace impl {
40
+
41
+ constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare
42
+
43
+ /**
44
+ * In selective build mode returns true/false depending on whether a build
45
+ * feature is available or not.
46
+ *
47
+ * In instrumenting mode (tracing mode), always returns true, and doesn't
48
+ * trigger any side effects.
49
+ */
50
+ constexpr bool is_build_feature_available(const char* name) {
51
+ #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
52
+ // Selective Build mode.
53
+ #if !defined(TORCH_BUILD_FEATURE_ALLOWLIST)
54
+ (void)name;
55
+ return true;
56
+ #else
57
+ return allowlist_contains(
58
+ C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST),
59
+ name);
60
+ #endif
61
+
62
+ #else
63
+ // Instrumenting mode.
64
+ (void)name;
65
+ return true;
66
+ #endif
67
+ }
68
+
69
+ [[noreturn]] void build_feature_required_feature_not_available(const char* feature);
70
+
71
+ /**
72
+ * Use BUILD_FEATURE_REQUIRED macro in user-code.
73
+ *
74
+ * In selective build mode becomes a no-op if the build feature passed
75
+ * in is available. If not available, throws an exception (c10::Error).
76
+ * The compiler is able to perform dead code elimination for code
77
+ * following this method if the build feature is not available.
78
+ *
79
+ * In instrumenting mode (tracing mode), registers (as a side effect)
80
+ * the presence of this specific build feature being triggered.
81
+ */
82
+ #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode
83
+
84
+ #if defined(TORCH_BUILD_FEATURE_ALLOWLIST)
85
+ #define BUILD_FEATURE_REQUIRED(NAME) \
86
+ if (!c10::impl::is_build_feature_available(NAME)) { \
87
+ ::c10::impl::build_feature_required_feature_not_available(NAME); \
88
+ }
89
+ #else // Everything trivially selected
90
+ #define BUILD_FEATURE_REQUIRED(NAME)
91
+
92
+ #endif
93
+
94
+ #else // trace mode
95
+ #define BUILD_FEATURE_REQUIRED(NAME) \
96
+ RECORD_FUNCTION_WITH_SCOPE( \
97
+ at::RecordScope::BUILD_FEATURE, \
98
+ std::string(NAME), \
99
+ {});
100
+ #endif
101
+
102
+ // Use this macro, and not is_build_feature_available
103
+ #define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME)
104
+
105
+ // returns true iff allowlist contains item
106
+ // allowlist_contains("a;bc;d", "bc") == true
107
+ constexpr bool allowlist_contains(string_view allowlist, string_view item) {
108
+ //Choose a really big value for next so that if something goes wrong
109
+ //this code will blow up in a hopefully detectable way.
110
+ size_t next = std::numeric_limits<size_t>::max();
111
+ for (size_t cur = 0; cur <= allowlist.size(); cur = next) {
112
+ next = allowlist.find(';', cur);
113
+ if (next != string_view::npos) {
114
+ if (allowlist.substr(cur, next - cur).compare(item) == 0) {
115
+ return true;
116
+ }
117
+ next++;
118
+ } else {
119
+ if (allowlist.substr(cur).compare(item) == 0) {
120
+ return true;
121
+ }
122
+ break;
123
+ }
124
+ }
125
+ return false;
126
+ }
127
+
128
+ // Returns true iff the given op name is on the allowlist
129
+ // and should be registered
130
+ constexpr bool op_allowlist_check(string_view op_name [[maybe_unused]]) {
131
+ assert(op_name.find("::") != string_view::npos);
132
+ // Use assert() instead of throw() due to a gcc bug. See:
133
+ // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
134
+ // https://github.com/fmtlib/fmt/issues/682
135
+ assert(op_name.find("(") == string_view::npos);
136
+ #if !defined(TORCH_OPERATOR_WHITELIST)
137
+ // If the TORCH_OPERATOR_WHITELIST parameter is not defined,
138
+ // all ops are to be registered
139
+ return true;
140
+ #else
141
+ return allowlist_contains(
142
+ C10_STRINGIZE(TORCH_OPERATOR_WHITELIST),
143
+ // This function is majorly used for mobile selective build with
144
+ // root operators, where the overload is included in the allowlist.
145
+ op_name);
146
+ // // Strip overload name (as allowlist doesn't contain overloads)
147
+ // // Another function based on this may be added when there's usage
148
+ // // on op names without overload.
149
+ // OperatorNameView::parse(op_name).name);
150
+ #endif
151
+ }
152
+
153
+ // Returns true iff the given schema string is on the allowlist
154
+ // and should be registered
155
+ constexpr bool schema_allowlist_check(string_view schema) {
156
+ #if defined(TORCH_FORCE_SCHEMA_REGISTRATION)
157
+ return true;
158
+ #else
159
+ return op_allowlist_check(schema.substr(0, schema.find("(")));
160
+ #endif
161
+ }
162
+
163
+ // Returns true iff the given custom class name is on the allowlist
164
+ // and should be registered
165
+ constexpr bool custom_class_allowlist_check(string_view custom_class_name) {
166
+ #if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST)
167
+ // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined,
168
+ // all custom classes are to be registered
169
+ (void)custom_class_name;
170
+ return true;
171
+ #else
172
+ return allowlist_contains(
173
+ C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST),
174
+ custom_class_name);
175
+ #endif
176
+ }
177
+
178
+ // schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST.
179
+ // Add this API to pass arbitrary allowlist.
180
+ constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) {
181
+ return allowlist_contains(allowlist, schema.substr(0, schema.find("(")));
182
+ }
183
+
184
+ // Returns true iff the given dispatch key is on the allowlist
185
+ // and should be registered. When we turn this on, the list of valid
186
+ // mobile dispatch keys is hard coded (but you need to make sure
187
+ // that you have the correct set of dispatch keys for this).
188
+ constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) {
189
+ #ifdef C10_MOBILE
190
+ return true;
191
+ // Disabled for now: to be enabled later!
192
+ // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
193
+ #else
194
+ return true;
195
+ #endif
196
+ }
197
+
198
+ } // namespace impl
199
+ } // namespace c10
videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h ADDED
@@ -0,0 +1,596 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /**
4
+ * Include this file if you want to register operators. It includes all
5
+ * functionality needed to do so for you.
6
+ */
7
+
8
+ #include <c10/core/DispatchKey.h>
9
+ #include <c10/core/DispatchKeySet.h>
10
+ #include <c10/core/CompileTimeFunctionPointer.h>
11
+ #include <ATen/core/boxing/KernelFunction.h>
12
+ #include <ATen/core/dispatch/CppSignature.h>
13
+ #include <ATen/core/dispatch/RegistrationHandleRAII.h>
14
+ #include <ATen/core/op_registration/infer_schema.h>
15
+ #if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD)
16
+ #include <torch/csrc/jit/frontend/function_schema_parser.h>
17
+ #endif
18
+ #include <ATen/core/ATenOpList.h>
19
+
20
+ namespace c10 {
21
+
22
namespace detail {
// The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
// We do this because every argument in a function schema is expected to be convertable
// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
// See Note [Plumbing Keys Through The Dispatcher]
//
// Infers a FunctionSchema from the call signature of `KernelFunctor`,
// returning it heap-allocated so callers can hold an optional schema.
template<class KernelFunctor>
std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
  // Strip a leading DispatchKeySet parameter (if any) before inference;
  // tuple returns are flattened into multiple schema returns by the helper.
  using func_type = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::func_type;
  return std::make_unique<FunctionSchema>(inferFunctionSchemaFlattenedReturns<func_type>());
}
}
33
+
34
/**
 * An instance of this class handles the registration for one or more operators.
 * Make sure you keep the RegisterOperators instance around since it will
 * deregister the operator it's responsible for in its destructor.
 *
 * Example:
 *
 * > namespace {
 * >   class my_kernel_cpu final : public c10::OperatorKernel {
 * >   public:
 * >     Tensor operator()(Tensor a, Tensor b) {...}
 * >   };
 * > }
 * >
 * > static auto registry = c10::RegisterOperators()
 * >     .op(c10::RegisterOperators::options()
 * >         .schema("my_op")
 * >         .kernel<my_kernel_cpu>(DispatchKey::CPU));
 */
class TORCH_API RegisterOperators final {
public:
  RegisterOperators() = default;
  ~RegisterOperators() = default;

  // Non-copyable: each instance owns RAII registration handles (registrars_)
  // that deregister its operators on destruction. Move transfers ownership.
  RegisterOperators(const RegisterOperators&) = delete;
  RegisterOperators& operator=(const RegisterOperators&) = delete;
  RegisterOperators(RegisterOperators&&) noexcept = default;
  RegisterOperators& operator=(RegisterOperators&&) noexcept = default;

  /**
   * Builder that accumulates the configuration (schema, kernels, alias
   * analysis) for one operator registration. Obtain via
   * RegisterOperators::options(); it is consumed by RegisterOperators::op().
   * All mutators are rvalue-qualified (&&) so calls chain on a temporary.
   */
  class TORCH_API Options final {
  public:
    Options(const Options&) = delete;
    Options(Options&&) noexcept = delete;
    Options& operator=(const Options&) = delete;
    Options& operator=(Options&&) noexcept = delete;

    // internal-only for registering stack based kernels
    template<KernelFunction::BoxedKernelFunction* kernel_func>
    Options&& kernel(DispatchKey dispatch_key) && {
      return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction<kernel_func>(), std::nullopt, nullptr);
    }

    // internal-only for registering stack based catch-all kernels
    template<KernelFunction::BoxedKernelFunction* kernel_func>
    Options&& catchAllKernel() && {
      return std::move(*this).kernel(std::nullopt, KernelFunction::makeFromBoxedFunction<kernel_func>(), std::nullopt, nullptr);
    }

    // internal only for registering caffe2 ops
    Options&& schema(FunctionSchema&& schema) {
      TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration.");
      schemaOrName_ = FunctionSchema(std::move(schema));
      return std::move(*this);
    }

    /**
     * Use this to specify the schema for an operator. You can also specify
     * the operator name only to have the function signature part of the
     * schema be inferred from the kernel function.
     *
     * Example:
     *
     * > // Infer function signature from my_kernel_cpu
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .kernel<my_kernel_cpu>(DispatchKey::CPU));
     * >
     * >
     * > // Explicitly specify full schema
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op(Tensor a) -> Tensor")
     * >         .kernel<my_kernel_cpu>(DispatchKey::CPU));
     */
    Options&& schema(const std::string& schemaOrName) {
      TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration.");

#if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD)
      // Mobile/xplat builds do not link the schema parser; fail loudly.
      throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
#else
      schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName);
#endif

      return std::move(*this);
    }

    /**
     * Use this to register an operator whose kernel is implemented as a functor.
     * The kernel is only called for inputs matching the given dispatch key.
     * You can register multiple kernels for different dispatch keys.
     *
     * Example:
     *
     * > namespace {
     * >   class my_kernel_cpu final : public c10::OperatorKernel {
     * >   public:
     * >     Tensor operator()(Tensor a, Tensor b) {...}
     * >   };
     * > }
     * >
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .kernel<my_kernel_cpu>(DispatchKey::CPU));
     *
     * The functor constructor can take arguments to configure the kernel.
     * The arguments are defined in the kernel registration.
     * Example:
     *
     * > namespace {
     * >   class my_kernel_cpu final : public c10::OperatorKernel {
     * >   public:
     * >     explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
     * >         : ... {...}
     * >
     * >     Tensor operator()(Tensor a, Tensor b) {...}
     * >   };
     * > }
     * >
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .kernel<my_kernel_cpu>(DispatchKey::CPU, "some_configuration", 3, true));
     */
    template<class KernelFunctor, class... ConstructorParameters>
    // enable_if: only enable it if KernelFunctor is actually a functor
    std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
      static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
      static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");

      return std::move(*this).kernel(
          dispatch_key,
          KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
          impl::CppSignature::make<KernelFunctor>(),
          detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
      );
    }

    /**
     * Use this to register an operator whose kernel is implemented as a functor.
     * The kernel is a catch-all kernel, meaning it's called independent from
     * the input. Dispatch is disabled for this operator.
     *
     * Example:
     *
     * > namespace {
     * >   class my_kernel_cpu final : public c10::OperatorKernel {
     * >   public:
     * >     Tensor operator()(Tensor a, Tensor b) {...}
     * >   };
     * > }
     * >
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .catchAllKernel<my_kernel_cpu>());
     *
     * The functor constructor can take arguments to configure the kernel.
     * The arguments are defined in the kernel registration.
     * Example:
     *
     * > namespace {
     * >   class my_kernel_cpu final : public c10::OperatorKernel {
     * >   public:
     * >     explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
     * >         : ... {...}
     * >
     * >     Tensor operator()(Tensor a, Tensor b) {...}
     * >   };
     * > }
     * >
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .catchAllKernel<my_kernel_cpu>("some_configuration", 3, true));
     */
    template<class KernelFunctor, class... ConstructorParameters>
    // enable_if: only enable it if KernelFunctor is actually a functor
    std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
      static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
      static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");

      return std::move(*this).kernel(
          std::nullopt,
          KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
          impl::CppSignature::make<KernelFunctor>(),
          detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
      );
    }

    /**
     * Use this to register an operator whose kernel is implemented by a function.
     * The kernel is only called for inputs matching the given dispatch key.
     * You can register multiple kernels for different dispatch keys.
     *
     * Example:
     *
     * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
     * >
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(DispatchKey::CPU));
     */
    template<class FuncType, FuncType* kernel_func>
    // enable_if: only enable it if FuncType is actually a function
    std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
      static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
      static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");

      return std::move(*this).kernel(
          dispatch_key,
          KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
          impl::CppSignature::make<FuncType>(),
          // TODO Do schema inference without relying on WrapFunctionIntoFunctor
          detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
      );
    }

    /**
     * Use this to register an operator whose kernel is implemented by a function.
     * The kernel is a catch-all kernel, meaning it's called independent from
     * the input. Dispatch is disabled for this operator.
     *
     * Example:
     *
     * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
     * >
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
     */
    template<class FuncType, FuncType* kernel_func>
    // enable_if: only enable it if FuncType is actually a function
    std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
      static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
      static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");

      return std::move(*this).kernel(
          std::nullopt,
          KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
          impl::CppSignature::make<FuncType>(),
          // TODO Do schema inference without relying on WrapFunctionIntoFunctor
          detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
      );
    }

    // Runtime-function-pointer variant of kernel() above. The pointer is only
    // known at runtime, so nullptr is checked with TORCH_INTERNAL_ASSERT
    // instead of a static_assert, and calling carries a bit more overhead
    // than the compile-time-pointer overload.
    template<class FuncType>
    // enable_if: only enable it if FuncType is actually a function
    std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
      static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
      TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");

      return std::move(*this).kernel(
          dispatch_key,
          KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
          impl::CppSignature::make<FuncType>(),
          // TODO Do schema inference without relying on WrapFunctionIntoFunctor
          detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
      );
    }

    // Runtime-function-pointer variant of catchAllKernel() above.
    template<class FuncType>
    // enable_if: only enable it if FuncType is actually a function
    std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
      static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
      TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");

      return std::move(*this).kernel(
          std::nullopt,
          KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
          impl::CppSignature::make<FuncType>(),
          // TODO Do schema inference without relying on WrapFunctionIntoFunctor
          detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
      );
    }

    /**
     * Use this to register an operator whose kernel is implemented as a lambda.
     * The kernel is only called for inputs matching the given dispatch key.
     * You can register multiple kernels for different dispatch keys.
     *
     * The lambda must be stateless, i.e. not have a capture. If your kernel
     * needs to store some configuration parameters, write the kernel as a
     * functor instead.
     *
     * Example:
     *
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...}));
     */
    template<class Lambda>
    // enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
    std::enable_if_t<
        guts::is_functor<std::decay_t<Lambda>>::value
        && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
        Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && {
      static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");

      // We don't support stateful lambdas (i.e. lambdas with a capture), because their
      // behavior would be nonobvious. A functor kernel with cache gets a new instance of
      // its cache each time the kernel is looked up from the dispatch table.
      // A lambda with a capture would be global and share its capture between all kernel lookups.
      // So, instead of making users having to think about it (including the thread-safety
      // issues this causes), let's just forbid stateful lambdas altogether.
      static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");

      return std::move(*this).kernel(
          dispatch_key,
          KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
          impl::CppSignature::make<Lambda>(),
          // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
          detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
      );
    }

    /**
     * Use this to register an operator whose kernel is implemented as a lambda.
     * The kernel is a catch-all kernel, meaning it's called independent from
     * the input. Dispatch is disabled for this operator.
     *
     * The lambda must be stateless, i.e. not have a capture. If your kernel
     * needs to store some configuration parameters, write the kernel as a
     * functor instead.
     *
     * Example:
     *
     * > static auto registry = c10::RegisterOperators()
     * >     .op(c10::RegisterOperators::options()
     * >         .schema("my_op")
     * >         .catchAllKernel([] (Tensor a) -> Tensor {...}));
     */
    template<class Lambda>
    // enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
    std::enable_if_t<
        guts::is_functor<std::decay_t<Lambda>>::value
        && !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
        Options&&> catchAllKernel(Lambda&& lambda) && {
      static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");

      // We don't support stateful lambdas (i.e. lambdas with a capture), because their
      // behavior would be nonobvious.
      // A lambda with a capture would be global and share its capture between all kernel lookups.
      // This would be a likely source for unexpected race conditions, so we forbid it.
      // If a kernel really needs global state, they can just have regular global state
      // in their .cpp file next to the kernel lambda.
      static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");

      return std::move(*this).kernel(
          std::nullopt,
          KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
          impl::CppSignature::make<Lambda>(),
          // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
          detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
      );
    }

    // Sets the alias analysis kind for this operator. May be called at most
    // once per registration.
    Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && {
      TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration.");
      aliasAnalysisKind_ = aliasAnalysisKind;
      return std::move(*this);
    }

  private:
    // Common sink for all public kernel()/catchAllKernel() overloads: records
    // one kernel (with optional dispatch key, C++ signature for type checking,
    // and inferred schema) into the kernels list.
    Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
      KernelRegistrationConfig config;
      config.dispatch_key = dispatch_key;
      config.func = std::move(func);
      config.cpp_signature = cpp_signature;
      config.inferred_function_schema = std::move(inferred_function_schema);
      kernels.push_back(std::move(config));
      return std::move(*this);
    }

    // Constructible only through RegisterOperators::options().
    Options()
    : schemaOrName_(std::nullopt)
    , kernels()
    , aliasAnalysisKind_(std::nullopt)
    {}

    // KernelRegistrationConfig accumulates all information from the config
    // parameters passed to a RegisterOperators::op() call into one object.
    struct KernelRegistrationConfig final {
      KernelRegistrationConfig()
        : dispatch_key(std::nullopt)
        , func()
        , cpp_signature(std::nullopt)
        , inferred_function_schema(nullptr)
      {}

      std::optional<DispatchKey> dispatch_key;  // nullopt == catch-all kernel
      KernelFunction func;
      std::optional<impl::CppSignature> cpp_signature;
      std::unique_ptr<FunctionSchema> inferred_function_schema;
    };

    // Either just an operator name (signature inferred from kernels) or a
    // full explicitly-specified schema.
    std::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;

    std::vector<KernelRegistrationConfig> kernels;
    std::optional<AliasAnalysisKind> aliasAnalysisKind_;
    friend class RegisterOperators;
    friend class Library;
  };

  /**
   * Call this to get an instance of registration options, which
   * can be passed to a call to RegisterOperators::op() to specify
   * these options for the operator registration.
   * See class doc comment for examples.
   */
  static Options options() {
    return {};
  }

  /**
   * Call this to register an operator. See class doc comment for examples.
   */
  RegisterOperators&& op(Options&& options) && {
    checkSchemaAndRegisterOp_(std::move(options));
    return std::move(*this);
  }

  // Regular mutator version of the && version above
  RegisterOperators& op(Options&& options) & {
    checkSchemaAndRegisterOp_(std::move(options));
    return *this;
  }

  /**
   * This is a shorthand for RegisterOperators::op(Options) where you can
   * specify the operator schema outside of the options parameter.
   * See class doc comment for examples.
   */
  RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && {
    return std::move(*this).op(std::move(options).schema(schemaOrName));
  }

  // internal only for registering caffe2 ops
  RegisterOperators&& op(FunctionSchema schema, Options&& options) && {
    return std::move(*this).op(std::move(options).schema(std::move(schema)));
  }

  // Convenience constructor: registers `func` under `schemaOrName` in one
  // step, equivalent to RegisterOperators().op(schemaOrName, func, options).
  template<class FuncType>
  explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options())
  : RegisterOperators() {
    std::move(*this).op(schemaOrName, std::forward<FuncType>(func), std::move(options));
  }

  /**
   * This API registers an operator based on a kernel function pointer.
   *
   * Given a kernel
   *
   * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
   *
   * This API looks like:
   *
   * > static auto registry = c10::RegisterOperators()
   * >     .op("my_op", &my_kernel_cpu);
   *
   * If your kernel is small and the overhead of calling it matters,
   * then this API might be the wrong choice since the following API
   * has a slightly lower overhead for calling into the kernel:
   *
   * > static auto registry = c10::RegisterOperators()
   * >     .op("my_op", c10::RegisterOperators::options()
   * >         .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
   *
   * Or, alternatively, write your kernel as a functor:
   *
   * > namespace {
   * >   class my_kernel_cpu final : public c10::OperatorKernel {
   * >   public:
   * >     Tensor operator()(Tensor a, Tensor b) {...}
   * >   };
   * > }
   * >
   * > static auto registry = c10::RegisterOperators()
   * >     .op("my_op", c10::RegisterOperators::options()
   * >         .kernel<my_kernel_cpu>());
   */
  template<class FuncType>
  // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
  std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
  op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
    constexpr bool AllowLegacyTypes = true;
    return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
        std::nullopt,
        KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
        impl::CppSignature::make<FuncType>(),
        // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
        detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
    ));
  }

  /**
   * This API registers an operator based on a kernel lambda.
   *
   * This API looks like:
   *
   * > static auto registry = c10::RegisterOperators()
   * >     .op("my_op", [] (Tensor a, Tensor b) {...});
   *
   * This is equivalent to:
   *
   * > static auto registry = c10::RegisterOperators()
   * >     .op("my_op", c10::RegisterOperators::options()
   * >         .catchAllKernel([] (Tensor a, Tensor b) {...}));
   *
   */
  template<class Lambda>
  // enable_if: only enable it if Lambda is actually a stateless lambda
  std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
  op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
    static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");

    constexpr bool AllowLegacyTypes = true;
    return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
        std::nullopt,
        KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
        impl::CppSignature::make<Lambda>(),
        // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
        detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
    ));
  }

  // Deprecated overload for stateful lambdas; kept for backwards
  // compatibility only (the capture would be shared across all lookups).
  template<class Lambda>
  C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
  // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
  std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
  op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
    static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");

    constexpr bool AllowLegacyTypes = true;
    return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
        std::nullopt,
        KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
        impl::CppSignature::make<Lambda>(),
        // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
        detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
    ));
  }

private:
  // Validates the accumulated Options (schema present or inferable, no
  // duplicate kernels) and performs the actual dispatcher registration.
  void checkSchemaAndRegisterOp_(Options&& config);

  static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options);
  void checkNoDuplicateKernels_(const Options& options);
  void registerOp_(Options&& options);

  // RAII handles; destroying them (i.e. destroying this object) deregisters
  // the operators.
  std::vector<RegistrationHandleRAII> registrars_;
};
590
+
591
+ } // namespace c10
592
+
593
namespace torch {
  // Old-style API
  // Alias kept so existing code spelled torch::RegisterOperators keeps
  // compiling; it is the same type as c10::RegisterOperators.
  using RegisterOperators = c10::RegisterOperators;
}
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /// Flush-To-Zero and Denormals-Are-Zero mode
2
+ ///
3
+ /// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass
4
+ /// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64
5
+ /// and some x86 CPUs. They result in reduced precision for values near zero,
6
+ /// but increased performance.
7
+ ///
8
+ /// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz
9
+
10
namespace at::cpu {

// Enables (on == true) or disables Flush-To-Zero / Denormals-Are-Zero mode
// for the calling thread; see the file comment above for what FTZ/DAZ do.
// NOTE(review): the bool return presumably reports whether the mode change
// was supported/applied on this CPU — confirm against the implementation.
bool set_flush_denormal(bool on);

} // namespace at::cpu
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
+ #include <c10/macros/Export.h>
6
+
7
namespace at::cpu {

// Detect if the CPU supports the AVX2 / AVX-512 instruction sets.
TORCH_API bool is_avx2_supported();
TORCH_API bool is_avx512_supported();

// Detect if the CPU supports Vector Neural Network Instructions (AVX512-VNNI).
TORCH_API bool is_avx512_vnni_supported();

// Detect if the CPU supports the AVX512_BF16 ISA.
TORCH_API bool is_avx512_bf16_supported();

// Detect if the CPU supports Advanced Matrix Extension (AMX) tiles.
TORCH_API bool is_amx_tile_supported();

// Enable the system to use AMX instructions.
TORCH_API bool init_amx();

// Get the L1 cache size per core in Byte
TORCH_API uint32_t L1d_cache_size();

// Get the L2 cache size per core in Byte
TORCH_API uint32_t L2_cache_size();

} // namespace at::cpu
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
#pragma once

// Umbrella header: pulls in the generic vectorized functional helpers
// (map/reduce over Vectorized<T>) and their reduced-precision
// (BFloat16/Half) counterparts.
#include <ATen/cpu/vec/functional_base.h>
#include <ATen/cpu/vec/functional_bfloat16.h>
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+ #include <c10/util/irange.h>
8
+
9
namespace at::vec {

// Generic "functional" helpers over Vectorized<scalar_t>: full-array
// reductions and elementwise maps, with the main-loop/tail split factored
// out once. The tail (size % Vec::size() elements) is processed with a
// partial loadu, and partial results are merged with Vec::set so lanes past
// the logical size never contaminate the accumulator.
// NOTE(review): this assumes Vec::set(a, b, n) yields b's first n lanes and
// a's remaining lanes — confirm against vec_base.h.

// slow path: reduce the first `size` lanes of acc_vec to a scalar by folding
// each lane i (broadcast into lane 0 of a temporary vector) into lane 0.
template <typename scalar_t, typename Op>
inline scalar_t vec_reduce_all(
    const Op& vec_fun,
    vec::Vectorized<scalar_t> acc_vec,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  scalar_t acc_arr[Vec::size()];
  acc_vec.store(acc_arr);
  for (const auto i : c10::irange(1, size)) {
    std::array<scalar_t, Vec::size()> acc_arr_next = {0};
    acc_arr_next[0] = acc_arr[i];
    Vec acc_vec_next = Vec::loadu(acc_arr_next.data());
    acc_vec = vec_fun(acc_vec, acc_vec_next);
  }
  acc_vec.store(acc_arr);
  return acc_arr[0];
}

// Reduce all lanes of a full vector to one scalar. The primary template
// falls back to the slow lane-by-lane path; the specializations below use
// shuffle intrinsics to do a logarithmic tree reduction.
template <typename scalar_t, typename Op>
struct VecReduceAllSIMD {
  static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
    return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size());
  }
};

#if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
#if defined(CPU_CAPABILITY_AVX2)
// AVX2 float: tree-reduce 8 lanes with 3 shuffle+combine steps.
template <typename Op>
struct VecReduceAllSIMD<float, Op> {
  static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
    using Vec = Vectorized<float>;
    Vec v = acc_vec;
    // 128-bit shuffle
    Vec v1 = _mm256_permute2f128_ps(v, v, 0x1);
    v = vec_fun(v, v1);
    // 64-bit shuffle
    v1 = _mm256_shuffle_ps(v, v, 0x4E);
    v = vec_fun(v, v1);
    // 32-bit shuffle
    v1 = _mm256_shuffle_ps(v, v, 0xB1);
    v = vec_fun(v, v1);
    return _mm256_cvtss_f32(v);
  }
};
#endif // defined(CPU_CAPABILITY_AVX2)
#if defined(CPU_CAPABILITY_AVX512)
// AVX-512 float: tree-reduce 16 lanes with 4 shuffle+combine steps.
template <typename Op>
struct VecReduceAllSIMD<float, Op> {
  static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
    using Vec = Vectorized<float>;
    Vec v = acc_vec;
    // 256-bit shuffle
    Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
    v = vec_fun(v, v1);
    // 128-bit shuffle
    v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
    v = vec_fun(v, v1);
    // 64-bit shuffle
    v1 = _mm512_shuffle_ps(v, v, 0x4E);
    v = vec_fun(v, v1);
    // 32-bit shuffle
    v1 = _mm512_shuffle_ps(v, v, 0xB1);
    v = vec_fun(v, v1);
    return _mm512_cvtss_f32(v);
  }
};
#endif // defined(CPU_CAPABILITY_AVX512)
#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)

#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
// NEON float: Vectorized<float> is a pair of float32x4_t halves; reduce by
// folding high half into low, then 64- and 32-bit rotations of the low half.
template <typename Op>
struct VecReduceAllSIMD<float, Op> {
  static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
    using Vec = Vectorized<float>;
    Vec v = acc_vec;

    // 128-bit shuffle: [a1, a2, a3, a4, a5, a6, a7, a8] -> [a5, a6, a7, a8, a1, a2, a3, a4]
    Vec v1 = {v.get_high(), v.get_low()};
    // [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] ('+' stands for the reduction function. Note that the last 4 elements are not required)
    v = vec_fun(v, v1);

    // 64-bit shuffle: [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] -> [a3+a7, a4+a8, a1+a5, a2+a6, -, -, -, -]
    float32x4_t v1_1 = vextq_f32(v.get_low(), v.get_low(), 2);
    v1 = {v1_1, v1_1};
    // [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -]
    v = vec_fun(v, v1);

    // 32-bit shuffle: [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] -> [a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, -, -, -, -]
    v1_1 = vrev64q_f32(v.get_low());
    v1 = {v1_1, v1_1};
    // [a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, -, -, -, -]
    v = vec_fun(v, v1);

    return v.get_low()[0];
  }
};
#endif // defined(__aarch64__)

// Reduce an entire vector to a scalar via the (possibly SIMD-specialized)
// VecReduceAllSIMD dispatcher.
template <typename scalar_t, typename Op>
inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
  return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
}

// Reduce `size` contiguous elements with vec_fun (full vectors in the main
// loop, masked tail merged with Vec::set). Non-reduced-precision types only;
// BFloat16/Half overloads live in functional_bfloat16.h.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size())
    return vec_reduce_all(vec_fun, Vec::loadu(data, size), size);
  int64_t d = Vec::size();
  Vec acc_vec = Vec::loadu(data);
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    acc_vec = vec_fun(acc_vec, data_vec);
  }
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(vec_fun, acc_vec);
}

// similar to reduce_all, but reduces into two outputs
// (e.g. min and max in one pass over the data).
template <typename scalar_t, typename Op1, typename Op2,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
    const scalar_t* data, int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size()) {
    auto loaded_data = Vec::loadu(data, size);
    return std::pair<scalar_t, scalar_t>(
      vec_reduce_all(vec_fun1, loaded_data, size),
      vec_reduce_all(vec_fun2, loaded_data, size));
  }
  int64_t d = Vec::size();
  Vec acc_vec1 = Vec::loadu(data);
  Vec acc_vec2 = Vec::loadu(data);
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    acc_vec1 = vec_fun1(acc_vec1, data_vec);
    acc_vec2 = vec_fun2(acc_vec2, data_vec);
  }
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d);
    acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d);
  }
  return std::pair<scalar_t, scalar_t>(
    vec_reduce_all(vec_fun1, acc_vec1),
    vec_reduce_all(vec_fun2, acc_vec2));
}

// reduce_all of map_fun(data[i]): applies the unary map per vector, then
// folds with red_fun.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t map_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size())
    return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size);
  int64_t d = Vec::size();
  Vec acc_vec = map_fun(Vec::loadu(data));
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    data_vec = map_fun(data_vec);
    acc_vec = red_fun(acc_vec, data_vec);
  }
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    data_vec = map_fun(data_vec);
    acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(red_fun, acc_vec);
}

// reduce_all of map_fun(data[i], data2[i]) — binary map then reduce.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t map2_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size()) {
    Vec data_vec = Vec::loadu(data, size);
    Vec data2_vec = Vec::loadu(data2, size);
    data_vec = map_fun(data_vec, data2_vec);
    return vec_reduce_all(red_fun, data_vec, size);
  }
  int64_t d = Vec::size();
  Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2));
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    Vec data2_vec = Vec::loadu(data2 + d);
    data_vec = map_fun(data_vec, data2_vec);
    acc_vec = red_fun(acc_vec, data_vec);
  }
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    Vec data2_vec = Vec::loadu(data2 + d, size - d);
    data_vec = map_fun(data_vec, data2_vec);
    acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(red_fun, acc_vec);
}

// reduce_all of map_fun(data[i], data2[i], data3[i]) — ternary map then
// reduce.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline scalar_t map3_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    const scalar_t* data3,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  if (size < Vec::size()) {
    Vec data_vec = Vec::loadu(data, size);
    Vec data2_vec = Vec::loadu(data2, size);
    Vec data3_vec = Vec::loadu(data3, size);
    data_vec = map_fun(data_vec, data2_vec, data3_vec);
    return vec_reduce_all(red_fun, data_vec, size);
  }

  int64_t d = Vec::size();
  Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3));
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(data + d);
    Vec data2_vec = Vec::loadu(data2 + d);
    Vec data3_vec = Vec::loadu(data3 + d);
    data_vec = map_fun(data_vec, data2_vec, data3_vec);
    acc_vec = red_fun(acc_vec, data_vec);
  }
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(data + d, size - d);
    Vec data2_vec = Vec::loadu(data2 + d, size - d);
    Vec data3_vec = Vec::loadu(data3 + d, size - d);
    data_vec = map_fun(data_vec, data2_vec, data3_vec);
    acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
  }
  return vec_reduce_all(red_fun, acc_vec);
}

// Elementwise unary map: output_data[i] = vec_fun(input_data[i]).
// In-place use (output_data == input_data) is fine: each chunk is loaded
// before it is stored.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  int64_t d = 0;
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec output_vec = vec_fun(Vec::loadu(input_data + d));
    output_vec.store(output_data + d);
  }
  if (size - d > 0) {
    Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d));
    output_vec.store(output_data + d, size - d);
  }
}

// Elementwise binary map: output_data[i] = vec_fun(input_data[i], input_data2[i]).
template <typename scalar_t, typename Op,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map2(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data,
    const scalar_t* input_data2,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  int64_t d = 0;
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec = Vec::loadu(input_data + d);
    Vec data_vec2 = Vec::loadu(input_data2 + d);
    Vec output_vec = vec_fun(data_vec, data_vec2);
    output_vec.store(output_data + d);
  }
  if (size - d > 0) {
    Vec data_vec = Vec::loadu(input_data + d, size - d);
    Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
    Vec output_vec = vec_fun(data_vec, data_vec2);
    output_vec.store(output_data + d, size - d);
  }
}

// Elementwise ternary map over three input arrays.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map3(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data1,
    const scalar_t* input_data2,
    const scalar_t* input_data3,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  int64_t d = 0;
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec1 = Vec::loadu(input_data1 + d);
    Vec data_vec2 = Vec::loadu(input_data2 + d);
    Vec data_vec3 = Vec::loadu(input_data3 + d);
    Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
    output_vec.store(output_data + d);
  }
  if (size - d > 0) {
    Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
    Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
    Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
    Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
    output_vec.store(output_data + d, size - d);
  }
}

// Elementwise quaternary map over four input arrays.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map4(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data1,
    const scalar_t* input_data2,
    const scalar_t* input_data3,
    const scalar_t* input_data4,
    int64_t size) {
  using Vec = vec::Vectorized<scalar_t>;
  int64_t d = 0;
  for (; d < size - (size % Vec::size()); d += Vec::size()) {
    Vec data_vec1 = Vec::loadu(input_data1 + d);
    Vec data_vec2 = Vec::loadu(input_data2 + d);
    Vec data_vec3 = Vec::loadu(input_data3 + d);
    Vec data_vec4 = Vec::loadu(input_data4 + d);
    Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
    output_vec.store(output_data + d);
  }
  if (size - d > 0) {
    Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
    Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
    Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
    Vec data_vec4 = Vec::loadu(input_data4 + d, size - d);
    Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
    output_vec.store(output_data + d, size - d);
  }
}

} // namespace at::vec
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h ADDED
@@ -0,0 +1,549 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+
8
+ namespace at::vec {
9
+
10
// BFloat16 specification
// Maps a scalar type to the type used for vectorized accumulation:
// reduced-precision types (BFloat16, Half) accumulate in float, everything
// else accumulates in its own type.
template <typename scalar_t> struct VecScalarType { using type = scalar_t; };
template <> struct VecScalarType<BFloat16> { using type = float; };
template <> struct VecScalarType<Half> { using type = float; };

// This is different from at::acc_type since we only need to specialize BFloat16
template <typename scalar_t>
using vec_scalar_t = typename VecScalarType<scalar_t>::type;
18
+
19
// Vector conversion between float and bfloat16/half
// One Vectorized<BFloat16>/Vectorized<Half> widens to a pair of
// Vectorized<float> (low half, high half), and vice versa on the way back.
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&);

template <>
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) {
  return convert_bfloat16_float(a);
}

template <>
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) {
  return convert_half_float(a);
}

// Narrow a (low, high) pair of float vectors back into one reduced-precision
// vector.
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&);

template <>
inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) {
  return convert_float_bfloat16(a, b);
}

template <>
inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) {
  return convert_float_half(a, b);
}

// Load one reduced-precision vector's worth of elements from memory,
// widened into two float vectors.
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void load_to_float(const scalar_t *data, Vectorized<float> &out1, Vectorized<float> &out2);

template <>
inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out1, Vectorized<float> &out2) {
  load_fp32_from_bf16(data, out1, out2);
}

template <>
inline void load_to_float<Half> (const Half *data, Vectorized<float> &out1, Vectorized<float> &out2) {
  load_fp32_from_fp16(data, out1, out2);
}

// Load one float vector's worth of reduced-precision elements, widened into
// a single float vector.
template <typename scalar_t,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void load_to_float(const scalar_t *data, Vectorized<float> &out);

template <>
inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out) {
  load_fp32_from_bf16(data, out);
}

template <>
inline void load_to_float<Half> (const Half *data, Vectorized<float> &out) {
  load_fp32_from_fp16(data, out);
}
75
+
76
+ // Note that we already have specialized member of Vectorized<scalar_t> for BFloat16
77
+ // so the following functions would run smoothly:
78
+ // using Vec = Vectorized<BFloat16>;
79
+ // Vec one = Vec(BFloat16(1));
80
+ // vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N);
81
+ //
82
+ // Then why we still need to specialize "functional"?
83
+ // If we do specialization at Vectorized<> level, the above example would need 3 pairs of
84
+ // conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/".
85
+ // If we do specialization at vec::map<>() level, we have only 1 pair of conversion
86
+ // of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only.
87
+ //
88
+ // The following BFloat16 functionality will only do data type conversion for input
89
+ // and output vector (reduce functionality will only convert the final scalar back to bf16).
90
+ // Compared to Vectorized<> specialization,
91
+ // 1. better performance since we have less data type conversion;
92
// 2. less rounding error since intermediate results are kept in fp32;
93
+ // 3. accumulation done on data type of fp32.
94
+ //
95
+ // If you plan to extend this file, please ensure adding unit tests at
96
+ // aten/src/ATen/test/vec_test_all_types.cpp
97
+ //
98
// Reduce `size` BFloat16/Half elements with vec_fun, accumulating in float.
// Each reduced-precision vector (bVec) widens to two float vectors (fVec);
// the two float accumulators are folded together at the end. Tail lanes are
// merged with fVec::set so out-of-range values never enter the result.
template <typename scalar_t, typename Op,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Short input: one partial load; the tail may spill into the second
    // float vector, in which case it is folded in via fVec::set.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size());
    } else {
      return vec_reduce_all<float>(vec_fun, data_fvec0, size);
    }
  }
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = vec_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(vec_fun, acc_fvec0);
}
135
+
136
+ template <typename scalar_t, typename Op1, typename Op2,
137
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
138
+ inline std::pair<float, float> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
139
+ const scalar_t* data, int64_t size) {
140
+ using bVec = vec::Vectorized<scalar_t>;
141
+ using fVec = vec::Vectorized<float>;
142
+ if (size < bVec::size()) {
143
+ bVec data_bvec = bVec::loadu(data, size);
144
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
145
+ if (size > fVec::size()) {
146
+ fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size());
147
+ fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size());
148
+ return std::pair<scalar_t, scalar_t>(
149
+ vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()),
150
+ vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size()));
151
+ } else {
152
+ return std::pair<scalar_t, scalar_t>(
153
+ vec_reduce_all<float>(vec_fun1, data_fvec0, size),
154
+ vec_reduce_all<float>(vec_fun2, data_fvec0, size));
155
+ }
156
+ }
157
+ int64_t d = bVec::size();
158
+ bVec acc_bvec = bVec::loadu(data);
159
+ auto [acc1_fvec0, acc1_fvec1] = convert_to_float<scalar_t>(acc_bvec);
160
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc_bvec);
161
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
162
+ bVec data_bvec = bVec::loadu(data + d);
163
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
164
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
165
+ acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1);
166
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
167
+ acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1);
168
+ }
169
+ if (size - d > 0) {
170
+ bVec data_bvec = bVec::loadu(data + d, size - d);
171
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
172
+ if (size - d > fVec::size()) {
173
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
174
+ acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size());
175
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
176
+ acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size());
177
+ } else {
178
+ acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d);
179
+ acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d);
180
+ }
181
+ }
182
+ acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1);
183
+ acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1);
184
+ return std::pair<scalar_t, scalar_t>(
185
+ vec_reduce_all<float>(vec_fun1, acc1_fvec0),
186
+ vec_reduce_all<float>(vec_fun2, acc2_fvec0));
187
+ }
188
+
189
// Unary map then reduce over `size` BFloat16/Half elements, all in float:
// one bf16->fp32 widening per input vector, one fold per float vector.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0);
      data_fvec1 = map_fun(data_fvec1);
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  acc_fvec0 = map_fun(acc_fvec0);
  acc_fvec1 = map_fun(acc_fvec1);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    data_fvec0 = map_fun(data_fvec0);
    data_fvec1 = map_fun(data_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: only the first (size - d) lanes contribute, via fVec::set.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0);
      data_fvec1 = map_fun(data_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
240
+
241
// Binary map then reduce over two BFloat16/Half arrays of length `size`,
// accumulating in float.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map2_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2, size);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    if (size > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1);
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  bVec acc2_bvec = bVec::loadu(data2);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
  acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0);
  acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    data_fvec0 = map_fun(data_fvec0, data2_fvec0);
    data_fvec1 = map_fun(data_fvec1, data2_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: only the first (size - d) lanes contribute, via fVec::set.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
301
+
302
// Ternary map then reduce over three BFloat16/Half arrays of length `size`,
// accumulating in float.
template <typename scalar_t, typename MapOp, typename ReduceOp,
  typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map3_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    const scalar_t* data3,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2, size);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3, size);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    if (size > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  bVec acc2_bvec = bVec::loadu(data2);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
  bVec acc3_bvec = bVec::loadu(data3);
  auto [acc3_fvec0, acc3_fvec1] = convert_to_float<scalar_t>(acc3_bvec);
  acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0);
  acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
    data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Tail: only the first (size - d) lanes contribute, via fVec::set.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
371
+
372
+ template <typename scalar_t, typename Op,
373
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
374
+ inline void map(
375
+ const Op& vec_fun,
376
+ scalar_t* output_data,
377
+ const scalar_t* input_data,
378
+ int64_t size) {
379
+ using bVec = vec::Vectorized<scalar_t>;
380
+ using fVec = vec::Vectorized<float>;
381
+ int64_t d = 0;
382
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
383
+ bVec data_bvec = bVec::loadu(input_data + d);
384
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
385
+ fVec output_fvec0 = vec_fun(data_fvec0);
386
+ fVec output_fvec1 = vec_fun(data_fvec1);
387
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
388
+ output_bvec.store(output_data + d);
389
+ }
390
+ if (size - d > 0) {
391
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
392
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
393
+ fVec output_fvec0 = vec_fun(data_fvec0);
394
+ fVec output_fvec1 = vec_fun(data_fvec1);
395
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
396
+ output_bvec.store(output_data + d, size - d);
397
+ }
398
+ }
399
+
400
+ template <typename scalar_t, typename Op,
401
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
402
+ inline void map(
403
+ const Op& vec_fun,
404
+ scalar_t* output_data,
405
+ const float* input_data,
406
+ int64_t size) {
407
+ using bVec = vec::Vectorized<scalar_t>;
408
+ using fVec = vec::Vectorized<float>;
409
+ int64_t d = 0;
410
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
411
+ fVec data_fvec0 = fVec::loadu(input_data + d);
412
+ fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size());
413
+ fVec output_fvec0 = vec_fun(data_fvec0);
414
+ fVec output_fvec1 = vec_fun(data_fvec1);
415
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
416
+ output_bvec.store(output_data + d);
417
+ }
418
+ if (size - d > 0) {
419
+ fVec data_fvec0, data_fvec1;
420
+ if (size - d > fVec::size()) {
421
+ data_fvec0 = fVec::loadu(input_data + d);
422
+ data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size());
423
+ } else {
424
+ // choose to align with behaviour of bVec::loadu(ptr, size),
425
+ // which leaves data_fvec1 uninitialized
426
+ data_fvec0 = fVec::loadu(input_data + d, size - d);
427
+ }
428
+ fVec output_fvec0 = vec_fun(data_fvec0);
429
+ fVec output_fvec1 = vec_fun(data_fvec1);
430
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
431
+ output_bvec.store(output_data + d, size - d);
432
+ }
433
+ }
434
+
435
+ template <typename scalar_t, typename Op,
436
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
437
+ inline void map2(
438
+ const Op& vec_fun,
439
+ scalar_t* output_data,
440
+ const scalar_t* input_data,
441
+ const scalar_t* input_data2,
442
+ int64_t size) {
443
+ using bVec = vec::Vectorized<scalar_t>;
444
+ using fVec = vec::Vectorized<float>;
445
+ int64_t d = 0;
446
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
447
+ bVec data_bvec = bVec::loadu(input_data + d);
448
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
449
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
450
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
451
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
452
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
453
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
454
+ output_bvec.store(output_data + d);
455
+ }
456
+ if (size - d > 0) {
457
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
458
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
459
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
460
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
461
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
462
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
463
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
464
+ output_bvec.store(output_data + d, size - d);
465
+ }
466
+ }
467
+
468
+ template <typename scalar_t, typename Op,
469
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
470
+ inline void map3(
471
+ const Op& vec_fun,
472
+ scalar_t* output_data,
473
+ const scalar_t* input_data1,
474
+ const scalar_t* input_data2,
475
+ const scalar_t* input_data3,
476
+ int64_t size) {
477
+ using bVec = vec::Vectorized<scalar_t>;
478
+ using fVec = vec::Vectorized<float>;
479
+ int64_t d = 0;
480
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
481
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
482
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
483
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
484
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
485
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
486
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
487
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
488
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
489
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
490
+ output_bvec.store(output_data + d);
491
+ }
492
+ if (size - d > 0) {
493
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
494
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
495
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
496
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
497
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
498
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
499
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
500
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
501
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
502
+ output_bvec.store(output_data + d, size - d);
503
+ }
504
+ }
505
+
506
+ template <typename scalar_t, typename Op,
507
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
508
+ inline void map4(
509
+ const Op& vec_fun,
510
+ scalar_t* output_data,
511
+ const scalar_t* input_data1,
512
+ const scalar_t* input_data2,
513
+ const scalar_t* input_data3,
514
+ const scalar_t* input_data4,
515
+ int64_t size) {
516
+ using bVec = vec::Vectorized<scalar_t>;
517
+ using fVec = vec::Vectorized<float>;
518
+ int64_t d = 0;
519
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
520
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
521
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
522
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
523
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
524
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
525
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
526
+ bVec data4_bvec = bVec::loadu(input_data4 + d);
527
+ auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
528
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
529
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
530
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
531
+ output_bvec.store(output_data + d);
532
+ }
533
+ if (size - d > 0) {
534
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
535
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
536
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
537
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
538
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
539
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
540
+ bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
541
+ auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
542
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
543
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
544
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
545
+ output_bvec.store(output_data + d, size - d);
546
+ }
547
+ }
548
+
549
+ } // namespace at::vec
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
3
+ /* GCC or clang-compatible compiler, targeting x86/x86-64 */
4
+ #include <x86intrin.h>
5
+ #elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
6
+ /* Clang-compatible compiler, targeting arm neon */
7
+ #include <arm_neon.h>
8
+ #elif defined(_MSC_VER)
9
+ /* Microsoft C/C++-compatible compiler */
10
+ #include <intrin.h>
11
+ #if _MSC_VER <= 1900
12
+ #define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
13
+ #define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
14
+ #define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
15
+ #define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
16
+ #endif
17
+ #elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
18
+ /* GCC-compatible compiler, targeting ARM with NEON */
19
+ #include <arm_neon.h>
20
+ #if defined (MISSING_ARM_VLD1)
21
+ #include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
22
+ #elif defined (MISSING_ARM_VST1)
23
+ #include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
24
+ #endif
25
+ #elif defined(__GNUC__) && defined(__IWMMXT__)
26
+ /* GCC-compatible compiler, targeting ARM with WMMX */
27
+ #include <mmintrin.h>
28
+ #elif defined(__s390x__)
29
+ // targets Z/architecture
30
+ // we will include vecintrin later
31
+ #elif (defined(__GNUC__) || defined(__xlC__)) && \
32
+ (defined(__VEC__) || defined(__ALTIVEC__))
33
+ /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
34
+ #include <altivec.h>
35
+ /* We need to undef those tokens defined by <altivec.h> to avoid conflicts
36
+ with the C++ types. => Can still use __bool/__vector */
37
+ #undef bool
38
+ #undef vector
39
+ #undef pixel
40
+ #elif defined(__GNUC__) && defined(__SPE__)
41
+ /* GCC-compatible compiler, targeting PowerPC with SPE */
42
+ #include <spe.h>
43
+ #endif
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #if defined(CPU_CAPABILITY_AVX512)
4
+ #include <ATen/cpu/vec/vec512/vec512.h>
5
+ #else
6
+ #include <ATen/cpu/vec/vec256/vec256.h>
7
+ #endif
8
+
9
+ namespace at::vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+
13
+ inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
14
+ __at_align__ bool buffer[x.size()];
15
+ x.ne(Vectorized<int8_t>(0)).store(buffer);
16
+
17
+ Vectorized<bool> ret;
18
+ static_assert(x.size() == ret.size());
19
+ std::memcpy(ret, buffer, ret.size() * sizeof(bool));
20
+ return ret;
21
+ }
22
+
23
+ template <>
24
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
25
+ // See NOTE [Loading boolean values]
26
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
27
+ }
28
+
29
+ template <>
30
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
31
+ // See NOTE [Loading boolean values]
32
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
33
+ }
34
+
35
+ template <typename VT>
36
+ struct VecHoldType { using hold_type = typename VT::value_type; };
37
+
38
+ template <>
39
+ struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };
40
+
41
+ template <>
42
+ struct VecHoldType<Vectorized<Half>> {using hold_type = Half; };
43
+
44
+ template <typename VT>
45
+ using vechold_type = typename VecHoldType<VT>::hold_type;
46
+
47
+ }} // namespace at::vec::CPU_CAPABILITY
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7. */
2
+
3
+ __extension__ extern __inline uint8x8x2_t
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vld1_u8_x2 (const uint8_t *__a)
6
+ {
7
+ uint8x8x2_t ret;
8
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
9
+ return ret;
10
+ }
11
+
12
+ __extension__ extern __inline int8x8x2_t
13
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14
+ vld1_s8_x2 (const int8_t *__a)
15
+ {
16
+ int8x8x2_t ret;
17
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
18
+ return ret;
19
+ }
20
+
21
+ __extension__ extern __inline uint16x4x2_t
22
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23
+ vld1_u16_x2 (const uint16_t *__a)
24
+ {
25
+ uint16x4x2_t ret;
26
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
27
+ return ret;
28
+ }
29
+
30
+ __extension__ extern __inline int16x4x2_t
31
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32
+ vld1_s16_x2 (const int16_t *__a)
33
+ {
34
+ int16x4x2_t ret;
35
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
36
+ return ret;
37
+ }
38
+
39
+ __extension__ extern __inline uint32x2x2_t
40
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
41
+ vld1_u32_x2 (const uint32_t *__a)
42
+ {
43
+ uint32x2x2_t ret;
44
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
45
+ return ret;
46
+ }
47
+
48
+ __extension__ extern __inline int32x2x2_t
49
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
50
+ vld1_s32_x2 (const int32_t *__a)
51
+ {
52
+ int32x2x2_t ret;
53
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
54
+ return ret;
55
+ }
56
+
57
+ __extension__ extern __inline uint64x1x2_t
58
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
59
+ vld1_u64_x2 (const uint64_t *__a)
60
+ {
61
+ uint64x1x2_t ret;
62
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
63
+ return ret;
64
+ }
65
+
66
+ __extension__ extern __inline int64x1x2_t
67
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
68
+ vld1_s64_x2 (const int64_t *__a)
69
+ {
70
+ int64x1x2_t ret;
71
+ __builtin_aarch64_simd_oi __o;
72
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
73
+ return ret;
74
+ }
75
+
76
+ __extension__ extern __inline float16x4x2_t
77
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
78
+ vld1_f16_x2 (const float16_t *__a)
79
+ {
80
+ float16x4x2_t ret;
81
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
82
+ return ret;
83
+ }
84
+
85
+ __extension__ extern __inline float32x2x2_t
86
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
87
+ vld1_f32_x2 (const float32_t *__a)
88
+ {
89
+ float32x2x2_t ret;
90
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
91
+ return ret;
92
+ }
93
+
94
+ __extension__ extern __inline float64x1x2_t
95
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
96
+ vld1_f64_x2 (const float64_t *__a)
97
+ {
98
+ float64x1x2_t ret;
99
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
100
+ return ret;
101
+ }
102
+
103
+ __extension__ extern __inline poly8x8x2_t
104
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
105
+ vld1_p8_x2 (const poly8_t *__a)
106
+ {
107
+ poly8x8x2_t ret;
108
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
109
+ return ret;
110
+ }
111
+
112
+ __extension__ extern __inline poly16x4x2_t
113
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
114
+ vld1_p16_x2 (const poly16_t *__a)
115
+ {
116
+ poly16x4x2_t ret;
117
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
118
+ return ret;
119
+ }
120
+
121
+ __extension__ extern __inline poly64x1x2_t
122
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
123
+ vld1_p64_x2 (const poly64_t *__a)
124
+ {
125
+ poly64x1x2_t ret;
126
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
127
+ return ret;
128
+ }
129
+
130
+ __extension__ extern __inline uint8x16x2_t
131
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
132
+ vld1q_u8_x2 (const uint8_t *__a)
133
+ {
134
+ uint8x16x2_t ret;
135
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
136
+ return ret;
137
+ }
138
+
139
+ __extension__ extern __inline int8x16x2_t
140
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
141
+ vld1q_s8_x2 (const int8_t *__a)
142
+ {
143
+ int8x16x2_t ret;
144
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
145
+ return ret;
146
+ }
147
+
148
+ __extension__ extern __inline uint16x8x2_t
149
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
150
+ vld1q_u16_x2 (const uint16_t *__a)
151
+ {
152
+ uint16x8x2_t ret;
153
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
154
+ return ret;
155
+ }
156
+
157
+ __extension__ extern __inline int16x8x2_t
158
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
159
+ vld1q_s16_x2 (const int16_t *__a)
160
+ {
161
+ int16x8x2_t ret;
162
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
163
+ return ret;
164
+ }
165
+
166
+ __extension__ extern __inline uint32x4x2_t
167
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
168
+ vld1q_u32_x2 (const uint32_t *__a)
169
+ {
170
+ uint32x4x2_t ret;
171
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
172
+ return ret;
173
+ }
174
+
175
+ __extension__ extern __inline int32x4x2_t
176
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
177
+ vld1q_s32_x2 (const int32_t *__a)
178
+ {
179
+ int32x4x2_t ret;
180
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
181
+ return ret;
182
+ }
183
+
184
+ __extension__ extern __inline uint64x2x2_t
185
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
186
+ vld1q_u64_x2 (const uint64_t *__a)
187
+ {
188
+ uint64x2x2_t ret;
189
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
190
+ return ret;
191
+ }
192
+
193
+ __extension__ extern __inline int64x2x2_t
194
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
195
+ vld1q_s64_x2 (const int64_t *__a)
196
+ {
197
+ int64x2x2_t ret;
198
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
199
+ return ret;
200
+ }
201
+
202
+ __extension__ extern __inline float16x8x2_t
203
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
204
+ vld1q_f16_x2 (const float16_t *__a)
205
+ {
206
+ float16x8x2_t ret;
207
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
208
+ return ret;
209
+ }
210
+
211
+ __extension__ extern __inline float32x4x2_t
212
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
213
+ vld1q_f32_x2 (const float32_t *__a)
214
+ {
215
+ float32x4x2_t ret;
216
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
217
+ return ret;
218
+ }
219
+
220
+ __extension__ extern __inline float64x2x2_t
221
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
222
+ vld1q_f64_x2 (const float64_t *__a)
223
+ {
224
+ float64x2x2_t ret;
225
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
226
+ return ret;
227
+ }
228
+
229
+ __extension__ extern __inline poly8x16x2_t
230
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
231
+ vld1q_p8_x2 (const poly8_t *__a)
232
+ {
233
+ poly8x16x2_t ret;
234
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
235
+ return ret;
236
+ }
237
+
238
+ __extension__ extern __inline poly16x8x2_t
239
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
240
+ vld1q_p16_x2 (const poly16_t *__a)
241
+ {
242
+ poly16x8x2_t ret;
243
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
244
+ return ret;
245
+ }
246
+
247
+ __extension__ extern __inline poly64x2x2_t
248
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
249
+ vld1q_p64_x2 (const poly64_t *__a)
250
+ {
251
+ poly64x2x2_t ret;
252
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
253
+ return ret;
254
+ }
255
+
256
+ /* vst1x2 */
257
+
258
+ __extension__ extern __inline void
259
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
260
+ vst1_s64_x2 (int64_t * __a, int64x1x2_t val)
261
+ {
262
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
263
+ }
264
+
265
+ __extension__ extern __inline void
266
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
267
+ vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val)
268
+ {
269
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
270
+ }
271
+
272
+ __extension__ extern __inline void
273
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
274
+ vst1_f64_x2 (float64_t * __a, float64x1x2_t val)
275
+ {
276
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
277
+ }
278
+
279
+ __extension__ extern __inline void
280
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
281
+ vst1_s8_x2 (int8_t * __a, int8x8x2_t val)
282
+ {
283
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
284
+ }
285
+
286
+ __extension__ extern __inline void
287
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
288
+ vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val)
289
+ {
290
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
291
+ }
292
+
293
+ __extension__ extern __inline void
294
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
295
+ vst1_s16_x2 (int16_t * __a, int16x4x2_t val)
296
+ {
297
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
298
+ }
299
+
300
+ __extension__ extern __inline void
301
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
302
+ vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val)
303
+ {
304
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
305
+ }
306
+
307
+ __extension__ extern __inline void
308
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
309
+ vst1_s32_x2 (int32_t * __a, int32x2x2_t val)
310
+ {
311
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
312
+ }
313
+
314
+ __extension__ extern __inline void
315
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
316
+ vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val)
317
+ {
318
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
319
+ }
320
+
321
+ __extension__ extern __inline void
322
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
323
+ vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val)
324
+ {
325
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
326
+ }
327
+
328
+ __extension__ extern __inline void
329
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
330
+ vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val)
331
+ {
332
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
333
+ }
334
+
335
+ __extension__ extern __inline void
336
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
337
+ vst1_f16_x2 (float16_t * __a, float16x4x2_t val)
338
+ {
339
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
340
+ }
341
+
342
+ __extension__ extern __inline void
343
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
344
+ vst1_f32_x2 (float32_t * __a, float32x2x2_t val)
345
+ {
346
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
347
+ }
348
+
349
+ __extension__ extern __inline void
350
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
351
+ vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val)
352
+ {
353
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
354
+ }
355
+
356
+ __extension__ extern __inline void
357
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
358
+ vst1q_s8_x2 (int8_t * __a, int8x16x2_t val)
359
+ {
360
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
361
+ }
362
+
363
+ __extension__ extern __inline void
364
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
365
+ vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val)
366
+ {
367
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
368
+ }
369
+
370
+ __extension__ extern __inline void
371
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
372
+ vst1q_s16_x2 (int16_t * __a, int16x8x2_t val)
373
+ {
374
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
375
+ }
376
+
377
+ __extension__ extern __inline void
378
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
379
+ vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val)
380
+ {
381
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
382
+ }
383
+
384
+ __extension__ extern __inline void
385
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
386
+ vst1q_s32_x2 (int32_t * __a, int32x4x2_t val)
387
+ {
388
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
389
+ }
390
+
391
+ __extension__ extern __inline void
392
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
393
+ vst1q_s64_x2 (int64_t * __a, int64x2x2_t val)
394
+ {
395
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
396
+ }
397
+
398
+ __extension__ extern __inline void
399
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
400
+ vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val)
401
+ {
402
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
403
+ }
404
+
405
+ __extension__ extern __inline void
406
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
407
+ vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val)
408
+ {
409
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
410
+ }
411
+
412
+ __extension__ extern __inline void
413
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
414
+ vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val)
415
+ {
416
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
417
+ }
418
+
419
+ __extension__ extern __inline void
420
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
421
+ vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val)
422
+ {
423
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
424
+ }
425
+
426
+ __extension__ extern __inline void
427
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
428
+ vst1q_f16_x2 (float16_t * __a, float16x8x2_t val)
429
+ {
430
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
431
+ }
432
+
433
+ __extension__ extern __inline void
434
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
435
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
436
+ {
437
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
438
+ }
439
+
440
+ __extension__ extern __inline void
441
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
442
+ vst1q_f64_x2 (float64_t * __a, float64x2x2_t val)
443
+ {
444
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
445
+ }
446
+
447
+ __extension__ extern __inline void
448
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
449
+ vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val)
450
+ {
451
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
452
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ /* Workaround for missing vst1q_f32_x2 in gcc-8. */
2
+
3
+ __extension__ extern __inline void
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
6
+ {
7
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
8
+ }
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+
8
+ #include <ATen/cpu/vec/vec_base.h>
9
+ #if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR))
10
+ #include <ATen/cpu/vec/vec256/vec256_float.h>
11
+ #include <ATen/cpu/vec/vec256/vec256_float_neon.h>
12
+ #include <ATen/cpu/vec/vec256/vec256_half_neon.h>
13
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
14
+ #include <ATen/cpu/vec/vec256/vec256_double.h>
15
+ #include <ATen/cpu/vec/vec256/vec256_int.h>
16
+ #include <ATen/cpu/vec/vec256/vec256_qint.h>
17
+ #include <ATen/cpu/vec/vec256/vec256_complex_float.h>
18
+ #include <ATen/cpu/vec/vec256/vec256_complex_double.h>
19
+ #elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX)
20
+ #include <ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h>
21
+ #else
22
+ #include <ATen/cpu/vec/vec256/zarch/vec256_zarch.h>
23
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
24
+ #endif
25
+
26
+ #include <ATen/cpu/vec/vec256/vec256_convert.h>
27
+ #include <ATen/cpu/vec/vec256/vec256_mask.h>
28
+
29
+ #include <algorithm>
30
+ #include <cstddef>
31
+ #include <cstdint>
32
+ #include <cstring>
33
+ #include <ostream>
34
+
35
+ namespace at::vec {
36
+
37
+ // Note [CPU_CAPABILITY namespace]
38
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
39
+ // This header, and all of its subheaders, will be compiled with
40
+ // different architecture flags for each supported set of vector
41
+ // intrinsics. So we need to make sure they aren't inadvertently
42
+ // linked together. We do this by declaring objects in an `inline
43
+ // namespace` which changes the name mangling, but can still be
44
+ // accessed as `at::vec`.
45
+ inline namespace CPU_CAPABILITY {
46
+
47
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
48
+ stream << val.val_;
49
+ return stream;
50
+ }
51
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
52
+ stream << static_cast<int>(val.val_);
53
+ return stream;
54
+ }
55
+ inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
56
+ stream << static_cast<unsigned int>(val.val_);
57
+ return stream;
58
+ }
59
+
60
+ template <typename T>
61
+ std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
62
+ T buf[Vectorized<T>::size()];
63
+ vec.store(buf);
64
+ stream << "vec[";
65
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
66
+ if (i != 0) {
67
+ stream << ", ";
68
+ }
69
+ stream << buf[i];
70
+ }
71
+ stream << "]";
72
+ return stream;
73
+ }
74
+
75
+
76
+ #if defined(CPU_CAPABILITY_AVX2)
77
+
78
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
79
+
80
// Bit-level reinterpretation between element types of the same total
// width (256 bits).  These are casts of the register contents, not
// numeric conversions — the underlying bits are unchanged and the
// intrinsics compile to zero instructions.
template<>
inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
  return _mm256_castpd_ps(src);
}

template<>
inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
  return _mm256_castps_pd(src);
}

template<>
inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
  return _mm256_castsi256_ps(src);
}

template<>
inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
  return _mm256_castsi256_pd(src);
}
99
+
100
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
101
+ #ifndef _MSC_VER
102
+ // MSVC is not working well on complex function overload.
103
// Gather: load elements from base_addr + vindex[i] * scale for each lane.
// `scale` is a byte multiplier and is restricted by the hardware (and the
// enable_if) to 1, 2, 4 or 8.
template<int64_t scale = 1>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
  return _mm256_i64gather_pd(base_addr, vindex, scale);
}

template<int64_t scale = 1>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
  return _mm256_i32gather_ps(base_addr, vindex, scale);
}
114
+ #endif
115
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
116
+ #ifndef _MSC_VER
117
+ // MSVC is not working well on complex function overload.
118
// Masked gather: lanes whose mask sign bit is set are loaded from
// base_addr + vindex[i] * scale; the remaining lanes are copied from
// `src`.  `scale` must be 1, 2, 4 or 8 (hardware restriction, enforced
// by the enable_if).
// NOTE(review): `mask` is taken by non-const reference even though it is
// only read here — presumably to mirror the generic declaration in
// vec_base.h; confirm before tightening to const&.
template<int64_t scale = 1>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
inline mask_gather(const Vectorized<double>& src, const double* base_addr,
                   const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
  return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale);
}

template<int64_t scale = 1>
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
inline mask_gather(const Vectorized<float>& src, const float* base_addr,
                   const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
  return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale);
}
131
+ #endif
132
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
133
+
134
+ // Only works for inputs in the range: [-2^51, 2^51]
135
+ // From: https://stackoverflow.com/a/41148578
136
// double -> int64 conversion without a native AVX2 instruction.
// Adding 2^52 + 2^51 forces the integer part of each double into the
// low mantissa bits; subtracting the same constant's bit pattern as an
// integer recovers the (truncated) value.
// Only works for inputs in the range: [-2^51, 2^51]
// From: https://stackoverflow.com/a/41148578
template<>
Vectorized<int64_t>
inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
  auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000));
  return _mm256_sub_epi64(
      _mm256_castpd_si256(x),
      _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000))
  );
}
145
+
146
// float -> int32 conversion with truncation toward zero (cvtt);
// AVX2 has a native instruction for this case.
template<>
Vectorized<int32_t>
inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
  return _mm256_cvttps_epi32(src);
}
151
+
152
+ // From: https://stackoverflow.com/a/41148578
153
// int64 -> double conversion without a native AVX2 instruction.
// The 32-bit halves of each int64 are separately embedded into double
// mantissas by OR-ing/XOR-ing magic exponent constants, then recombined
// with floating-point arithmetic that cancels the constants out.
// From: https://stackoverflow.com/a/41148578
template<>
Vectorized<double>
inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
  __m256i magic_i_lo = _mm256_set1_epi64x(0x4330000000000000); /* 2^52 */
  __m256i magic_i_hi32 = _mm256_set1_epi64x(0x4530000080000000); /* 2^84 + 2^63 */
  __m256i magic_i_all = _mm256_set1_epi64x(0x4530000080100000); /* 2^84 + 2^63 + 2^52 */
  __m256d magic_d_all = _mm256_castsi256_pd(magic_i_all);

  __m256i v_lo = _mm256_blend_epi32(magic_i_lo, src, 0b01010101); /* v_low = low32 + 2^52 */
  __m256i v_hi = _mm256_srli_epi64(src, 32);
  v_hi = _mm256_xor_si256(v_hi, magic_i_hi32); /* v_hi = high32*2^32 + 2^84 + 2^63 */
  /* int64 = low32 + high32*2^32 = v_hi + v_lo - 2^52 - 2^63 - 2^84 */
  __m256d v_hi_dbl = _mm256_sub_pd(_mm256_castsi256_pd(v_hi), magic_d_all);
  __m256d result = _mm256_add_pd(v_hi_dbl, _mm256_castsi256_pd(v_lo));
  return result;
}
169
+
170
// int32 -> float conversion; AVX2 has a native instruction for this case.
template<>
Vectorized<float>
inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
  return _mm256_cvtepi32_ps(src);
}
175
+
176
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
177
+
178
// Interleave two double vectors element-wise.  Done in two steps because
// AVX2 shuffles mostly operate within 128-bit lanes: first swap lanes
// between a and b, then permute across lanes to pair up elements.
template <>
std::pair<Vectorized<double>, Vectorized<double>>
inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
  // inputs:
  //   a = {a0, a1, a2, a3}
  //   b = {b0, b1, b2, b3}

  // swap lanes:
  //   a_swapped = {a0, a1, b0, b1}
  //   b_swapped = {a2, a3, b2, b3}
  auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000);  // 0, 2.   4 bits apart
  auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001);  // 1, 3.   4 bits apart

  // group cols crossing lanes:
  //   return {a0, b0, a1, b1}
  //          {a2, b2, a3, b3}
  return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000),  // 0, 2, 1, 3
                        _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3
}
197
+
198
// Interleave two float vectors element-wise: lane swap first, then a
// cross-lane permute to pair corresponding elements of a and b.
template <>
std::pair<Vectorized<float>, Vectorized<float>>
inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
  // inputs:
  //   a = {a0, a1, a2, a3, a4, a5, a6, a7}
  //   b = {b0, b1, b2, b3, b4, b5, b6, b7}

  // swap lanes:
  //   a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3}
  //   b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7}
  // TODO: can we support caching this?
  auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000);  // 0, 2.   4 bits apart
  auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001);  // 1, 3.   4 bits apart

  // group cols crossing lanes:
  //   return {a0, b0, a1, b1, a2, b2, a3, b3}
  //          {a4, b4, a5, b5, a6, b6, a7, b7}
  const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
  return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl),
                        _mm256_permutevar8x32_ps(b_swapped, group_ctrl));
}
219
+
220
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
221
+
222
// Inverse of interleave2<double>: separate paired {a_i, b_i} elements
// back into one all-a and one all-b vector.  Cross-lane permute first,
// then a lane swap.
template <>
std::pair<Vectorized<double>, Vectorized<double>>
inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
  // inputs:
  //   a = {a0, b0, a1, b1}
  //   b = {a2, b2, a3, b3}

  // group cols crossing lanes:
  //   a_grouped = {a0, a1, b0, b1}
  //   b_grouped = {a2, a3, b2, b3}
  auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000);  // 0, 2, 1, 3
  auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000);  // 0, 2, 1, 3

  // swap lanes:
  //   return {a0, a1, a2, a3}
  //          {b0, b1, b2, b3}
  return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000),  // 0, 2.   4 bits apart
                        _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3.   4 bits apart
}
241
+
242
// Inverse of interleave2<float>: cross-lane permute groups the a- and
// b-elements within each input, then a lane swap separates them.
template <>
std::pair<Vectorized<float>, Vectorized<float>>
inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
  // inputs:
  //   a = {a0, b0, a1, b1, a2, b2, a3, b3}
  //   b = {a4, b4, a5, b5, a6, b6, a7, b7}

  // group cols crossing lanes:
  //   a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3}
  //   b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7}
  // TODO: can we support caching this?
  const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
  auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl);
  auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl);

  // swap lanes:
  //   return {a0, a1, a2, a3, a4, a5, a6, a7}
  //          {b0, b1, b2, b3, b4, b5, b6, b7}
  return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000),  // 0, 2.   4 bits apart
                        _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3.   4 bits apart
}
263
+
264
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
265
+
266
// flip: reverse the order of all elements in the 256-bit vector.
// 32/64-bit element types use a single cross-lane permute; 8/16-bit
// element types need two steps because _mm256_shuffle_epi8 only
// shuffles within each 128-bit lane, so the lanes are swapped
// afterwards with _mm256_permute2x128_si256.
template<>
inline Vectorized<float> flip(const Vectorized<float> & v) {
  // Index vector {7,...,0} read back-to-front by the permute.
  const __m256i mask_float = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
  return _mm256_permutevar8x32_ps(v, mask_float);
}

template<>
inline Vectorized<double> flip(const Vectorized<double> & v) {
  return _mm256_permute4x64_pd(v, 27);  // 27 == _MM_SHUFFLE(0, 1, 2, 3)
}

template<>
inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
  return _mm256_permute4x64_epi64(v, 27);  // 27 == _MM_SHUFFLE(0, 1, 2, 3)
}

template<>
inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
  const __m256i mask_int32 = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
  return _mm256_permutevar8x32_epi32(v, mask_int32);
}

template<>
inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
  // Reverse the 16-bit elements within each 128-bit lane (byte pairs
  // are kept in order within each element), then swap the two lanes.
  const __m256i mask = _mm256_set_epi8(
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  );
  auto reversed = _mm256_shuffle_epi8(v, mask);
  return _mm256_permute2x128_si256(reversed, reversed, 1);
}

// Shared helper for both signed and unsigned 8-bit flips: reverse bytes
// within each lane, then swap lanes.
inline __m256i flip8(const __m256i & v) {
  const __m256i mask_int8 = _mm256_set_epi8(
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  );
  auto reversed = _mm256_shuffle_epi8(v, mask_int8);
  return _mm256_permute2x128_si256(reversed, reversed, 1);
}

template<>
inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
  return flip8(v);
}

template<>
inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
  return flip8(v);
}
316
+
317
// Logical AND of two bool vectors, done as a 256-bit bitwise AND over
// their byte storage (valid because each bool lane is 0x00 or 0x01).
// NOTE(review): `ret` is passed directly to memcpy as the destination —
// presumably Vectorized<bool> converts to a pointer to its storage;
// verify against the Vectorized<bool> definition in vec_base.h.
inline Vectorized<bool> operator&&(
    const Vectorized<bool>& self,
    const Vectorized<bool>& other) {
  const __m256i* self_ = reinterpret_cast<const __m256i*>(self.as_bytes());
  const __m256i* other_ = reinterpret_cast<const __m256i*>(other.as_bytes());
  __m256i out = _mm256_and_si256(*self_, *other_);
  Vectorized<bool> ret;
  // Copy exactly size() bool lanes back into the result vector.
  std::memcpy(ret, &out, ret.size() * sizeof(bool));
  return ret;
}
327
+
328
+ #endif // defined(CPU_CAPABILITY_AVX2)
329
+
330
+ }} // namespace at::vec::CPU_CAPABILITY
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h ADDED
@@ -0,0 +1,1182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(CPU_CAPABILITY_AVX2)
11
+ #define SLEEF_STATIC_LIBS
12
+ #include <sleef.h>
13
+ #endif
14
+
15
+ #pragma GCC diagnostic push
16
+ #pragma GCC diagnostic ignored "-Wignored-qualifiers"
17
+
18
+ namespace at::vec {
19
+ // See Note [CPU_CAPABILITY namespace]
20
+ inline namespace CPU_CAPABILITY {
21
+
22
+ #if defined(CPU_CAPABILITY_AVX2)
23
+
24
+ #ifndef SLEEF_CONST
25
+ #if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
26
+ #define SLEEF_CONST const
27
+ #else
28
+ #define SLEEF_CONST
29
+ #endif
30
+ #define SLEEF_CONST_OLD SLEEF_CONST
31
+ #else
32
+ #define SLEEF_CONST_OLD
33
+ #endif
34
+
35
// bfloat16 conversion

// Widen 8 bf16 values to 8 floats: zero-extend each 16-bit value to
// 32 bits and shift left by 16, placing the bf16 bits in the high half
// of the float representation (bf16 is the top 16 bits of a float32).
static inline void cvtbf16_fp32(const __m128i& a, __m256& o) {
  o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16));
}

// Widen 16 bf16 values to two float vectors (low half -> o1, high -> o2).
static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
  __m128i lo = _mm256_extractf128_si256(a, 0);
  __m128i hi = _mm256_extractf128_si256(a, 1);
  cvtbf16_fp32(lo, o1);
  cvtbf16_fp32(hi, o2);
}
46
+
47
// Narrow 8 floats to 8 bf16 values with round-to-nearest-even:
// add (0x7fff + lsb-of-result) before truncating the low 16 bits, which
// rounds ties toward the value with an even mantissa.  NaN inputs
// (detected via the unordered self-compare) are mapped to 0xffff.
static inline __m128i cvtfp32_bf16(const __m256& src) {
  __m256i value = _mm256_castps_si256(src);
  __m256i nan = _mm256_set1_epi32(0xffff);
  // mask lanes are all-ones where src is not NaN.
  __m256i mask = _mm256_castps_si256(_mm256_cmp_ps(src, src, _CMP_ORD_Q));
  __m256i ones = _mm256_set1_epi32(0x1);
  __m256i vec_bias = _mm256_set1_epi32(0x7fff);
  // uint32_t lsb = (input >> 16) & 1;
  auto t_value = _mm256_and_si256(_mm256_srli_epi32(value, 16), ones);
  // uint32_t rounding_bias = 0x7fff + lsb;
  t_value = _mm256_add_epi32(t_value, vec_bias);
  // input += rounding_bias;
  t_value = _mm256_add_epi32(t_value, value);
  // input = input >> 16;
  t_value = _mm256_srli_epi32(t_value, 16);
  // Check NaN before converting back to bf16
  t_value = _mm256_blendv_epi8(nan, t_value, mask);
  // Pack 32->16 bits; packus works per-lane, so a cross-lane permute
  // restores element order before taking the low 128 bits.
  t_value = _mm256_packus_epi32(t_value, t_value); // t[4-7] t[4-7] t[0-4] t[0-4]
  t_value = _mm256_permute4x64_epi64(t_value, 0xd8); // 11     01     10     00
  return _mm256_castsi256_si128(t_value);
}
67
+
68
// Narrow two float vectors (16 floats) to 16 bf16 values in one __m256i,
// using the same round-to-nearest-even bias trick as the single-vector
// overload; NaN lanes become 0xffff.
static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) {
  __m256i lo = _mm256_castps_si256(a);
  __m256i hi = _mm256_castps_si256(b);
  __m256i nan = _mm256_set1_epi32(0xffff);
  // All-ones where the corresponding input lane is not NaN.
  __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q));
  __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q));
  __m256i ones = _mm256_set1_epi32(0x1);
  __m256i vec_bias = _mm256_set1_epi32(0x7fff);
  // uint32_t lsb = (input >> 16) & 1;
  auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones);
  auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones);
  // uint32_t rounding_bias = 0x7fff + lsb;
  t_lo = _mm256_add_epi32(t_lo, vec_bias);
  t_hi = _mm256_add_epi32(t_hi, vec_bias);
  // input += rounding_bias;
  t_lo = _mm256_add_epi32(t_lo, lo);
  t_hi = _mm256_add_epi32(t_hi, hi);
  // input = input >> 16;
  t_lo = _mm256_srli_epi32(t_lo, 16);
  t_hi = _mm256_srli_epi32(t_hi, 16);
  // Check NaN before converting back to bf16
  t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo);
  t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi);

  // packus interleaves 128-bit lanes; permute restores element order.
  t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
  return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11        01        10        00
}
95
+
96
// Pack two float comparison-result vectors into one 16-bit-element
// vector by truncating each 32-bit lane to its top 16 bits (no
// rounding — compare results are all-zeros/all-ones masks, so plain
// truncation preserves them).  Used for the bf16 compare ops.
static inline __m256i merge_compare_result(const __m256& a, const __m256& b) {
  __m256i lo = _mm256_castps_si256(a);
  __m256i hi = _mm256_castps_si256(b);
  lo = _mm256_srli_epi32(lo, 16);
  hi = _mm256_srli_epi32(hi, 16);
  // packus works per 128-bit lane; permute restores element order.
  auto out = _mm256_packus_epi32(lo, hi);
  return _mm256_permute4x64_epi64(out, 0xd8);
}
104
+
105
// float16 conversion

// Widen 8 fp16 values to 8 floats using the F16C instruction.
static inline void cvtfp16_fp32(const __m128i& a, __m256& o) {
  o = _mm256_cvtph_ps(a);
}

// Widen 16 fp16 values to two float vectors (low half -> o1, high -> o2).
static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
  __m128i lo = _mm256_extractf128_si256(a, 0);
  __m128i hi = _mm256_extractf128_si256(a, 1);
  cvtfp16_fp32(lo, o1);
  cvtfp16_fp32(hi, o2);
}
116
+
117
// Narrow 8 floats to 8 fp16 values (F16C), round-to-nearest-even,
// exceptions suppressed.
static inline __m128i cvtfp32_fp16(const __m256& src) {
  return _mm256_cvtps_ph(
      src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}

// Narrow two float vectors (16 floats) to 16 fp16 values in one __m256i:
// a occupies the low 128 bits of the result, b the high 128 bits.
static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) {
  __m128i lo = _mm256_cvtps_ph(
      a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  __m128i hi = _mm256_cvtps_ph(
      b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
}
129
+
130
+ // dtype conversion between float16/bfloat16 and float32
131
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
132
+ inline void cvt_to_fp32(const __m128i& a, __m256& o);
133
+ template <> inline void cvt_to_fp32<BFloat16>(const __m128i& a, __m256& o) {
134
+ cvtbf16_fp32(a, o);
135
+ };
136
+ template <> inline void cvt_to_fp32<Half>(const __m128i& a, __m256& o) {
137
+ cvtfp16_fp32(a, o);
138
+ }
139
+
140
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
141
+ inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2);
142
+ template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m256& o1, __m256& o2) {
143
+ cvtbf16_fp32(a, o1, o2);
144
+ }
145
+ template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m256& o1, __m256& o2) {
146
+ cvtfp16_fp32(a, o1, o2);
147
+ }
148
+
149
// cvt_from_fp32<T, is_compare_op>: type-dispatched narrowing of two
// float32 vectors back to 16 reduced-precision values.  For BFloat16,
// compare results take the truncating merge_compare_result path so the
// all-ones/all-zeros masks survive intact, while ordinary values take
// the rounding cvtfp32_bf16 path; Half uses the same hardware
// conversion for both cases.
template <typename T, bool is_compare_op = false,
          typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline __m256i cvt_from_fp32(const __m256& a, const __m256& b);
template <> inline __m256i cvt_from_fp32<BFloat16, false>(const __m256& a, const __m256& b) {
  return cvtfp32_bf16(a, b);
}
template <> inline __m256i cvt_from_fp32<BFloat16, true>(const __m256& a, const __m256& b) {
  return merge_compare_result(a, b);
}
template <> inline __m256i cvt_from_fp32<Half, false>(const __m256& a, const __m256& b) {
  return cvtfp32_fp16(a, b);
}
template <> inline __m256i cvt_from_fp32<Half, true>(const __m256& a, const __m256& b) {
  return cvtfp32_fp16(a, b);
}
164
+
165
+ template <typename T>
166
+ class Vectorized16 {
167
+ static_assert(
168
+ is_reduced_floating_point_v<T>,
169
+ "Support only float16 and bfloat16.");
170
+ protected:
171
+ __m256i values;
172
+ public:
173
+ using value_type = uint16_t;
174
+ using size_type = int;
175
+ static constexpr size_type size() {
176
+ return 16;
177
+ }
178
+ Vectorized16() {}
179
+ Vectorized16(__m256i v) : values(v) {}
180
+ Vectorized16(T val) {
181
+ value_type uw = val.x;
182
+ values = _mm256_set1_epi16(uw);
183
+ }
184
+ Vectorized16(T val1, T val2, T val3, T val4,
185
+ T val5, T val6, T val7, T val8,
186
+ T val9, T val10, T val11, T val12,
187
+ T val13, T val14, T val15, T val16) {
188
+ values = _mm256_setr_epi16(
189
+ val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x,
190
+ val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x);
191
+ }
192
+ operator __m256i() const {
193
+ return values;
194
+ }
195
+ T& operator[](int idx) = delete;
196
+ const T& operator[](int idx) const = delete;
197
+ int zero_mask() const {
198
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
199
+ __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0));
200
+ return _mm256_movemask_epi8(cmp);
201
+ }
202
+ static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
203
+ if (count == size())
204
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
205
+
206
+ __at_align__ int16_t tmp_values[size()];
207
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
208
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(tmp_values));
209
+ }
210
+ void store(void* ptr, int count = size()) const {
211
+ if (count == size()) {
212
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
213
+ } else if (count > 0) {
214
+ __at_align__ int16_t tmp_values[size()];
215
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
216
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
217
+ }
218
+ }
219
+ template <int64_t mask>
220
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
221
+ __at_align__ int16_t tmp_values[size()];
222
+ a.store(tmp_values);
223
+ if (mask & 0x01)
224
+ tmp_values[0] = _mm256_extract_epi16(b.values, 0);
225
+ if (mask & 0x02)
226
+ tmp_values[1] = _mm256_extract_epi16(b.values, 1);
227
+ if (mask & 0x04)
228
+ tmp_values[2] = _mm256_extract_epi16(b.values, 2);
229
+ if (mask & 0x08)
230
+ tmp_values[3] = _mm256_extract_epi16(b.values, 3);
231
+ if (mask & 0x10)
232
+ tmp_values[4] = _mm256_extract_epi16(b.values, 4);
233
+ if (mask & 0x20)
234
+ tmp_values[5] = _mm256_extract_epi16(b.values, 5);
235
+ if (mask & 0x40)
236
+ tmp_values[6] = _mm256_extract_epi16(b.values, 6);
237
+ if (mask & 0x80)
238
+ tmp_values[7] = _mm256_extract_epi16(b.values, 7);
239
+ if (mask & 0x100)
240
+ tmp_values[8] = _mm256_extract_epi16(b.values, 8);
241
+ if (mask & 0x200)
242
+ tmp_values[9] = _mm256_extract_epi16(b.values, 9);
243
+ if (mask & 0x400)
244
+ tmp_values[10] = _mm256_extract_epi16(b.values, 10);
245
+ if (mask & 0x800)
246
+ tmp_values[11] = _mm256_extract_epi16(b.values, 11);
247
+ if (mask & 0x1000)
248
+ tmp_values[12] = _mm256_extract_epi16(b.values, 12);
249
+ if (mask & 0x2000)
250
+ tmp_values[13] = _mm256_extract_epi16(b.values, 13);
251
+ if (mask & 0x4000)
252
+ tmp_values[14] = _mm256_extract_epi16(b.values, 14);
253
+ if (mask & 0x8000)
254
+ tmp_values[15] = _mm256_extract_epi16(b.values, 15);
255
+ return loadu(tmp_values);
256
+ }
257
+ static Vectorized<T> blendv(const Vectorized<T>& a,
258
+ const Vectorized<T>& b, const Vectorized<T>& mask) {
259
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
260
+ }
261
+ template<typename step_t>
262
+ static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
263
+ return Vectorized<T>(
264
+ base, base + step, base + 2 * step, base + 3 * step,
265
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
266
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
267
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
268
+ }
269
+ static Vectorized<T> set(const Vectorized<T>& a,
270
+ const Vectorized<T>& b, int64_t count = size()) {
271
+ switch (count) {
272
+ case 0:
273
+ return a;
274
+ case 1:
275
+ return blend<1>(a, b);
276
+ case 2:
277
+ return blend<3>(a, b);
278
+ case 3:
279
+ return blend<7>(a, b);
280
+ case 4:
281
+ return blend<15>(a, b);
282
+ case 5:
283
+ return blend<31>(a, b);
284
+ case 6:
285
+ return blend<63>(a, b);
286
+ case 7:
287
+ return blend<127>(a, b);
288
+ case 8:
289
+ return blend<255>(a, b);
290
+ case 9:
291
+ return blend<511>(a, b);
292
+ case 10:
293
+ return blend<1023>(a, b);
294
+ case 11:
295
+ return blend<2047>(a, b);
296
+ case 12:
297
+ return blend<4095>(a, b);
298
+ case 13:
299
+ return blend<8191>(a, b);
300
+ case 14:
301
+ return blend<16383>(a, b);
302
+ case 15:
303
+ return blend<32767>(a, b);
304
+ }
305
+ return b;
306
+ }
307
+
308
+ Vectorized<T> map(SLEEF_CONST __m256 (*SLEEF_CONST_OLD vop)(__m256)) const {
309
+ __m256 lo, hi;
310
+ cvt_to_fp32<T>(values, lo, hi);
311
+ const auto o1 = vop(lo);
312
+ const auto o2 = vop(hi);
313
+ return cvt_from_fp32<T>(o1, o2);
314
+ }
315
+ Vectorized<T> isnan() const {
316
+ __m256 lo, hi;
317
+ cvt_to_fp32<T>(values, lo, hi);
318
+ lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
319
+ hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
320
+ return merge_compare_result(lo, hi);
321
+ }
322
+ Vectorized<T> abs() const {
323
+ return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values);
324
+ }
325
+ Vectorized<T> angle() const {
326
+ __m256 lo, hi;
327
+ cvt_to_fp32<T>(values, lo, hi);
328
+ auto angle_lambda = [](__m256 values_2) {
329
+ const auto zero_vec = _mm256_set1_ps(0.f);
330
+ const auto nan_vec = _mm256_set1_ps(NAN);
331
+ const auto not_nan_mask = _mm256_cmp_ps(values_2, values_2, _CMP_EQ_OQ);
332
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
333
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
334
+
335
+ const auto neg_mask = _mm256_cmp_ps(values_2, zero_vec, _CMP_LT_OQ);
336
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
337
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
338
+ return angle;
339
+ };
340
+ auto o1 = angle_lambda(lo);
341
+ auto o2 = angle_lambda(hi);
342
+ return cvt_from_fp32<T>(o1, o2);
343
+ }
344
+ Vectorized<T> real() const {
345
+ return *this;
346
+ }
347
+ Vectorized<T> imag() const {
348
+ return _mm256_set1_epi16(0);
349
+ }
350
+ Vectorized<T> conj() const {
351
+ return *this;
352
+ }
353
+ Vectorized<T> acos() const {
354
+ return map(Sleef_acosf8_u10);
355
+ }
356
+ Vectorized<T> acosh() const {
357
+ return map(Sleef_acoshf8_u10);
358
+ }
359
+ Vectorized<T> asin() const {
360
+ return map(Sleef_asinf8_u10);
361
+ }
362
+ Vectorized<T> atan() const {
363
+ return map(Sleef_atanf8_u10);
364
+ }
365
+ Vectorized<T> atanh() const {
366
+ return map(Sleef_atanhf8_u10);
367
+ }
368
+ Vectorized<T> atan2(const Vectorized<T> &b) const {
369
+ __m256 lo, hi;
370
+ __m256 b1, b2;
371
+ cvt_to_fp32<T>(values, lo, hi);
372
+ cvt_to_fp32<T>(b.values, b1, b2);
373
+ auto o1 = Sleef_atan2f8_u10(lo, b1);
374
+ auto o2 = Sleef_atan2f8_u10(hi, b2);
375
+ return cvt_from_fp32<T>(o1, o2);
376
+ }
377
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
378
+ // copy sign bit (0x8000) from sign and remaining bits from values
379
+ __m256i mask_value = _mm256_set1_epi32(~0x80008000);
380
+ __m256i mask_signbit = _mm256_set1_epi32(0x80008000);
381
+ return Vectorized<T>(
382
+ _mm256_or_si256(
383
+ _mm256_and_si256(values, mask_value),
384
+ _mm256_and_si256(sign, mask_signbit)));
385
+ }
386
+ Vectorized<T> erf() const {
387
+ return map(Sleef_erff8_u10);
388
+ }
389
+ Vectorized<T> erfc() const {
390
+ return map(Sleef_erfcf8_u15);
391
+ }
392
+ Vectorized<T> erfinv() const {
393
+ __m256 lo, hi;
394
+ cvt_to_fp32<T>(values, lo, hi);
395
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
396
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
397
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
398
+ for (int64_t i = 0; i < size() / 2; i++) {
399
+ tmp1[i] = calc_erfinv(tmp1[i]);
400
+ tmp2[i] = calc_erfinv(tmp2[i]);
401
+ }
402
+ auto o1 = _mm256_loadu_ps(tmp1);
403
+ auto o2 = _mm256_loadu_ps(tmp2);
404
+ return cvt_from_fp32<T>(o1, o2);
405
+ }
406
+ Vectorized<T> exp() const {
407
+ return map(Sleef_expf8_u10);
408
+ }
409
+ Vectorized<T> exp2() const {
410
+ return map(Sleef_exp2f8_u10);
411
+ }
412
+ Vectorized<T> expm1() const {
413
+ return map(Sleef_expm1f8_u10);
414
+ }
415
+ Vectorized<T> exp_u20() const {
416
+ return exp();
417
+ }
418
+ Vectorized<T> fmod(const Vectorized<T> & q) const {
419
+ __m256 x_lo, x_hi;
420
+ cvt_to_fp32<T>(values, x_lo, x_hi);
421
+ __m256 q_lo, q_hi;
422
+ cvt_to_fp32<T>(q.values, q_lo, q_hi);
423
+ auto o1 = Sleef_fmodf8(x_lo, q_lo);
424
+ auto o2 = Sleef_fmodf8(x_hi, q_hi);
425
+ return cvt_from_fp32<T>(o1, o2);
426
+ }
427
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
428
+ __m256 lo, hi;
429
+ __m256 b1, b2;
430
+ cvt_to_fp32<T>(values, lo, hi);
431
+ cvt_to_fp32<T>(b.values, b1, b2);
432
+ auto o1 = Sleef_hypotf8_u05(lo, b1);
433
+ auto o2 = Sleef_hypotf8_u05(hi, b2);
434
+ return cvt_from_fp32<T>(o1, o2);
435
+ }
436
+ Vectorized<T> i0() const {
437
+ __m256 lo, hi;
438
+ cvt_to_fp32<T>(values, lo, hi);
439
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
440
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
441
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
442
+ for (int64_t i = 0; i < size() / 2; i++) {
443
+ tmp1[i] = calc_i0(tmp1[i]);
444
+ tmp2[i] = calc_i0(tmp2[i]);
445
+ }
446
+ auto o1 = _mm256_loadu_ps(tmp1);
447
+ auto o2 = _mm256_loadu_ps(tmp2);
448
+ return cvt_from_fp32<T>(o1, o2);
449
+ }
450
+ Vectorized<T> i0e() const {
451
+ __m256 lo, hi;
452
+ cvt_to_fp32<T>(values, lo, hi);
453
+ constexpr auto sz = size();
454
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
455
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
456
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
457
+
458
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
459
+ tmp1[i] = calc_i0e(tmp1[i]);
460
+ tmp2[i] = calc_i0e(tmp2[i]);
461
+ }
462
+ const auto o1 = _mm256_loadu_ps(tmp1);
463
+ const auto o2 = _mm256_loadu_ps(tmp2);
464
+ return cvt_from_fp32<T>(o1, o2);
465
+ }
466
+ Vectorized<T> digamma() const {
467
+ __m256 lo, hi;
468
+ cvt_to_fp32<T>(values, lo, hi);
469
+ constexpr auto sz = size();
470
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
471
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
472
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
473
+
474
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
475
+ tmp1[i] = calc_digamma(tmp1[i]);
476
+ tmp2[i] = calc_digamma(tmp2[i]);
477
+ }
478
+ const auto o1 = _mm256_loadu_ps(tmp1);
479
+ const auto o2 = _mm256_loadu_ps(tmp2);
480
+ return cvt_from_fp32<T>(o1, o2);
481
+ }
482
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
483
+ __m256 lo, hi;
484
+ __m256 xlo, xhi;
485
+ cvt_to_fp32<T>(values, lo, hi);
486
+ cvt_to_fp32<T>(x.values, xlo, xhi);
487
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
488
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
489
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
490
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
491
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
492
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
493
+ for (int64_t i = 0; i < size() / 2; ++i) {
494
+ tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
495
+ tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
496
+ }
497
+ auto o1 = _mm256_loadu_ps(tmp1);
498
+ auto o2 = _mm256_loadu_ps(tmp2);
499
+ return cvt_from_fp32<T>(o1, o2);
500
+ }
501
+
502
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
503
+ __m256 lo, hi;
504
+ __m256 xlo, xhi;
505
+ cvt_to_fp32<T>(values, lo, hi);
506
+ cvt_to_fp32<T>(x.values, xlo, xhi);
507
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
508
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
509
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
510
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
511
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
512
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
513
+ for (int64_t i = 0; i < size() / 2; ++i) {
514
+ tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
515
+ tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
516
+ }
517
+ auto o1 = _mm256_loadu_ps(tmp1);
518
+ auto o2 = _mm256_loadu_ps(tmp2);
519
+ return cvt_from_fp32<T>(o1, o2);
520
+ }
521
+ Vectorized<T> log() const {
522
+ return map(Sleef_logf8_u10);
523
+ }
524
+ Vectorized<T> log2() const {
525
+ return map(Sleef_log2f8_u10);
526
+ }
527
+ Vectorized<T> log10() const {
528
+ return map(Sleef_log10f8_u10);
529
+ }
530
+ Vectorized<T> log1p() const {
531
+ return map(Sleef_log1pf8_u10);
532
+ }
533
+ Vectorized<T> sin() const {
534
+ return map(Sleef_sinf8_u10);
535
+ }
536
+ Vectorized<T> sinh() const {
537
+ return map(Sleef_sinhf8_u10);
538
+ }
539
+ Vectorized<T> cos() const {
540
+ return map(Sleef_cosf8_u10);
541
+ }
542
+ Vectorized<T> cosh() const {
543
+ return map(Sleef_coshf8_u10);
544
+ }
545
+ Vectorized<T> ceil() const {
546
+ __m256 lo, hi;
547
+ cvt_to_fp32<T>(values, lo, hi);
548
+ auto o1 = _mm256_ceil_ps(lo);
549
+ auto o2 = _mm256_ceil_ps(hi);
550
+ return cvt_from_fp32<T>(o1, o2);
551
+ }
552
+ Vectorized<T> floor() const {
553
+ __m256 lo, hi;
554
+ cvt_to_fp32<T>(values, lo, hi);
555
+ auto o1 = _mm256_floor_ps(lo);
556
+ auto o2 = _mm256_floor_ps(hi);
557
+ return cvt_from_fp32<T>(o1, o2);
558
+ }
559
+ Vectorized<T> neg() const {
560
+ return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000));
561
+ }
562
+ Vectorized<T> round() const {
563
+ __m256 lo, hi;
564
+ cvt_to_fp32<T>(values, lo, hi);
565
+ auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
566
+ auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
567
+ return cvt_from_fp32<T>(o1, o2);
568
+ }
569
+ Vectorized<T> tan() const {
570
+ return map(Sleef_tanf8_u10);
571
+ }
572
+ Vectorized<T> tanh() const {
573
+ return map(Sleef_tanhf8_u10);
574
+ }
575
+ Vectorized<T> trunc() const {
576
+ __m256 lo, hi;
577
+ cvt_to_fp32<T>(values, lo, hi);
578
+ auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
579
+ auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
580
+ return cvt_from_fp32<T>(o1, o2);
581
+ }
582
+ Vectorized<T> lgamma() const {
583
+ return map(Sleef_lgammaf8_u10);
584
+ }
585
+ Vectorized<T> sqrt() const {
586
+ __m256 lo, hi;
587
+ cvt_to_fp32<T>(values, lo, hi);
588
+ auto o1 = _mm256_sqrt_ps(lo);
589
+ auto o2 = _mm256_sqrt_ps(hi);
590
+ return cvt_from_fp32<T>(o1, o2);
591
+ }
592
+ Vectorized<T> reciprocal() const {
593
+ __m256 lo, hi;
594
+ cvt_to_fp32<T>(values, lo, hi);
595
+ auto ones = _mm256_set1_ps(1);
596
+ auto o1 = _mm256_div_ps(ones, lo);
597
+ auto o2 = _mm256_div_ps(ones, hi);
598
+ return cvt_from_fp32<T>(o1, o2);
599
+ }
600
+ Vectorized<T> rsqrt() const {
601
+ __m256 lo, hi;
602
+ cvt_to_fp32<T>(values, lo, hi);
603
+ auto ones = _mm256_set1_ps(1);
604
+ auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo));
605
+ auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi));
606
+ return cvt_from_fp32<T>(o1, o2);
607
+ }
608
+ Vectorized<T> pow(const Vectorized<T> &b) const {
609
+ __m256 lo, hi;
610
+ __m256 b1, b2;
611
+ cvt_to_fp32<T>(values, lo, hi);
612
+ cvt_to_fp32<T>(b.values, b1, b2);
613
+ auto o1 = Sleef_powf8_u10(lo, b1);
614
+ auto o2 = Sleef_powf8_u10(hi, b2);
615
+ return cvt_from_fp32<T>(o1, o2);
616
+ }
617
+ private:
618
+ template<typename Op>
619
+ Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
620
+ __m256 a_lo, a_hi;
621
+ __m256 b_lo, b_hi;
622
+ cvt_to_fp32<T>(values, a_lo, a_hi);
623
+ cvt_to_fp32<T>(b.values, b_lo, b_hi);
624
+ auto o1 = op(a_lo, b_lo);
625
+ auto o2 = op(a_hi, b_hi);
626
+ return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
627
+ }
628
+
629
+ public:
630
+ Vectorized<T> inline operator>(const Vectorized<T>& other) const {
631
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); });
632
+ }
633
+ Vectorized<T> inline operator<(const Vectorized<T>& other) const {
634
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); });
635
+ }
636
+ Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
637
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); });
638
+ }
639
+ Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
640
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); });
641
+ }
642
+ Vectorized<T> inline operator==(const Vectorized<T>& other) const {
643
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); });
644
+ }
645
+ Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
646
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); });
647
+ }
648
+ };
649
+
650
+ template<typename T, typename Op>
651
+ static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
652
+ __m256 a_lo, a_hi;
653
+ __m256 b_lo, b_hi;
654
+ cvt_to_fp32<T>(__m256i(a), a_lo, a_hi);
655
+ cvt_to_fp32<T>(__m256i(b), b_lo, b_hi);
656
+ auto o1 = op(a_lo, b_lo);
657
+ auto o2 = op(a_hi, b_hi);
658
+ return cvt_from_fp32<T>(o1, o2);
659
+ }
660
+
661
+ template <>
662
+ class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
663
+ public:
664
+ using Vectorized16::Vectorized16;
665
+
666
+ Vectorized<BFloat16> frac() const;
667
+
668
+ Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
669
+ Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
670
+ Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
671
+ Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
672
+ Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
673
+ Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
674
+ };
675
+
676
+ Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
677
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
678
+ }
679
+ Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
680
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
681
+ }
682
+ Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
683
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
684
+ }
685
+ Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
686
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
687
+ }
688
+ Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
689
+ return _mm256_and_si256(a, b);
690
+ }
691
+ Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
692
+ return _mm256_or_si256(a, b);
693
+ }
694
+ Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
695
+ return _mm256_xor_si256(a, b);
696
+ }
697
+
698
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
699
+ return (*this == other) & Vectorized<BFloat16>(1.0f);
700
+ }
701
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
702
+ return (*this != other) & Vectorized<BFloat16>(1.0f);
703
+ }
704
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
705
+ return (*this > other) & Vectorized<BFloat16>(1.0f);
706
+ }
707
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
708
+ return (*this >= other) & Vectorized<BFloat16>(1.0f);
709
+ }
710
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
711
+ return (*this < other) & Vectorized<BFloat16>(1.0f);
712
+ }
713
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
714
+ return (*this <= other) & Vectorized<BFloat16>(1.0f);
715
+ }
716
+
717
+ // frac. Implement this here so we can use subtraction
718
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
719
+ return *this - this->trunc();
720
+ }
721
+
722
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
723
+ // either input is a NaN.
724
+ template <>
725
+ Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
726
+ __m256 a_lo, a_hi;
727
+ __m256 b_lo, b_hi;
728
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
729
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
730
+ auto max_lo = _mm256_max_ps(a_lo, b_lo);
731
+ auto max_hi = _mm256_max_ps(a_hi, b_hi);
732
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
733
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
734
+ // Exploit the fact that all-ones is a NaN.
735
+ auto o1 = _mm256_or_ps(max_lo, nan_lo);
736
+ auto o2 = _mm256_or_ps(max_hi, nan_hi);
737
+ return cvtfp32_bf16(o1, o2);
738
+ }
739
+
740
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
741
+ // either input is a NaN.
742
+ template <>
743
+ Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
744
+ __m256 a_lo, a_hi;
745
+ __m256 b_lo, b_hi;
746
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
747
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
748
+ auto min_lo = _mm256_min_ps(a_lo, b_lo);
749
+ auto min_hi = _mm256_min_ps(a_hi, b_hi);
750
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
751
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
752
+ // Exploit the fact that all-ones is a NaN.
753
+ auto o1 = _mm256_or_ps(min_lo, nan_lo);
754
+ auto o2 = _mm256_or_ps(min_hi, nan_hi);
755
+ return cvtfp32_bf16(o1, o2);
756
+ }
757
+
758
+ template <>
759
+ Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
760
+ const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
761
+ __m256 a_lo, a_hi;
762
+ __m256 min_lo, min_hi;
763
+ __m256 max_lo, max_hi;
764
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
765
+ cvtbf16_fp32(__m256i(min), min_lo, min_hi);
766
+ cvtbf16_fp32(__m256i(max), max_lo, max_hi);
767
+ auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
768
+ auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
769
+ return cvtfp32_bf16(o1, o2);
770
+ }
771
+
772
+ template <>
773
+ Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
774
+ __m256 a_lo, a_hi;
775
+ __m256 max_lo, max_hi;
776
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
777
+ cvtbf16_fp32(__m256i(max), max_lo, max_hi);
778
+ auto o1 = _mm256_min_ps(max_lo, a_lo);
779
+ auto o2 = _mm256_min_ps(max_hi, a_hi);
780
+ return cvtfp32_bf16(o1, o2);
781
+ }
782
+
783
+ template <>
784
+ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
785
+ __m256 a_lo, a_hi;
786
+ __m256 min_lo, min_hi;
787
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
788
+ cvtbf16_fp32(__m256i(min), min_lo, min_hi);
789
+ auto o1 = _mm256_max_ps(min_lo, a_lo);
790
+ auto o2 = _mm256_max_ps(min_hi, a_hi);
791
+ return cvtfp32_bf16(o1, o2);
792
+ }
793
+
794
+ template <>
795
+ inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
796
+ int64_t i;
797
+ #ifndef __msvc_cl__
798
+ #pragma unroll
799
+ #endif
800
+ for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
801
+ auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
802
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
803
+ }
804
+ #ifndef __msvc_cl__
805
+ #pragma unroll
806
+ #endif
807
+ for (; i < n; i++) {
808
+ dst[i] = src[i];
809
+ }
810
+ }
811
+
812
+ template <>
813
+ inline void convert(const float* src, BFloat16* dst, int64_t n) {
814
+ int64_t i;
815
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
816
+ __m256 a = _mm256_loadu_ps(&src[i]);
817
+ __m256 b = _mm256_loadu_ps(&src[i + 8]);
818
+
819
+ __m256i bf = cvtfp32_bf16(a, b);
820
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
821
+ }
822
+ for (; i < n; i++) {
823
+ dst[i] = c10::convert<BFloat16>(src[i]);
824
+ }
825
+ }
826
+
827
+ template <>
828
+ inline void convert(const double* src, BFloat16* dst, int64_t n) {
829
+ auto load_float = [](const double *src) -> __m256 {
830
+ // Load one float vector from an array of doubles
831
+ __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
832
+ __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
833
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
834
+ };
835
+
836
+ int64_t i;
837
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
838
+ __m256 a = load_float(&src[i]);
839
+ __m256 b = load_float(&src[i + 8]);
840
+
841
+ __m256i bf = cvtfp32_bf16(a, b);
842
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
843
+ }
844
+ for (; i < n; i++) {
845
+ dst[i] = c10::convert<BFloat16>(src[i]);
846
+ }
847
+ }
848
+
849
+ template <>
850
+ Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
851
+ const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
852
+ __m256 a_lo, a_hi;
853
+ __m256 b_lo, b_hi;
854
+ __m256 c_lo, c_hi;
855
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
856
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
857
+ cvtbf16_fp32(__m256i(c), c_lo, c_hi);
858
+ auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
859
+ auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
860
+ return cvtfp32_bf16(o1, o2);
861
+ }
862
+
863
+ template <>
864
+ class Vectorized<Half>: public Vectorized16<Half> {
865
+ public:
866
+ using Vectorized16::Vectorized16;
867
+
868
+ Vectorized<Half> frac() const;
869
+
870
+ Vectorized<Half> eq(const Vectorized<Half>& other) const;
871
+ Vectorized<Half> ne(const Vectorized<Half>& other) const;
872
+ Vectorized<Half> gt(const Vectorized<Half>& other) const;
873
+ Vectorized<Half> ge(const Vectorized<Half>& other) const;
874
+ Vectorized<Half> lt(const Vectorized<Half>& other) const;
875
+ Vectorized<Half> le(const Vectorized<Half>& other) const;
876
+ };
877
+
878
+ Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
879
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
880
+ }
881
+ Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
882
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
883
+ }
884
+ Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
885
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
886
+ }
887
+ Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
888
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
889
+ }
890
+ Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
891
+ return _mm256_and_si256(a, b);
892
+ }
893
+ Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
894
+ return _mm256_or_si256(a, b);
895
+ }
896
+ Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
897
+ return _mm256_xor_si256(a, b);
898
+ }
899
+
900
+ inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
901
+ return (*this == other) & Vectorized<Half>(1.0f);
902
+ }
903
+ inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
904
+ return (*this != other) & Vectorized<Half>(1.0f);
905
+ }
906
+ inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
907
+ return (*this > other) & Vectorized<Half>(1.0f);
908
+ }
909
+ inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
910
+ return (*this >= other) & Vectorized<Half>(1.0f);
911
+ }
912
+ inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
913
+ return (*this < other) & Vectorized<Half>(1.0f);
914
+ }
915
+ inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
916
+ return (*this <= other) & Vectorized<Half>(1.0f);
917
+ }
918
+
919
+ // frac. Implement this here so we can use subtraction
920
+ inline Vectorized<Half> Vectorized<Half>::frac() const {
921
+ return *this - this->trunc();
922
+ }
923
+
924
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
925
+ // either input is a NaN.
926
+ template <>
927
+ Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
928
+ __m256 a_lo, a_hi;
929
+ __m256 b_lo, b_hi;
930
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
931
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
932
+ auto max_lo = _mm256_max_ps(a_lo, b_lo);
933
+ auto max_hi = _mm256_max_ps(a_hi, b_hi);
934
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
935
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
936
+ // Exploit the fact that all-ones is a NaN.
937
+ auto o1 = _mm256_or_ps(max_lo, nan_lo);
938
+ auto o2 = _mm256_or_ps(max_hi, nan_hi);
939
+ return cvtfp32_fp16(o1, o2);
940
+ }
941
+
942
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
943
+ // either input is a NaN.
944
+ template <>
945
+ Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
946
+ __m256 a_lo, a_hi;
947
+ __m256 b_lo, b_hi;
948
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
949
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
950
+ auto min_lo = _mm256_min_ps(a_lo, b_lo);
951
+ auto min_hi = _mm256_min_ps(a_hi, b_hi);
952
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
953
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
954
+ // Exploit the fact that all-ones is a NaN.
955
+ auto o1 = _mm256_or_ps(min_lo, nan_lo);
956
+ auto o2 = _mm256_or_ps(min_hi, nan_hi);
957
+ return cvtfp32_fp16(o1, o2);
958
+ }
959
+
960
+ template <>
961
+ Vectorized<Half> inline clamp(const Vectorized<Half>& a,
962
+ const Vectorized<Half>& min, const Vectorized<Half>& max) {
963
+ __m256 a_lo, a_hi;
964
+ __m256 min_lo, min_hi;
965
+ __m256 max_lo, max_hi;
966
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
967
+ cvtfp16_fp32(__m256i(min), min_lo, min_hi);
968
+ cvtfp16_fp32(__m256i(max), max_lo, max_hi);
969
+ auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
970
+ auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
971
+ return cvtfp32_fp16(o1, o2);
972
+ }
973
+
974
+ template <>
975
+ Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
976
+ __m256 a_lo, a_hi;
977
+ __m256 max_lo, max_hi;
978
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
979
+ cvtfp16_fp32(__m256i(max), max_lo, max_hi);
980
+ auto o1 = _mm256_min_ps(max_lo, a_lo);
981
+ auto o2 = _mm256_min_ps(max_hi, a_hi);
982
+ return cvtfp32_fp16(o1, o2);
983
+ }
984
+
985
+ template <>
986
+ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
987
+ __m256 a_lo, a_hi;
988
+ __m256 min_lo, min_hi;
989
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
990
+ cvtfp16_fp32(__m256i(min), min_lo, min_hi);
991
+ auto o1 = _mm256_max_ps(min_lo, a_lo);
992
+ auto o2 = _mm256_max_ps(min_hi, a_hi);
993
+ return cvtfp32_fp16(o1, o2);
994
+ }
995
+
996
+ template <>
997
+ inline void convert(const Half* src, Half* dst, int64_t n) {
998
+ int64_t i;
999
+ #ifndef __msvc_cl__
1000
+ #pragma unroll
1001
+ #endif
1002
+ for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
1003
+ auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
1004
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
1005
+ }
1006
+ #ifndef __msvc_cl__
1007
+ #pragma unroll
1008
+ #endif
1009
+ for (; i < n; i++) {
1010
+ dst[i] = src[i];
1011
+ }
1012
+ }
1013
+
1014
+ template <>
1015
+ inline void convert(const float* src, Half* dst, int64_t n) {
1016
+ int64_t i;
1017
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
1018
+ __m256 a = _mm256_loadu_ps(&src[i]);
1019
+ __m256 b = _mm256_loadu_ps(&src[i + 8]);
1020
+
1021
+ __m256i c = cvtfp32_fp16(a, b);
1022
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
1023
+ }
1024
+ for (; i < n; i++) {
1025
+ dst[i] = c10::convert<Half>(src[i]);
1026
+ }
1027
+ }
1028
+
1029
+ template <>
1030
+ inline void convert(const double* src, Half* dst, int64_t n) {
1031
+ auto load_float = [](const double *src) -> __m256 {
1032
+ // Load one float vector from an array of doubles
1033
+ __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
1034
+ __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
1035
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
1036
+ };
1037
+
1038
+ int64_t i;
1039
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
1040
+ __m256 a = load_float(&src[i]);
1041
+ __m256 b = load_float(&src[i + 8]);
1042
+
1043
+ __m256i c = cvtfp32_fp16(a, b);
1044
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
1045
+ }
1046
+ for (; i < n; i++) {
1047
+ dst[i] = c10::convert<Half>(src[i]);
1048
+ }
1049
+ }
1050
+
1051
+ template <>
1052
+ Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
1053
+ const Vectorized<Half>& b, const Vectorized<Half>& c) {
1054
+ __m256 a_lo, a_hi;
1055
+ __m256 b_lo, b_hi;
1056
+ __m256 c_lo, c_hi;
1057
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
1058
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
1059
+ cvtfp16_fp32(__m256i(c), c_lo, c_hi);
1060
+ auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
1061
+ auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
1062
+ return cvtfp32_fp16(o1, o2);
1063
+ }
1064
+
1065
+ #define CONVERT_VECTORIZED_INIT(type, name) \
1066
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1067
+ __m256 o1, o2; \
1068
+ cvt_to_fp32<type>(__m256i(a), o1, o2); \
1069
+ return std::make_tuple(o1, o2); \
1070
+ } \
1071
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1072
+ return cvt_from_fp32<type>(__m256(a), __m256(b)); \
1073
+ }
1074
+ CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
1075
+ CONVERT_VECTORIZED_INIT(Half, half);
1076
+
1077
+ #else // defined(CPU_CAPABILITY_AVX2)
1078
+
1079
+ #define CONVERT_NON_VECTORIZED_INIT(type, name) \
1080
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1081
+ constexpr int64_t K = Vectorized<type>::size(); \
1082
+ __at_align__ float arr[K]; \
1083
+ __at_align__ type arr2[K]; \
1084
+ a.store(arr2); \
1085
+ convert(arr2, arr, K); \
1086
+ return std::make_tuple( \
1087
+ Vectorized<float>::loadu(arr), \
1088
+ Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
1089
+ } \
1090
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1091
+ constexpr int64_t K = Vectorized<type>::size(); \
1092
+ __at_align__ float arr[K]; \
1093
+ __at_align__ type arr2[K]; \
1094
+ a.store(arr); \
1095
+ b.store(arr + Vectorized<float>::size()); \
1096
+ convert(arr, arr2, K); \
1097
+ return Vectorized<type>::loadu(arr2); \
1098
+ }
1099
+ CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
1100
+ #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
1101
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(const Vectorized<Half>& a) {
1102
+ static_assert(Vectorized<Half>::size() == 2 * Vectorized<float>::size());
1103
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
1104
+ float16x8x2_t arr = a;
1105
+ float16x8_t x = arr.val[0];
1106
+ float16x8_t y = arr.val[1];
1107
+ #else
1108
+ auto arr = reinterpret_cast<const float16_t*>(a.operator const Half*());
1109
+ float16x8_t x = vld1q_f16(arr);
1110
+ float16x8_t y = vld1q_f16(arr + Vectorized<float>::size());
1111
+ #endif
1112
+ float32x4_t x1 = vcvt_f32_f16(vget_low_f16(x));
1113
+ float32x4_t x2 = vcvt_f32_f16(vget_high_f16(x));
1114
+ float32x4_t y1 = vcvt_f32_f16(vget_low_f16(y));
1115
+ float32x4_t y2 = vcvt_f32_f16(vget_high_f16(y));
1116
+ return { Vectorized<float>(x1, x2), Vectorized<float>(y1, y2) };
1117
+ }
1118
+ inline Vectorized<Half> convert_float_half(const Vectorized<float>& a, const Vectorized<float>& b) {
1119
+ static_assert(Vectorized<Half>::size() == 2 * Vectorized<float>::size());
1120
+ float32x4x2_t x = a;
1121
+ float32x4x2_t y = b;
1122
+ float16x4_t x1 = vcvt_f16_f32(x.val[0]);
1123
+ float16x4_t x2 = vcvt_f16_f32(x.val[1]);
1124
+ float16x4_t y1 = vcvt_f16_f32(y.val[0]);
1125
+ float16x4_t y2 = vcvt_f16_f32(y.val[1]);
1126
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
1127
+ return Vectorized<Half>(vcombine_f16(x1, x2), vcombine_f16(y1, y2));
1128
+ #else
1129
+ Vectorized<Half> rc;
1130
+ auto arr = reinterpret_cast<float16_t*>(rc.operator Half*());
1131
+ vst1q_f16(arr, vcombine_f16(x1, x2));
1132
+ vst1q_f16(arr + Vectorized<float>::size(), vcombine_f16(y1, y2));
1133
+ return rc;
1134
+ #endif
1135
+ }
1136
+ #else
1137
+ CONVERT_NON_VECTORIZED_INIT(Half, half);
1138
+ #endif
1139
+
1140
+ #endif // defined(CPU_CAPABILITY_AVX2)
1141
+
1142
+ #if defined(CPU_CAPABILITY_AVX2)
1143
+ #define LOAD_FP32_VECTORIZED_INIT(type, name) \
1144
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1145
+ auto values = _mm_loadu_si128(reinterpret_cast<const __m128i*>(data)); \
1146
+ __m256 out_values; \
1147
+ cvt_to_fp32<type>(values, out_values); \
1148
+ out = out_values; \
1149
+ } \
1150
+ \
1151
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1152
+ auto vec = Vectorized<type>::loadu(data); \
1153
+ __m256 out1_values, out2_values; \
1154
+ cvt_to_fp32<type>(vec, out1_values, out2_values); \
1155
+ out1 = out1_values; \
1156
+ out2 = out2_values; \
1157
+ }
1158
+ LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
1159
+ LOAD_FP32_VECTORIZED_INIT(Half, fp16);
1160
+
1161
+ #else // defined(CPU_CAPABILITY_AVX2)
1162
+ #define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
1163
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1164
+ __at_align__ float values[Vectorized<float>::size()]; \
1165
+ for (const auto k : c10::irange(Vectorized<float>::size())) { \
1166
+ values[k] = data[k]; \
1167
+ } \
1168
+ out = Vectorized<float>::loadu(values); \
1169
+ } \
1170
+ \
1171
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1172
+ load_fp32_from_##name(data, out1); \
1173
+ data += Vectorized<float>::size(); \
1174
+ load_fp32_from_##name(data, out2); \
1175
+ }
1176
+ LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
1177
+ LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
1178
+
1179
+ #endif
1180
+ }} // namsepace at::vec::CPU_CAPABILITY
1181
+
1182
+ #pragma GCC diagnostic pop
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+
11
+ #if defined(CPU_CAPABILITY_AVX2)
12
+ #define SLEEF_STATIC_LIBS
13
+ #include <sleef.h>
14
+ #endif
15
+
16
+ namespace at::vec {
17
+ // See Note [CPU_CAPABILITY namespace]
18
+ inline namespace CPU_CAPABILITY {
19
+
20
+ #if defined(CPU_CAPABILITY_AVX2)
21
+
22
// AVX2 vector of two c10::complex<double> values, stored interleaved in a single
// 256-bit register as [real0, imag0, real1, imag1].
template <> class Vectorized<c10::complex<double>> {
private:
  __m256d values;
public:
  using value_type = c10::complex<double>;
  using size_type = int;
  // Number of complex elements held (2 complex doubles = 4 doubles = 256 bits).
  static constexpr size_type size() {
    return 2;
  }
  Vectorized() {}
  Vectorized(__m256d v) : values(v) {}
  // Broadcast a single complex value into both element slots.
  Vectorized(c10::complex<double> val) {
    double real_value = val.real();
    double imag_value = val.imag();
    values = _mm256_setr_pd(real_value, imag_value,
                            real_value, imag_value);
  }
  Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
    values = _mm256_setr_pd(val1.real(), val1.imag(),
                            val2.real(), val2.imag());
  }
  operator __m256d() const {
    return values;
  }
  // Compile-time select: bit i of `mask` picks complex element i from b, else from a.
  template <int64_t mask>
  static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    static_assert (mask > -1 && mask < 4, "Unexpected mask value");
    switch (mask) {
      case 0:
        return a;
      case 1:
        return _mm256_blend_pd(a.values, b.values, 0x03);
      case 2:
        return _mm256_blend_pd(a.values, b.values, 0x0c);
      case 3: break;
    }
    return b;
  }
  // Runtime select: the per-complex mask lane is duplicated so one mask value
  // drives both double lanes of its element.
  static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
               const Vectorized<c10::complex<double>>& mask) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
    return _mm256_blendv_pd(a.values, b.values, mask_);

  }
  // [base, base + step]; the step type is templated so real steps work too.
  template<typename step_t>
  static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
    return Vectorized<c10::complex<double>>(base,
                                            base + step);
  }
  // Take the first `count` elements from b and the rest from a.
  static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
            int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
    }
    return b;
  }
  // Load up to `count` complex values from (possibly unaligned) memory;
  // remaining slots are zero-filled.
  static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
    if (count == size())
      return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));

    __at_align__ double tmp_values[2*size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(2*size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values,
        reinterpret_cast<const double*>(ptr),
        count * sizeof(c10::complex<double>));
    return _mm256_load_pd(tmp_values);
  }
  // Store up to `count` complex values to (possibly unaligned) memory.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
    } else if (count > 0) {
      double tmp_values[2*size()];
      _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
    }
  }
  // Per-element access is intentionally unsupported; go through store()/loadu().
  const c10::complex<double>& operator[](int idx) const = delete;
  c10::complex<double>& operator[](int idx) = delete;
  // Scalar fallback: apply f element-wise through an aligned stack buffer.
  Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
    __at_align__ c10::complex<double> tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }
  // Squared magnitude |z|^2, duplicated into both lanes of each complex slot.
  __m256d abs_2_() const {
    auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
    return _mm256_hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
  }
  // Magnitude |z| (hypot of real/imag), duplicated into both lanes.
  __m256d abs_() const {
    auto real = _mm256_movedup_pd(values); // real real
    // movehdup_pd does not exist...
    auto imag = _mm256_permute_pd(values, 0xf); // imag imag
    return Sleef_hypotd4_u05(real, imag); // abs abs
  }
  // |z| as a complex number with zero imaginary part.
  Vectorized<c10::complex<double>> abs() const {
    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm256_and_pd(abs_(), real_mask); // abs 0
  }
  __m256d angle_() const {
    // angle = atan2(b, a)
    auto b_a = _mm256_permute_pd(values, 0x05); // b a
    return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
  }
  // arg(z) in the real lane, zero in the imaginary lane.
  Vectorized<c10::complex<double>> angle() const {
    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    auto angle = _mm256_permute_pd(angle_(), 0x05); // angle 90-angle
    return _mm256_and_pd(angle, real_mask); // angle 0
  }
  // z / |z|, with sgn(0) defined as 0 (the blendv guards the 0/0 division).
  Vectorized<c10::complex<double>> sgn() const {
    auto abs = abs_();
    auto zero = _mm256_setzero_pd();
    auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ);
    auto div = _mm256_div_pd(values, abs);
    return _mm256_blendv_pd(div, zero, mask);
  }
  // Real part in place; imaginary lanes masked to zero.
  __m256d real_() const {
    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm256_and_pd(values, real_mask);
  }
  Vectorized<c10::complex<double>> real() const {
    return real_();
  }
  // Imaginary part in place; real lanes masked to zero.
  __m256d imag_() const {
    const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
                                                                     0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
    return _mm256_and_pd(values, imag_mask);
  }
  // Imaginary part moved into the real lane (imaginary lane becomes zero).
  Vectorized<c10::complex<double>> imag() const {
    return _mm256_permute_pd(imag_(), 0x05); //b a
  }
  // Conjugate: flip the sign bit of the imaginary lanes only.
  __m256d conj_() const {
    const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
    return _mm256_xor_pd(values, sign_mask); // a -b
  }
  Vectorized<c10::complex<double>> conj() const {
    return conj_();
  }
  Vectorized<c10::complex<double>> log() const {
    // Most trigonometric ops use the log() op to improve complex number performance.
    return map(std::log);
  }
  // log2(z) = log(z) / log(2)
  Vectorized<c10::complex<double>> log2() const {
    const __m256d log2_ = _mm256_set1_pd(std::log(2));
    return _mm256_div_pd(log(), log2_);
  }
  // log10(z) = log(z) / log(10)
  Vectorized<c10::complex<double>> log10() const {
    const __m256d log10_ = _mm256_set1_pd(std::log(10));
    return _mm256_div_pd(log(), log10_);
  }
  Vectorized<c10::complex<double>> log1p() const {
    return map(std::log1p);
  }
  Vectorized<c10::complex<double>> asin() const {
    // asin(x)
    // = -i*ln(iz + sqrt(1 -z^2))
    // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
    // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
    const __m256d one = _mm256_set1_pd(1);

    auto conj = conj_();
    auto b_a = _mm256_permute_pd(conj, 0x05); //-b a
    auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab
    auto im = _mm256_add_pd(ab, ab); //-2ab -2ab

    auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
    auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a
    re = _mm256_sub_pd(one, re);

    auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im)
    auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt())
    return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln()
  }
  Vectorized<c10::complex<double>> acos() const {
    // acos(x) = pi/2 - asin(x)
    constexpr auto pi_2d = c10::pi<double> / 2;
    const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0);
    return _mm256_sub_pd(pi_2, asin());
  }
  // Defined out of line, after reciprocal() (which it depends on via operator/).
  Vectorized<c10::complex<double>> atan() const;
  Vectorized<c10::complex<double>> atanh() const {
    return map(std::atanh);
  }
  Vectorized<c10::complex<double>> exp() const {
    //exp(a + bi)
    // = exp(a)*(cos(b) + sin(b)i)
    auto exp = Sleef_expd4_u10(values); //exp(a) exp(b)
    exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a)

    auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
    auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05),
                                   sin_cos.x, 0x0A); //cos(b) sin(b)
    return _mm256_mul_pd(exp, cos_sin);
  }
  Vectorized<c10::complex<double>> exp2() const {
    // Use identity 2**x = exp(log(2) * x)
    const __m256d ln_2 = _mm256_set1_pd(c10::ln_2<double>);
    Vectorized<c10::complex<double>> scaled_values = _mm256_mul_pd(values, ln_2);
    return scaled_values.exp();
  }
  Vectorized<c10::complex<double>> expm1() const {
    return map(std::expm1);
  }
  Vectorized<c10::complex<double>> sin() const {
    return map(std::sin);
  }
  Vectorized<c10::complex<double>> sinh() const {
    return map(std::sinh);
  }
  Vectorized<c10::complex<double>> cos() const {
    return map(std::cos);
  }
  Vectorized<c10::complex<double>> cosh() const {
    return map(std::cosh);
  }
  // NOTE: ceil/floor/round/trunc act lane-wise on the raw doubles
  // (real and imaginary parts independently).
  Vectorized<c10::complex<double>> ceil() const {
    return _mm256_ceil_pd(values);
  }
  Vectorized<c10::complex<double>> floor() const {
    return _mm256_floor_pd(values);
  }
  Vectorized<c10::complex<double>> neg() const {
    auto zero = _mm256_setzero_pd();
    return _mm256_sub_pd(zero, values);
  }
  Vectorized<c10::complex<double>> round() const {
    return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  }
  Vectorized<c10::complex<double>> tan() const {
    return map(std::tan);
  }
  Vectorized<c10::complex<double>> tanh() const {
    return map(std::tanh);
  }
  Vectorized<c10::complex<double>> trunc() const {
    return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  }
  Vectorized<c10::complex<double>> sqrt() const {
    return map(std::sqrt);
  }
  // Defined out of line so it can use operator* / abs_2_().
  Vectorized<c10::complex<double>> reciprocal() const;
  Vectorized<c10::complex<double>> rsqrt() const {
    return sqrt().reciprocal();
  }
  // Scalar fallback through std::pow; complex pow has no SIMD kernel here.
  Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
    __at_align__ c10::complex<double> x_tmp[size()];
    __at_align__ c10::complex<double> y_tmp[size()];
    store(x_tmp);
    exp.store(y_tmp);
    for (const auto i : c10::irange(size())) {
      x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
    }
    return loadu(x_tmp);
  }
  // Comparison using the _CMP_**_OQ predicate.
  //   `O`: get false if an operand is NaN
  //   `Q`: do not raise if an operand is NaN
  // These compare real and imaginary lanes independently; use eq()/ne() for
  // whole-complex-element equality.
  Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
  }
  Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
  }
  // Ordering is undefined for complex numbers; these always throw.
  Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  // Whole-element equality/inequality returning 1.0/0.0 per complex element
  // (defined out of line below).
  Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
  Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
};
316
+
317
+ template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
318
+ return _mm256_add_pd(a, b);
319
+ }
320
+
321
+ template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
322
+ return _mm256_sub_pd(a, b);
323
+ }
324
+
325
// Complex multiply. The horizontal-subtract trick computes both the real part
// (ac - bd) and the imaginary part (ad + bc) of each element in one hsub:
// hsub of [ac, bd] and [ad, -bc] yields [ac - bd, ad + bc].
template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
  //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
  const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
  auto ac_bd = _mm256_mul_pd(a, b);         //ac       bd

  auto d_c = _mm256_permute_pd(b, 0x05);    //d        c
  d_c = _mm256_xor_pd(sign_mask, d_c);      //d       -c
  auto ad_bc = _mm256_mul_pd(a, d_c);       //ad      -bc

  auto ret = _mm256_hsub_pd(ac_bd, ad_bc);  //ac - bd  ad + bc
  return ret;
}
337
+
338
// Complex divide. The divisor is pre-scaled by 1/max(|c|, |d|) before the
// standard (ac+bd, bc-ad) / (c^2+d^2) formula, to avoid overflow/underflow in
// the intermediate products (the scale factor cancels between numerator and
// denominator).
template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
  //re + im*i = (a + bi) / (c + di)
  auto mask = _mm256_set1_pd(-0.f);
  auto fabs_cd = _mm256_andnot_pd(mask, b); // |c| |d|  (clear sign bits)
  auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05); // |d| |c|
  auto scale = _mm256_div_pd(_mm256_set1_pd(1.0f), _mm256_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
  auto a2 = _mm256_mul_pd(a, scale); // a/sc b/sc
  auto b2 = _mm256_mul_pd(b, scale); // c/sc d/sc
  auto acbd2 = _mm256_mul_pd(a2, b2);

  const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
  auto dc2 = _mm256_permute_pd(b2, 0x05); // d/sc c/sc
  dc2 = _mm256_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
  auto adbc2 = _mm256_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
  auto res2 = _mm256_hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2

  // get the denominator: |b|^2 with the same scaling applied
  auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
  res2 = _mm256_div_pd(res2, denom2);
  return res2;
}
359
+
360
// reciprocal. Implement this here so we can use multiplication.
// 1/z = conj(z) / |z|^2; the xor flips the sign of the imaginary lanes to form
// the conjugate before dividing by the squared magnitude from abs_2_().
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
  //re + im*i = (a + bi) / (c + di)
  //re = (ac + bd)/abs_2() = c/abs_2()
  //im = (bc - ad)/abs_2() = d/abs_2()
  const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
  auto c_d = _mm256_xor_pd(sign_mask, values); //c -d
  return _mm256_div_pd(c_d, abs_2_());
}
369
+
370
// atan via the logarithmic identity; uses the complex operator/ and log()
// defined above, which is why it is defined out of line.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
  // atan(x) = i/2 * ln((i + z)/(i - z))
  const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0);      // the constant i in each slot
  const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5); // the constant i/2

  auto sum = Vectorized(_mm256_add_pd(i, values)); // a 1+b
  auto sub = Vectorized(_mm256_sub_pd(i, values)); // -a 1-b
  auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
  return i_half*ln; // i/2*ln()
}
380
+
381
// Element-wise "maximum" ordered by squared magnitude (|a|^2 vs |b|^2).
// If either magnitude is NaN, the or with the all-ones compare mask forces a
// NaN result (all-ones is a NaN bit pattern), matching propagation semantics.
template <>
Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ);
  auto max = _mm256_blendv_pd(a, b, mask);
  // Exploit the fact that all-ones is a NaN.
  auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
  return _mm256_or_pd(max, isnan);
}
391
+
392
// Element-wise "minimum" ordered by squared magnitude (|a|^2 vs |b|^2).
// NaN inputs propagate via the unordered-compare mask, as in maximum().
template <>
Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ);
  auto min = _mm256_blendv_pd(a, b, mask);
  // Exploit the fact that all-ones is a NaN.
  auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
  return _mm256_or_pd(min, isnan);
}
402
+
403
+ template <>
404
+ Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
405
+ return _mm256_and_pd(a, b);
406
+ }
407
+
408
+ template <>
409
+ Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
410
+ return _mm256_or_pd(a, b);
411
+ }
412
+
413
+ template <>
414
+ Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
415
+ return _mm256_xor_pd(a, b);
416
+ }
417
+
418
// Whole-element equality: both the real AND imaginary lane compares must hold.
// The final AND with 1.0 converts the all-ones mask into a numeric 1.0/0.0.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
  auto eq = (*this == other); // compares real and imag individually
  // If both real numbers and imag numbers are equal, then the complex numbers are equal
  return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
}
423
+
424
// Whole-element inequality: either the real OR imaginary lane differing makes
// the element unequal. The AND with 1.0 converts the mask into 1.0/0.0.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
  auto ne = (*this != other); // compares real and imag individually
  // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
  return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
}
429
+
430
+ #endif
431
+
432
+ }} // namespace at::vec::CPU_CAPABILITY
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h ADDED
@@ -0,0 +1,469 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX2)
11
+ #define SLEEF_STATIC_LIBS
12
+ #include <sleef.h>
13
+ #endif
14
+
15
+ namespace at::vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX2)
20
+
21
+ template <> class Vectorized<c10::complex<float>> {
22
+ private:
23
+ __m256 values;
24
+ public:
25
+ using value_type = c10::complex<float>;
26
+ using size_type = int;
27
+ static constexpr size_type size() {
28
+ return 4;
29
+ }
30
+ Vectorized() {}
31
+ Vectorized(__m256 v) : values(v) {}
32
+ Vectorized(c10::complex<float> val) {
33
+ float real_value = val.real();
34
+ float imag_value = val.imag();
35
+ values = _mm256_setr_ps(real_value, imag_value,
36
+ real_value, imag_value,
37
+ real_value, imag_value,
38
+ real_value, imag_value
39
+ );
40
+ }
41
+ Vectorized(c10::complex<float> val1, c10::complex<float> val2, c10::complex<float> val3, c10::complex<float> val4) {
42
+ values = _mm256_setr_ps(val1.real(), val1.imag(),
43
+ val2.real(), val2.imag(),
44
+ val3.real(), val3.imag(),
45
+ val4.real(), val4.imag()
46
+ );
47
+ }
48
+ operator __m256() const {
49
+ return values;
50
+ }
51
+ template <int64_t mask>
52
+ static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
53
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
54
+ static_assert(mask > -1 && mask < 16, "Unexpected mask range");
55
+ switch (mask) {
56
+ case 0:
57
+ return a;
58
+ case 1:
59
+ return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011
60
+ case 2:
61
+ return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100
62
+ case 3:
63
+ return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111
64
+ case 4:
65
+ return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000
66
+ case 5:
67
+ return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011
68
+ case 6:
69
+ return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100
70
+ case 7:
71
+ return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111
72
+ case 8:
73
+ return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000
74
+ case 9:
75
+ return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011
76
+ case 10:
77
+ return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100
78
+ case 11:
79
+ return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111
80
+ case 12:
81
+ return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000
82
+ case 13:
83
+ return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011
84
+ case 14:
85
+ return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100
86
+ default: break;
87
+ }
88
+ return b;
89
+ }
90
+ static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
91
+ const Vectorized<c10::complex<float>>& mask) {
92
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
93
+ auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values);
94
+ return _mm256_blendv_ps(a.values, b.values, mask_);
95
+
96
+ }
97
+ template<typename step_t>
98
+ static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0., step_t step = static_cast<step_t>(1)) {
99
+ return Vectorized<c10::complex<float>>(base,
100
+ base + step,
101
+ base + c10::complex<float>(2)*step,
102
+ base + c10::complex<float>(3)*step);
103
+ }
104
+ static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
105
+ int64_t count = size()) {
106
+ switch (count) {
107
+ case 0:
108
+ return a;
109
+ case 1:
110
+ return blend<1>(a, b);
111
+ case 2:
112
+ return blend<3>(a, b);
113
+ case 3:
114
+ return blend<7>(a, b);
115
+ }
116
+ return b;
117
+ }
118
+ static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
119
+ if (count == size())
120
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
121
+
122
+ __at_align__ float tmp_values[2*size()];
123
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
124
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
125
+ // instructions while a loop would be compiled to one instruction.
126
+ for (const auto i : c10::irange(2*size())) {
127
+ tmp_values[i] = 0.0;
128
+ }
129
+ std::memcpy(
130
+ tmp_values,
131
+ reinterpret_cast<const float*>(ptr),
132
+ count * sizeof(c10::complex<float>));
133
+ return _mm256_load_ps(tmp_values);
134
+ }
135
+ void store(void* ptr, int count = size()) const {
136
+ if (count == size()) {
137
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
138
+ } else if (count > 0) {
139
+ float tmp_values[2*size()];
140
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
141
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
142
+ }
143
+ }
144
+ const c10::complex<float>& operator[](int idx) const = delete;
145
+ c10::complex<float>& operator[](int idx) = delete;
146
+ Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
147
+ __at_align__ c10::complex<float> tmp[size()];
148
+ store(tmp);
149
+ for (const auto i : c10::irange(size())) {
150
+ tmp[i] = f(tmp[i]);
151
+ }
152
+ return loadu(tmp);
153
+ }
154
+ __m256 abs_2_() const {
155
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
156
+ auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
157
+ return _mm256_permute_ps(ret, 0xD8);
158
+ }
159
+ __m256 abs_() const {
160
+ auto real = _mm256_moveldup_ps(values); // real real
161
+ auto imag = _mm256_movehdup_ps(values); // imag imag
162
+ return Sleef_hypotf8_u05(real, imag); // abs abs
163
+ }
164
+ Vectorized<c10::complex<float>> abs() const {
165
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
166
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
167
+ return _mm256_and_ps(abs_(), real_mask); // abs 0
168
+ }
169
+ __m256 angle_() const {
170
+ //angle = atan2(b/a)
171
+ auto b_a = _mm256_permute_ps(values, 0xB1); // b a
172
+ return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
173
+ }
174
+ Vectorized<c10::complex<float>> angle() const {
175
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
176
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
177
+ auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle
178
+ return _mm256_and_ps(angle, real_mask); // angle 0
179
+ }
180
+ Vectorized<c10::complex<float>> sgn() const {
181
+ auto abs = abs_();
182
+ auto zero = _mm256_setzero_ps();
183
+ auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ);
184
+ auto div = _mm256_div_ps(values, abs);
185
+ return _mm256_blendv_ps(div, zero, mask);
186
+ }
187
+ __m256 real_() const {
188
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
189
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
190
+ return _mm256_and_ps(values, real_mask);
191
+ }
192
+ Vectorized<c10::complex<float>> real() const {
193
+ return real_();
194
+ }
195
+ __m256 imag_() const {
196
+ const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
197
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
198
+ return _mm256_and_ps(values, imag_mask);
199
+ }
200
+ Vectorized<c10::complex<float>> imag() const {
201
+ return _mm256_permute_ps(imag_(), 0xB1); //b a
202
+ }
203
+ __m256 conj_() const {
204
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
205
+ return _mm256_xor_ps(values, sign_mask); // a -b
206
+ }
207
+ Vectorized<c10::complex<float>> conj() const {
208
+ return conj_();
209
+ }
210
+ Vectorized<c10::complex<float>> log() const {
211
+ // Most trigonomic ops use the log() op to improve complex number performance.
212
+ return map(std::log);
213
+ }
214
+ Vectorized<c10::complex<float>> log2() const {
215
+ const __m256 log2_ = _mm256_set1_ps(std::log(2));
216
+ return _mm256_div_ps(log(), log2_);
217
+ }
218
+ Vectorized<c10::complex<float>> log10() const {
219
+ const __m256 log10_ = _mm256_set1_ps(std::log(10));
220
+ return _mm256_div_ps(log(), log10_);
221
+ }
222
+ Vectorized<c10::complex<float>> log1p() const {
223
+ return map(std::log1p);
224
+ }
225
+ Vectorized<c10::complex<float>> asin() const {
226
+ // asin(x)
227
+ // = -i*ln(iz + sqrt(1 -z^2))
228
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
229
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
230
+ const __m256 one = _mm256_set1_ps(1);
231
+
232
+ auto conj = conj_();
233
+ auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a
234
+ auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab
235
+ auto im = _mm256_add_ps(ab, ab); //-2ab -2ab
236
+
237
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
238
+ auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
239
+ re = _mm256_permute_ps(re, 0xD8);
240
+ re = _mm256_sub_ps(one, re);
241
+
242
+ auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im)
243
+ auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt())
244
+ return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
245
+ }
246
+ Vectorized<c10::complex<float>> acos() const {
247
+ return map(std::acos);
248
+ }
249
+ Vectorized<c10::complex<float>> atan() const;
250
+ Vectorized<c10::complex<float>> atanh() const {
251
+ return map(std::atanh);
252
+ }
253
+ Vectorized<c10::complex<float>> exp() const {
254
+ //exp(a + bi)
255
+ // = exp(a)*(cos(b) + sin(b)i)
256
+ auto exp = Sleef_expf8_u10(values); //exp(a) exp(b)
257
+ exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a)
258
+
259
+ auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
260
+ auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1),
261
+ sin_cos.x, 0xAA); //cos(b) sin(b)
262
+ return _mm256_mul_ps(exp, cos_sin);
263
+ }
264
+ Vectorized<c10::complex<float>> exp2() const {
265
+ // Use identity 2**x = exp(log(2) * x)
266
+ const __m256 ln_2 = _mm256_set1_ps(c10::ln_2<float>);
267
+ Vectorized<c10::complex<float>> scaled_values = _mm256_mul_ps(values, ln_2);
268
+ return scaled_values.exp();
269
+ }
270
+ Vectorized<c10::complex<float>> expm1() const {
271
+ return map(std::expm1);
272
+ }
273
+ Vectorized<c10::complex<float>> sin() const {
274
+ return map(std::sin);
275
+ }
276
+ Vectorized<c10::complex<float>> sinh() const {
277
+ return map(std::sinh);
278
+ }
279
+ Vectorized<c10::complex<float>> cos() const {
280
+ return map(std::cos);
281
+ }
282
+ Vectorized<c10::complex<float>> cosh() const {
283
+ return map(std::cosh);
284
+ }
285
+ Vectorized<c10::complex<float>> ceil() const {
286
+ return _mm256_ceil_ps(values);
287
+ }
288
+ Vectorized<c10::complex<float>> floor() const {
289
+ return _mm256_floor_ps(values);
290
+ }
291
+ Vectorized<c10::complex<float>> neg() const {
292
+ auto zero = _mm256_setzero_ps();
293
+ return _mm256_sub_ps(zero, values);
294
+ }
295
+ Vectorized<c10::complex<float>> round() const {
296
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
297
+ }
298
+ Vectorized<c10::complex<float>> tan() const {
299
+ return map(std::tan);
300
+ }
301
+ Vectorized<c10::complex<float>> tanh() const {
302
+ return map(std::tanh);
303
+ }
304
+ Vectorized<c10::complex<float>> trunc() const {
305
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
306
+ }
307
+ Vectorized<c10::complex<float>> sqrt() const {
308
+ return map(std::sqrt);
309
+ }
310
+ Vectorized<c10::complex<float>> reciprocal() const;
311
+ Vectorized<c10::complex<float>> rsqrt() const {
312
+ return sqrt().reciprocal();
313
+ }
314
+ Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
315
+ __at_align__ c10::complex<float> x_tmp[size()];
316
+ __at_align__ c10::complex<float> y_tmp[size()];
317
+ store(x_tmp);
318
+ exp.store(y_tmp);
319
+ for (const auto i : c10::irange(size())) {
320
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
321
+ }
322
+ return loadu(x_tmp);
323
+ }
324
+ // Comparison using the _CMP_**_OQ predicate.
325
+ // `O`: get false if an operand is NaN
326
+ // `Q`: do not raise if an operand is NaN
327
+ Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
328
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
329
+ }
330
+ Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
331
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
332
+ }
333
+ Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& /*other*/) const {
334
+ TORCH_CHECK(false, "not supported for complex numbers");
335
+ }
336
+ Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& /*other*/) const {
337
+ TORCH_CHECK(false, "not supported for complex numbers");
338
+ }
339
+ Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& /*other*/) const {
340
+ TORCH_CHECK(false, "not supported for complex numbers");
341
+ }
342
+ Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& /*other*/) const {
343
+ TORCH_CHECK(false, "not supported for complex numbers");
344
+ }
345
+
346
+ Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
347
+ Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
348
+ };
349
+
350
+ template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
351
+ return _mm256_add_ps(a, b);
352
+ }
353
+
354
+ template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
355
+ return _mm256_sub_ps(a, b);
356
+ }
357
+
358
+ template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
359
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
360
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
361
+ auto ac_bd = _mm256_mul_ps(a, b); //ac bd
362
+
363
+ auto d_c = _mm256_permute_ps(b, 0xB1); //d c
364
+ d_c = _mm256_xor_ps(sign_mask, d_c); //d -c
365
+ auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc
366
+
367
+ auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
368
+ ret = _mm256_permute_ps(ret, 0xD8);
369
+ return ret;
370
+ }
371
+
372
+ template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
373
+ //re + im*i = (a + bi) / (c + di)
374
+ auto mask = _mm256_set1_ps(-0.f);
375
+ auto fabs_cd = _mm256_andnot_ps(mask, b); // |c| |d|
376
+ auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1); // |d| |c|
377
+ auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
378
+ auto a2 = _mm256_mul_ps(a, scale); // a/sc b/sc
379
+ auto b2 = _mm256_mul_ps(b, scale); // c/sc d/sc
380
+ auto acbd2 = _mm256_mul_ps(a2, b2);
381
+
382
+ const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
383
+ auto dc2 = _mm256_permute_ps(b2, 0xB1); // d/sc c/sc
384
+ dc2 = _mm256_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc
385
+ auto adbc2 = _mm256_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
386
+ auto res2 = _mm256_hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
387
+ res2 = _mm256_permute_ps(res2, 0xD8);
388
+
389
+ // get the denominator
390
+ auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
391
+ res2 = _mm256_div_ps(res2, denom2);
392
+ return res2;
393
+ }
394
+
395
+ // reciprocal. Implement this here so we can use multiplication.
396
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
397
+ //re + im*i = (a + bi) / (c + di)
398
+ //re = (ac + bd)/abs_2() = c/abs_2()
399
+ //im = (bc - ad)/abs_2() = d/abs_2()
400
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
401
+ auto c_d = _mm256_xor_ps(sign_mask, values); //c -d
402
+ return _mm256_div_ps(c_d, abs_2_());
403
+ }
404
+
405
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
406
+ // atan(x) = i/2 * ln((i + z)/(i - z))
407
+ const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
408
+ const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
409
+
410
+ auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b
411
+ auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b
412
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
413
+ return i_half*ln; // i/2*ln()
414
+ }
415
+
416
+ template <>
417
+ Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
418
+ auto abs_a = a.abs_2_();
419
+ auto abs_b = b.abs_2_();
420
+ auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
421
+ auto max = _mm256_blendv_ps(a, b, mask);
422
+ // Exploit the fact that all-ones is a NaN.
423
+ auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
424
+ return _mm256_or_ps(max, isnan);
425
+ }
426
+
427
+ template <>
428
+ Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
429
+ auto abs_a = a.abs_2_();
430
+ auto abs_b = b.abs_2_();
431
+ auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
432
+ auto min = _mm256_blendv_ps(a, b, mask);
433
+ // Exploit the fact that all-ones is a NaN.
434
+ auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
435
+ return _mm256_or_ps(min, isnan);
436
+ }
437
+
438
+ template <>
439
+ Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
440
+ return _mm256_and_ps(a, b);
441
+ }
442
+
443
+ template <>
444
+ Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
445
+ return _mm256_or_ps(a, b);
446
+ }
447
+
448
+ template <>
449
+ Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
450
+ return _mm256_xor_ps(a, b);
451
+ }
452
+
453
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
454
+ const Vectorized<c10::complex<float>>& other) const {
455
+ auto eq = (*this == other); // compares real and imag individually
456
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
457
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
458
+ }
459
+
460
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
461
+ const Vectorized<c10::complex<float>>& other) const {
462
+ auto ne = (*this != other); // compares real and imag individually
463
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
464
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
465
+ }
466
+
467
+ #endif
468
+
469
+ }} // namespace at::vec::CPU_CAPABILITY
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_convert.h ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/functional_bfloat16.h>
4
+ #include <ATen/cpu/vec/intrinsics.h>
5
+ #include <ATen/cpu/vec/vec_base.h>
6
+ #include <ATen/cpu/vec/vec_convert.h>
7
+
8
+ namespace at::vec {
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
12
+
13
+ template <>
14
+ struct VecConvert<float, 1, BFloat16, 1> {
15
+ static inline VectorizedN<float, 1> apply(
16
+ const VectorizedN<BFloat16, 1>& src) {
17
+ VectorizedN<float, 1> result;
18
+ __m256 value;
19
+ cvtbf16_fp32(_mm256_castsi256_si128(src[0]), value);
20
+ result[0] = value;
21
+ return result;
22
+ }
23
+ };
24
+
25
+ template <>
26
+ struct VecConvert<float, 1, Half, 1> {
27
+ static inline VectorizedN<float, 1> apply(const VectorizedN<Half, 1>& src) {
28
+ VectorizedN<float, 1> result;
29
+ __m256 value;
30
+ cvtfp16_fp32(_mm256_castsi256_si128(src[0]), value);
31
+ result[0] = value;
32
+ return result;
33
+ }
34
+ };
35
+
36
+ template <>
37
+ struct VecConvert<BFloat16, 1, float, 1> {
38
+ static inline VectorizedN<BFloat16, 1> apply(
39
+ const VectorizedN<float, 1>& src) {
40
+ VectorizedN<BFloat16, 1> result;
41
+ result[0] = _mm256_castsi128_si256(cvtfp32_bf16(src[0]));
42
+ return result;
43
+ }
44
+ };
45
+
46
+ template <>
47
+ struct VecConvert<BFloat16, 1, float, 2> {
48
+ static inline VectorizedN<BFloat16, 1> apply(
49
+ const VectorizedN<float, 2>& src) {
50
+ VectorizedN<BFloat16, 1> result;
51
+ result[0] = convert_float_bfloat16(src[0], src[1]);
52
+ return result;
53
+ }
54
+ };
55
+
56
+ template <>
57
+ struct VecConvert<float, 2, BFloat16, 1> {
58
+ static inline VectorizedN<float, 2> apply(
59
+ const VectorizedN<BFloat16, 1>& src) {
60
+ VectorizedN<float, 2> result;
61
+ std::tie(result[0], result[1]) = convert_bfloat16_float(src[0]);
62
+ return result;
63
+ }
64
+ };
65
+
66
+ template <>
67
+ struct VecConvert<Half, 1, float, 1> {
68
+ static inline VectorizedN<Half, 1> apply(const VectorizedN<float, 1>& src) {
69
+ VectorizedN<Half, 1> result;
70
+ result[0] = _mm256_castsi128_si256(cvtfp32_fp16(src[0]));
71
+ return result;
72
+ }
73
+ };
74
+
75
+ template <>
76
+ struct VecConvert<Half, 1, float, 2> {
77
+ static inline VectorizedN<Half, 1> apply(const VectorizedN<float, 2>& src) {
78
+ VectorizedN<Half, 1> result;
79
+ result[0] = convert_float_half(src[0], src[1]);
80
+ return result;
81
+ }
82
+ };
83
+
84
+ template <>
85
+ struct VecConvert<float, 2, Half, 1> {
86
+ static inline VectorizedN<float, 2> apply(const VectorizedN<Half, 1>& src) {
87
+ VectorizedN<float, 2> result;
88
+ std::tie(result[0], result[1]) = convert_half_float(src[0]);
89
+ return result;
90
+ }
91
+ };
92
+
93
+ template <>
94
+ inline Vectorized<double> convert_to_fp_of_same_size<double>(
95
+ const Vectorized<int64_t>& src);
96
+
97
+ template <>
98
+ struct VecConvert<float, 1, int64_t, 2> {
99
+ static inline VectorizedN<float, 1> apply(
100
+ const VectorizedN<int64_t, 2>& src) {
101
+ auto low_double = at::vec::convert_to_fp_of_same_size<double>(src[0]);
102
+ auto low = _mm256_cvtpd_ps(low_double);
103
+ auto high_double = at::vec::convert_to_fp_of_same_size<double>(src[1]);
104
+ auto high = _mm256_cvtpd_ps(high_double);
105
+ return Vectorized<float>(
106
+ _mm256_insertf128_ps(_mm256_castps128_ps256(low), high, 1));
107
+ }
108
+ };
109
+
110
+ template <>
111
+ struct VecConvert<int64_t, 2, float, 1> {
112
+ static inline VectorizedN<int64_t, 2> apply(
113
+ const VectorizedN<float, 1>& src) {
114
+ // Scalarization is the most reliable way of converting fp to int64 on AVX2.
115
+ // Check: https://stackoverflow.com/questions/41144668
116
+ float buffer[8];
117
+ src.store(buffer);
118
+ at::vec::VectorizedN<int64_t, 2> result;
119
+ result[0] = Vectorized<int64_t>(
120
+ static_cast<int64_t>(buffer[0]),
121
+ static_cast<int64_t>(buffer[1]),
122
+ static_cast<int64_t>(buffer[2]),
123
+ static_cast<int64_t>(buffer[3]));
124
+ result[1] = Vectorized<int64_t>(
125
+ static_cast<int64_t>(buffer[4]),
126
+ static_cast<int64_t>(buffer[5]),
127
+ static_cast<int64_t>(buffer[6]),
128
+ static_cast<int64_t>(buffer[7]));
129
+ return result;
130
+ }
131
+ };
132
+
133
+ template <>
134
+ struct VecConvert<int32_t, 1, int64_t, 2> {
135
+ static inline VectorizedN<int32_t, 1> apply(
136
+ const VectorizedN<int64_t, 2>& src) {
137
+ auto low = _mm256_shuffle_epi32(src[0], _MM_SHUFFLE(2, 0, 2, 0));
138
+ auto high = _mm256_shuffle_epi32(src[1], _MM_SHUFFLE(2, 0, 2, 0));
139
+ auto low_perm = _mm256_permute4x64_epi64(low, _MM_SHUFFLE(3, 1, 2, 0));
140
+ auto high_perm = _mm256_permute4x64_epi64(high, _MM_SHUFFLE(3, 1, 2, 0));
141
+ return Vectorized<int32_t>(_mm256_blend_epi32(low_perm, high_perm, 0xF0));
142
+ }
143
+ };
144
+
145
+ template <>
146
+ struct VecConvert<int64_t, 2, int32_t, 1> {
147
+ static inline VectorizedN<int64_t, 2> apply(
148
+ const VectorizedN<int32_t, 1>& src) {
149
+ at::vec::VectorizedN<int64_t, 2> result;
150
+ result[0] = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(src[0]));
151
+ result[1] = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(src[0], 1));
152
+ return result;
153
+ }
154
+ };
155
+
156
+ template <>
157
+ struct VecConvert<int32_t, 1, int8_t, 1> {
158
+ static inline VectorizedN<int32_t, 1> apply(
159
+ const VectorizedN<int8_t, 1>& src) {
160
+ auto src128 = _mm256_castsi256_si128(src[0]);
161
+ return Vectorized<int32_t>(_mm256_cvtepi8_epi32(src128));
162
+ }
163
+ };
164
+
165
+ template <>
166
+ struct VecConvert<int32_t, 1, uint8_t, 1> {
167
+ static inline VectorizedN<int32_t, 1> apply(
168
+ const VectorizedN<uint8_t, 1>& src) {
169
+ auto src128 = _mm256_castsi256_si128(src[0]);
170
+ return Vectorized<int32_t>(_mm256_cvtepu8_epi32(src128));
171
+ }
172
+ };
173
+
174
+
175
+ template <>
176
+ struct VecConvert<int32_t, 1, float, 1> {
177
+ static inline VectorizedN<int32_t, 1> apply(
178
+ const VectorizedN<float, 1>& src) {
179
+ return Vectorized<int32_t>(_mm256_cvttps_epi32(src[0]));
180
+ }
181
+ };
182
+
183
+ template <>
184
+ struct VecConvert<float, 1, int32_t, 1> {
185
+ static inline VectorizedN<float, 1> apply(
186
+ const VectorizedN<int32_t, 1>& src) {
187
+ return Vectorized<float>(_mm256_cvtepi32_ps(src[0]));
188
+ }
189
+ };
190
+
191
+ template <>
192
+ struct VecConvert<int16_t, 1, uint8_t, 1> {
193
+ static inline VectorizedN<int16_t, 1> apply(
194
+ const VectorizedN<uint8_t, 1>& src) {
195
+ auto src128 = _mm256_castsi256_si128(src[0]);
196
+ return Vectorized<int16_t>(_mm256_cvtepu8_epi16(src128));
197
+ }
198
+ };
199
+
200
+ template <typename dst_t, typename src_t>
201
+ struct VecConvert<
202
+ dst_t,
203
+ 1,
204
+ src_t,
205
+ 1,
206
+ typename std::enable_if_t<
207
+ (is_reduced_floating_point_v<dst_t> && is_8bit_integer_v<src_t>) ||
208
+ (is_reduced_floating_point_v<src_t> && is_8bit_integer_v<dst_t>),
209
+ void>> {
210
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<src_t, 1>& src) {
211
+ VectorizedN<float, 1> tmp_fp32 = VecConvert<float, 1, src_t, 1>::apply(src);
212
+ return VecConvert<dst_t, 1, float, 1>::apply(tmp_fp32);
213
+ }
214
+ };
215
+
216
+ template <typename dst_t>
217
+ struct VecConvert<
218
+ dst_t,
219
+ 1,
220
+ float,
221
+ 1,
222
+ typename std::enable_if_t<is_8bit_integer_v<dst_t>,
223
+ void>> {
224
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<float, 1>& src) {
225
+ return convert_float_to_int8<dst_t>(src[0]);
226
+ }
227
+ };
228
+
229
+
230
+ template <typename dst_t>
231
+ struct VecConvert<
232
+ dst_t,
233
+ 1,
234
+ int64_t,
235
+ 2,
236
+ typename std::enable_if<
237
+ std::is_same_v<dst_t, int8_t> ||
238
+ std::is_same_v<dst_t, uint8_t>>::type> {
239
+ static inline VectorizedN<dst_t, 1> apply(
240
+ const VectorizedN<int64_t, 2>& src) {
241
+ return VecConvert<dst_t, 1, int32_t, 1>::apply(
242
+ VecConvert<int32_t, 1, int64_t, 2>::apply(src));
243
+ }
244
+ };
245
+
246
+ #endif /* defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) */
247
+
248
+
249
+ #if (defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)) || defined(CPU_CAPABILITY_NEON)
250
+ template <typename src_t>
251
+ struct VecConvert<
252
+ float,
253
+ 1,
254
+ src_t,
255
+ 1,
256
+ typename std::enable_if_t<is_8bit_integer_v<src_t>,
257
+ void>> {
258
+ static inline VectorizedN<float, 1> apply(const VectorizedN<src_t, 1>& src) {
259
+ return convert_int8_to_float<src_t>(src[0]);
260
+ }
261
+ };
262
+ #endif
263
+
264
+ #if defined(CPU_CAPABILITY_NEON)
265
+ template <>
266
+ struct VecConvert<float, 1, BFloat16, 1> {
267
+ static inline VectorizedN<float, 1> apply(
268
+ const VectorizedN<BFloat16, 1>& src) {
269
+ VectorizedN<float, 1> result;
270
+ uint16x8_t u16_8 = vld1q_u16(reinterpret_cast<const uint16_t*>(&src[0]));
271
+ int32x4_t shift = vdupq_n_s32(16);
272
+ auto u16_low1 = vget_low_u16(u16_8);
273
+ auto u16_high1 = vget_high_u16(u16_8);
274
+ float32x4_t f32x4_0 = vreinterpretq_f32_u32(vshlq_u32(vmovl_u16(u16_low1), shift));
275
+ float32x4_t f32x4_1 = vreinterpretq_f32_u32(vshlq_u32(vmovl_u16(u16_high1), shift));
276
+ result[0] = {f32x4_0, f32x4_1};
277
+ return result;
278
+ }
279
+ };
280
+ #endif
281
+
282
+ template <typename src_t>
283
+ struct VecConvert<
284
+ float,
285
+ 1,
286
+ src_t,
287
+ 1,
288
+ typename std::enable_if_t<is_reduced_floating_point_v<src_t>, void>> {
289
+ static inline VectorizedN<float, 1> apply(const VectorizedN<src_t, 1>& src) {
290
+ auto [res_vec1, res_vec2] = convert_to_float<src_t>(src[0]);
291
+ return res_vec1;
292
+ }
293
+ };
294
+
295
+ template <typename dst_t>
296
+ struct VecConvert<
297
+ dst_t,
298
+ 1,
299
+ float,
300
+ 1,
301
+ typename std::enable_if_t<is_reduced_floating_point_v<dst_t>, void>> {
302
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<float, 1>& src) {
303
+ return convert_from_float<dst_t>(src[0], src[0]);
304
+ }
305
+ };
306
+
307
+ } // namespace CPU_CAPABILITY
308
+ } // namespace at::vec
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h ADDED
@@ -0,0 +1,447 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2)
10
+ #define SLEEF_STATIC_LIBS
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at::vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+
19
+ #if defined(CPU_CAPABILITY_AVX2)
20
+
21
+ template <> class Vectorized<double> {
22
+ private:
23
+ __m256d values;
24
+ public:
25
+ using value_type = double;
26
+ using size_type = int;
27
+ static constexpr size_type size() {
28
+ return 4;
29
+ }
30
+ Vectorized() {}
31
+ Vectorized(__m256d v) : values(v) {}
32
+ Vectorized(double val) {
33
+ values = _mm256_set1_pd(val);
34
+ }
35
+ Vectorized(double val1, double val2, double val3, double val4) {
36
+ values = _mm256_setr_pd(val1, val2, val3, val4);
37
+ }
38
+ operator __m256d() const {
39
+ return values;
40
+ }
41
+ template <int64_t mask>
42
+ static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
43
+ return _mm256_blend_pd(a.values, b.values, mask);
44
+ }
45
+ static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
46
+ const Vectorized<double>& mask) {
47
+ return _mm256_blendv_pd(a.values, b.values, mask.values);
48
+ }
49
+ template<typename step_t>
50
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
51
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
52
+ }
53
+ static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
54
+ int64_t count = size()) {
55
+ switch (count) {
56
+ case 0:
57
+ return a;
58
+ case 1:
59
+ return blend<1>(a, b);
60
+ case 2:
61
+ return blend<3>(a, b);
62
+ case 3:
63
+ return blend<7>(a, b);
64
+ }
65
+ return b;
66
+ }
67
+ static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
68
+ if (count == size())
69
+ return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
70
+
71
+
72
+ __at_align__ double tmp_values[size()];
73
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
74
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
75
+ // instructions while a loop would be compiled to one instruction.
76
+ for (const auto i : c10::irange(size())) {
77
+ tmp_values[i] = 0.0;
78
+ }
79
+ std::memcpy(
80
+ tmp_values,
81
+ reinterpret_cast<const double*>(ptr),
82
+ count * sizeof(double));
83
+ return _mm256_load_pd(tmp_values);
84
+ }
85
+ void store(void* ptr, int count = size()) const {
86
+ if (count == size()) {
87
+ _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
88
+ } else if (count > 0) {
89
+ double tmp_values[size()];
90
+ _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
91
+ std::memcpy(ptr, tmp_values, count * sizeof(double));
92
+ }
93
+ }
94
+ const double& operator[](int idx) const = delete;
95
+ double& operator[](int idx) = delete;
96
+ int zero_mask() const {
97
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
98
+ __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ);
99
+ return _mm256_movemask_pd(cmp);
100
+ }
101
+ Vectorized<double> isnan() const {
102
+ return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q);
103
+ }
104
+ bool has_inf_nan() const {
105
+ __m256d self_sub = _mm256_sub_pd(values, values);
106
+ return (_mm256_movemask_epi8(_mm256_castpd_si256(self_sub)) & 0x77777777) != 0;
107
+ }
108
+ Vectorized<double> map(double (*const f)(double)) const {
109
+ __at_align__ double tmp[size()];
110
+ store(tmp);
111
+ for (const auto i : c10::irange(size())) {
112
+ tmp[i] = f(tmp[i]);
113
+ }
114
+ return loadu(tmp);
115
+ }
116
+ Vectorized<double> abs() const {
117
+ auto mask = _mm256_set1_pd(-0.f);
118
+ return _mm256_andnot_pd(mask, values);
119
+ }
120
+ Vectorized<double> angle() const {
121
+ const auto zero_vec = _mm256_set1_pd(0.f);
122
+ const auto nan_vec = _mm256_set1_pd(NAN);
123
+ const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ);
124
+ const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ);
125
+ const auto pi = _mm256_set1_pd(c10::pi<double>);
126
+
127
+ const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ);
128
+ auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask);
129
+ angle = _mm256_blendv_pd(angle, nan_vec, nan_mask);
130
+ return angle;
131
+ }
132
+ Vectorized<double> real() const {
133
+ return *this;
134
+ }
135
+ Vectorized<double> imag() const {
136
+ return _mm256_set1_pd(0);
137
+ }
138
+ Vectorized<double> conj() const {
139
+ return *this;
140
+ }
141
+ Vectorized<double> acos() const {
142
+ return Vectorized<double>(Sleef_acosd4_u10(values));
143
+ }
144
+ Vectorized<double> acosh() const {
145
+ return Vectorized<double>(Sleef_acoshd4_u10(values));
146
+ }
147
+ Vectorized<double> asin() const {
148
+ return Vectorized<double>(Sleef_asind4_u10(values));
149
+ }
150
+ Vectorized<double> atan() const {
151
+ return Vectorized<double>(Sleef_atand4_u10(values));
152
+ }
153
+ Vectorized<double> atanh() const {
154
+ return Vectorized<double>(Sleef_atanhd4_u10(values));
155
+ }
156
+ Vectorized<double> atan2(const Vectorized<double> &b) const {
157
+ return Vectorized<double>(Sleef_atan2d4_u10(values, b));
158
+ }
159
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
160
+ return Vectorized<double>(Sleef_copysignd4(values, sign));
161
+ }
162
+ Vectorized<double> erf() const {
163
+ return Vectorized<double>(Sleef_erfd4_u10(values));
164
+ }
165
+ Vectorized<double> erfc() const {
166
+ return Vectorized<double>(Sleef_erfcd4_u15(values));
167
+ }
168
+ Vectorized<double> erfinv() const {
169
+ return map(calc_erfinv);
170
+ }
171
+ Vectorized<double> exp() const {
172
+ return Vectorized<double>(Sleef_expd4_u10(values));
173
+ }
174
+ Vectorized<double> exp2() const {
175
+ return Vectorized<double>(Sleef_exp2d4_u10(values));
176
+ }
177
+ Vectorized<double> expm1() const {
178
+ return Vectorized<double>(Sleef_expm1d4_u10(values));
179
+ }
180
+ Vectorized<double> exp_u20() const {
181
+ return exp();
182
+ }
183
+ Vectorized<double> fmod(const Vectorized<double>& q) const {
184
+ return Vectorized<double>(Sleef_fmodd4(values, q));
185
+ }
186
+ Vectorized<double> hypot(const Vectorized<double> &b) const {
187
+ return Vectorized<double>(Sleef_hypotd4_u05(values, b));
188
+ }
189
+ Vectorized<double> i0() const {
190
+ return map(calc_i0);
191
+ }
192
+ Vectorized<double> i0e() const {
193
+ return map(calc_i0e);
194
+ }
195
+ Vectorized<double> digamma() const {
196
+ return map(calc_digamma);
197
+ }
198
+ Vectorized<double> igamma(const Vectorized<double> &x) const {
199
+ __at_align__ double tmp[size()];
200
+ __at_align__ double tmp_x[size()];
201
+ store(tmp);
202
+ x.store(tmp_x);
203
+ for (const auto i : c10::irange(size())) {
204
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
205
+ }
206
+ return loadu(tmp);
207
+ }
208
+ Vectorized<double> igammac(const Vectorized<double> &x) const {
209
+ __at_align__ double tmp[size()];
210
+ __at_align__ double tmp_x[size()];
211
+ store(tmp);
212
+ x.store(tmp_x);
213
+ for (const auto i : c10::irange(size())) {
214
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
215
+ }
216
+ return loadu(tmp);
217
+ }
218
+ Vectorized<double> log() const {
219
+ return Vectorized<double>(Sleef_logd4_u10(values));
220
+ }
221
+ Vectorized<double> log2() const {
222
+ return Vectorized<double>(Sleef_log2d4_u10(values));
223
+ }
224
+ Vectorized<double> log10() const {
225
+ return Vectorized<double>(Sleef_log10d4_u10(values));
226
+ }
227
+ Vectorized<double> log1p() const {
228
+ return Vectorized<double>(Sleef_log1pd4_u10(values));
229
+ }
230
+ Vectorized<double> sin() const {
231
+ return Vectorized<double>(Sleef_sind4_u10(values));
232
+ }
233
+ Vectorized<double> sinh() const {
234
+ return Vectorized<double>(Sleef_sinhd4_u10(values));
235
+ }
236
+ Vectorized<double> cos() const {
237
+ return Vectorized<double>(Sleef_cosd4_u10(values));
238
+ }
239
+ Vectorized<double> cosh() const {
240
+ return Vectorized<double>(Sleef_coshd4_u10(values));
241
+ }
242
+ Vectorized<double> ceil() const {
243
+ return _mm256_ceil_pd(values);
244
+ }
245
+ Vectorized<double> floor() const {
246
+ return _mm256_floor_pd(values);
247
+ }
248
+ Vectorized<double> frac() const;
249
+ Vectorized<double> neg() const {
250
+ return _mm256_xor_pd(_mm256_set1_pd(-0.), values);
251
+ }
252
+ Vectorized<double> nextafter(const Vectorized<double> &b) const {
253
+ return Vectorized<double>(Sleef_nextafterd4(values, b));
254
+ }
255
+ Vectorized<double> round() const {
256
+ return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
257
+ }
258
+ Vectorized<double> tan() const {
259
+ return Vectorized<double>(Sleef_tand4_u10(values));
260
+ }
261
+ Vectorized<double> tanh() const {
262
+ return Vectorized<double>(Sleef_tanhd4_u10(values));
263
+ }
264
+ Vectorized<double> trunc() const {
265
+ return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
266
+ }
267
+ Vectorized<double> lgamma() const {
268
+ return Vectorized<double>(Sleef_lgammad4_u10(values));
269
+ }
270
+ Vectorized<double> sqrt() const {
271
+ return _mm256_sqrt_pd(values);
272
+ }
273
+ Vectorized<double> reciprocal() const {
274
+ return _mm256_div_pd(_mm256_set1_pd(1), values);
275
+ }
276
+ Vectorized<double> rsqrt() const {
277
+ return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values));
278
+ }
279
+ Vectorized<double> pow(const Vectorized<double> &b) const {
280
+ return Vectorized<double>(Sleef_powd4_u10(values, b));
281
+ }
282
+ // Comparison using the _CMP_**_OQ predicate.
283
+ // `O`: get false if an operand is NaN
284
+ // `Q`: do not raise if an operand is NaN
285
+ Vectorized<double> operator==(const Vectorized<double>& other) const {
286
+ return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
287
+ }
288
+
289
+ Vectorized<double> operator!=(const Vectorized<double>& other) const {
290
+ return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
291
+ }
292
+
293
+ Vectorized<double> operator<(const Vectorized<double>& other) const {
294
+ return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ);
295
+ }
296
+
297
+ Vectorized<double> operator<=(const Vectorized<double>& other) const {
298
+ return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ);
299
+ }
300
+
301
+ Vectorized<double> operator>(const Vectorized<double>& other) const {
302
+ return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ);
303
+ }
304
+
305
+ Vectorized<double> operator>=(const Vectorized<double>& other) const {
306
+ return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ);
307
+ }
308
+
309
+ Vectorized<double> eq(const Vectorized<double>& other) const;
310
+ Vectorized<double> ne(const Vectorized<double>& other) const;
311
+ Vectorized<double> lt(const Vectorized<double>& other) const;
312
+ Vectorized<double> le(const Vectorized<double>& other) const;
313
+ Vectorized<double> gt(const Vectorized<double>& other) const;
314
+ Vectorized<double> ge(const Vectorized<double>& other) const;
315
+ };
316
+
317
+ template <>
318
+ Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
319
+ return _mm256_add_pd(a, b);
320
+ }
321
+
322
+ template <>
323
+ Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
324
+ return _mm256_sub_pd(a, b);
325
+ }
326
+
327
+ template <>
328
+ Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
329
+ return _mm256_mul_pd(a, b);
330
+ }
331
+
332
+ template <>
333
+ Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
334
+ return _mm256_div_pd(a, b);
335
+ }
336
+
337
+ // frac. Implement this here so we can use subtraction.
338
+ inline Vectorized<double> Vectorized<double>::frac() const {
339
+ return *this - this->trunc();
340
+ }
341
+
342
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
343
+ // either input is a NaN.
344
+ template <>
345
+ Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
346
+ Vectorized<double> max = _mm256_max_pd(a, b);
347
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
348
+ // Exploit the fact that all-ones is a NaN.
349
+ return _mm256_or_pd(max, isnan);
350
+ }
351
+
352
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
353
+ // either input is a NaN.
354
+ template <>
355
+ Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
356
+ Vectorized<double> min = _mm256_min_pd(a, b);
357
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
358
+ // Exploit the fact that all-ones is a NaN.
359
+ return _mm256_or_pd(min, isnan);
360
+ }
361
+
362
+ template <>
363
+ Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
364
+ return _mm256_min_pd(max, _mm256_max_pd(min, a));
365
+ }
366
+
367
+ template <>
368
+ Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
369
+ return _mm256_max_pd(min, a);
370
+ }
371
+
372
+ template <>
373
+ Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
374
+ return _mm256_min_pd(max, a);
375
+ }
376
+
377
+ template <>
378
+ Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
379
+ return _mm256_and_pd(a, b);
380
+ }
381
+
382
+ template <>
383
+ Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
384
+ return _mm256_or_pd(a, b);
385
+ }
386
+
387
+ template <>
388
+ Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
389
+ return _mm256_xor_pd(a, b);
390
+ }
391
+
392
+ inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
393
+ return (*this == other) & Vectorized<double>(1.0);
394
+ }
395
+
396
+ inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
397
+ return (*this != other) & Vectorized<double>(1.0);
398
+ }
399
+
400
+ inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
401
+ return (*this > other) & Vectorized<double>(1.0);
402
+ }
403
+
404
+ inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
405
+ return (*this >= other) & Vectorized<double>(1.0);
406
+ }
407
+
408
+ inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
409
+ return (*this < other) & Vectorized<double>(1.0);
410
+ }
411
+
412
+ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
413
+ return (*this <= other) & Vectorized<double>(1.0);
414
+ }
415
+
416
+ template <>
417
+ inline void convert(const double* src, double* dst, int64_t n) {
418
+ int64_t i;
419
+ #ifndef __msvc_cl__
420
+ #pragma unroll
421
+ #endif
422
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
423
+ _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i));
424
+ }
425
+ #ifndef __msvc_cl__
426
+ #pragma unroll
427
+ #endif
428
+ for (; i < n; i++) {
429
+ dst[i] = src[i];
430
+ }
431
+ }
432
+
433
+ #ifdef CPU_CAPABILITY_AVX2
434
+ template <>
435
+ Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
436
+ return _mm256_fmadd_pd(a, b, c);
437
+ }
438
+
439
+ template <>
440
+ Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
441
+ return _mm256_fmsub_pd(a, b, c);
442
+ }
443
+ #endif
444
+
445
+ #endif
446
+
447
+ }} // namespace at::vec::CPU_CAPABILITY
videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h ADDED
@@ -0,0 +1,656 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2)
10
+ #define SLEEF_STATIC_LIBS
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at::vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX2)
19
+
20
+ template <> class Vectorized<float> {
21
+ private:
22
+ __m256 values;
23
+ public:
24
+ using value_type = float;
25
+ using size_type = int;
26
+ static constexpr size_type size() {
27
+ return 8;
28
+ }
29
+ Vectorized() {}
30
+ Vectorized(__m256 v) : values(v) {}
31
+ Vectorized(float val) {
32
+ values = _mm256_set1_ps(val);
33
+ }
34
+ Vectorized(float val1, float val2, float val3, float val4,
35
+ float val5, float val6, float val7, float val8) {
36
+ values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8);
37
+ }
38
+ operator __m256() const {
39
+ return values;
40
+ }
41
+ template <int64_t mask>
42
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
43
+ return _mm256_blend_ps(a.values, b.values, mask);
44
+ }
45
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
46
+ const Vectorized<float>& mask) {
47
+ return _mm256_blendv_ps(a.values, b.values, mask.values);
48
+ }
49
+ template<typename step_t>
50
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
51
+ return Vectorized<float>(
52
+ base, base + step, base + 2 * step, base + 3 * step,
53
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
54
+ }
55
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
56
+ int64_t count = size()) {
57
+ switch (count) {
58
+ case 0:
59
+ return a;
60
+ case 1:
61
+ return blend<1>(a, b);
62
+ case 2:
63
+ return blend<3>(a, b);
64
+ case 3:
65
+ return blend<7>(a, b);
66
+ case 4:
67
+ return blend<15>(a, b);
68
+ case 5:
69
+ return blend<31>(a, b);
70
+ case 6:
71
+ return blend<63>(a, b);
72
+ case 7:
73
+ return blend<127>(a, b);
74
+ }
75
+ return b;
76
+ }
77
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
78
+ if (count == size())
79
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
80
+ __at_align__ float tmp_values[size()];
81
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
82
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
83
+ // instructions while a loop would be compiled to one instruction.
84
+ for (const auto i : c10::irange(size())) {
85
+ tmp_values[i] = 0.0;
86
+ }
87
+ std::memcpy(
88
+ tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float));
89
+ return _mm256_loadu_ps(tmp_values);
90
+ }
91
+ void store(void* ptr, int64_t count = size()) const {
92
+ if (count == size()) {
93
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
94
+ } else if (count > 0) {
95
+ float tmp_values[size()];
96
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
97
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
98
+ }
99
+ }
100
+ const float& operator[](int idx) const = delete;
101
+ float& operator[](int idx) = delete;
102
+ int zero_mask() const {
103
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
104
+ __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
105
+ return _mm256_movemask_ps(cmp);
106
+ }
107
+ Vectorized<float> isnan() const {
108
+ return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
109
+ }
110
+
111
+ bool has_inf_nan() const {
112
+ __m256 self_sub = _mm256_sub_ps(values, values);
113
+ return (_mm256_movemask_epi8(_mm256_castps_si256(self_sub)) & 0x77777777) != 0;
114
+ }
115
+
116
+ Vectorized<float> map(float (*const f)(float)) const {
117
+ __at_align__ float tmp[size()];
118
+ store(tmp);
119
+ for (const auto i : c10::irange(size())) {
120
+ tmp[i] = f(tmp[i]);
121
+ }
122
+ return loadu(tmp);
123
+ }
124
+ Vectorized<float> abs() const {
125
+ auto mask = _mm256_set1_ps(-0.f);
126
+ return _mm256_andnot_ps(mask, values);
127
+ }
128
+ Vectorized<float> angle() const {
129
+ const auto zero_vec = _mm256_set1_ps(0.f);
130
+ const auto nan_vec = _mm256_set1_ps(NAN);
131
+ const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ);
132
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
133
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
134
+
135
+ const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ);
136
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
137
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
138
+ return angle;
139
+ }
140
+ Vectorized<float> real() const {
141
+ return *this;
142
+ }
143
+ Vectorized<float> imag() const {
144
+ return _mm256_set1_ps(0);
145
+ }
146
+ Vectorized<float> conj() const {
147
+ return *this;
148
+ }
149
+ Vectorized<float> acos() const {
150
+ return Vectorized<float>(Sleef_acosf8_u10(values));
151
+ }
152
+ Vectorized<float> acosh() const {
153
+ return Vectorized<float>(Sleef_acoshf8_u10(values));
154
+ }
155
+ Vectorized<float> asin() const {
156
+ return Vectorized<float>(Sleef_asinf8_u10(values));
157
+ }
158
+ Vectorized<float> atan() const {
159
+ return Vectorized<float>(Sleef_atanf8_u10(values));
160
+ }
161
+ Vectorized<float> atanh() const {
162
+ return Vectorized<float>(Sleef_atanhf8_u10(values));
163
+ }
164
+ Vectorized<float> atan2(const Vectorized<float> &b) const {
165
+ return Vectorized<float>(Sleef_atan2f8_u10(values, b));
166
+ }
167
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
168
+ return Vectorized<float>(Sleef_copysignf8(values, sign));
169
+ }
170
+ Vectorized<float> erf() const {
171
+ // constants
172
+ const auto neg_zero_vec = _mm256_set1_ps(-0.f);
173
+ const auto one_vec = _mm256_set1_ps(1.0f);
174
+ const auto p = _mm256_set1_ps(0.3275911f);
175
+ const auto p1 = _mm256_set1_ps(0.254829592f);
176
+ const auto p2 = _mm256_set1_ps(-0.284496736f);
177
+ const auto p3 = _mm256_set1_ps(1.421413741f);
178
+ const auto p4 = _mm256_set1_ps(-1.453152027f);
179
+ const auto p5 = _mm256_set1_ps(1.061405429f);
180
+ // sign(x)
181
+ auto sign_mask = _mm256_and_ps(neg_zero_vec, values);
182
+ auto abs_vec = _mm256_xor_ps(sign_mask, values);
183
+ // t = 1 / (p * abs(x) + 1)
184
+ auto tmp0 = _mm256_fmadd_ps(p, abs_vec, one_vec);
185
+ auto t = _mm256_div_ps(one_vec, tmp0);
186
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
187
+ auto tmp1 = _mm256_fmadd_ps(p5, t, p4);
188
+ auto tmp2 = _mm256_fmadd_ps(tmp1, t, p3);
189
+ auto tmp3 = _mm256_fmadd_ps(tmp2, t, p2);
190
+ auto r = _mm256_fmadd_ps(tmp3, t, p1);
191
+ // - exp(- x * x)
192
+ auto pow_2 = _mm256_mul_ps(values, values);
193
+ auto neg_pow_2 = _mm256_xor_ps(neg_zero_vec, pow_2);
194
+ // auto tmp4 = exp(neg_pow_2);
195
+ auto tmp4 = Vectorized<float>(Sleef_expf8_u10(neg_pow_2));
196
+ auto tmp5 = _mm256_xor_ps(neg_zero_vec, tmp4);
197
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
198
+ auto tmp6 = _mm256_mul_ps(tmp5, t);
199
+ auto tmp7 = _mm256_fmadd_ps(tmp6, r, one_vec);
200
+ return _mm256_xor_ps(sign_mask, tmp7);
201
+ }
202
+ Vectorized<float> erfc() const {
203
+ return Vectorized<float>(Sleef_erfcf8_u15(values));
204
+ }
205
+ Vectorized<float> erfinv() const {
206
+ return map(calc_erfinv);
207
+ }
208
+ Vectorized<float> exp() const {
209
+ return Vectorized<float>(Sleef_expf8_u10(values));
210
+ }
211
+ Vectorized<float> exp2() const {
212
+ return Vectorized<float>(Sleef_exp2f8_u10(values));
213
+ }
214
+ Vectorized<float> expm1() const {
215
+ return Vectorized<float>(Sleef_expm1f8_u10(values));
216
+ }
217
+ Vectorized<float> exp_u20() const {
218
+ // A faster version of exp with ULP=20
219
+ static __m256 vec_factorial_1 =
220
+ _mm256_set1_ps(0.999999701f); // 1/factorial(1)
221
+ static __m256 vec_factorial_2 =
222
+ _mm256_set1_ps(0.499991506f); // 1/factorial(2)
223
+ static __m256 vec_factorial_3 =
224
+ _mm256_set1_ps(0.166676521f); // 1/factorial(3)
225
+ static __m256 vec_factorial_4 =
226
+ _mm256_set1_ps(0.0418978221f); // 1/factorial(4)
227
+ static __m256 vec_factorial_5 =
228
+ _mm256_set1_ps(0.00828929059f); // 1/factorial(5)
229
+ static __m256 vec_exp_log2ef =
230
+ _mm256_castsi256_ps(_mm256_set1_epi32(0x3fb8aa3b)); // log2(e)
231
+ static __m256 vec_half = _mm256_set1_ps(0.5f);
232
+ static __m256 vec_one = _mm256_set1_ps(1.f);
233
+ static __m256 vec_zero = _mm256_set1_ps(0.f);
234
+ static __m256 vec_two = _mm256_set1_ps(2.f);
235
+ static __m256 vec_ln2f = _mm256_castsi256_ps(_mm256_set1_epi32(0x3f317218)); // ln(2)
236
+ static __m256 vec_ln_flt_min = _mm256_castsi256_ps(_mm256_set1_epi32(0xc2aeac50));
237
+ static __m256 vec_ln_flt_max = _mm256_castsi256_ps(_mm256_set1_epi32(0x42b17218));
238
+ static __m256i vec_127 = _mm256_set1_epi32(0x0000007f);
239
+ static int n_mantissa_bits = 23;
240
+
241
+ // exp(x) =
242
+ // = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem
243
+ // = 2^n * exp(r) // simplify the exp(n*ln(2)) expression
244
+
245
+ auto less_ln_flt_min_mask =
246
+ _mm256_cmp_ps(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/);
247
+ auto vec_src = _mm256_min_ps(values, vec_ln_flt_max);
248
+ vec_src = _mm256_max_ps(vec_src, vec_ln_flt_min);
249
+
250
+ // fx = floorf(x * log2ef + 0.5)
251
+ auto vec_fx = _mm256_fmadd_ps(vec_src, vec_exp_log2ef, vec_half);
252
+ vec_fx = _mm256_floor_ps(vec_fx);
253
+
254
+ // x = x - fx * ln2
255
+ auto vec_exp_poly = _mm256_fnmadd_ps(vec_fx, vec_ln2f, vec_src);
256
+
257
+ // compute polynomial
258
+ auto vec_res =
259
+ _mm256_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4);
260
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3);
261
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2);
262
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1);
263
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_one);
264
+
265
+ // compute 2^(n-1)
266
+ auto vec_exp_number = _mm256_sub_ps(vec_fx, vec_one);
267
+ auto vec_exp_number_i = _mm256_cvtps_epi32(vec_exp_number);
268
+ auto vec_two_pow_n_i = _mm256_add_epi32(vec_exp_number_i, vec_127);
269
+ vec_two_pow_n_i = _mm256_slli_epi32(vec_two_pow_n_i, n_mantissa_bits);
270
+ auto vec_two_pow_n = _mm256_castsi256_ps(vec_two_pow_n_i);
271
+ vec_two_pow_n =
272
+ _mm256_blendv_ps(vec_two_pow_n, vec_zero, less_ln_flt_min_mask);
273
+
274
+ // y = y * 2^n
275
+ vec_res = _mm256_mul_ps(vec_res, vec_two_pow_n);
276
+ vec_res = _mm256_mul_ps(vec_res, vec_two);
277
+ return vec_res;
278
+ }
279
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
280
+ return Vectorized<float>(Sleef_fmodf8(values, q));
281
+ }
282
+ Vectorized<float> log() const {
283
+ return Vectorized<float>(Sleef_logf8_u10(values));
284
+ }
285
+ Vectorized<float> log2() const {
286
+ return Vectorized<float>(Sleef_log2f8_u10(values));
287
+ }
288
+ Vectorized<float> log10() const {
289
+ return Vectorized<float>(Sleef_log10f8_u10(values));
290
+ }
291
+ Vectorized<float> log1p() const {
292
+ return Vectorized<float>(Sleef_log1pf8_u10(values));
293
+ }
294
+ Vectorized<float> frac() const;
295
+ Vectorized<float> sin() const {
296
+ return Vectorized<float>(Sleef_sinf8_u35(values));
297
+ }
298
+ Vectorized<float> sinh() const {
299
+ return Vectorized<float>(Sleef_sinhf8_u10(values));
300
+ }
301
+ Vectorized<float> cos() const {
302
+ return Vectorized<float>(Sleef_cosf8_u35(values));
303
+ }
304
+ Vectorized<float> cosh() const {
305
+ return Vectorized<float>(Sleef_coshf8_u10(values));
306
+ }
307
+ Vectorized<float> ceil() const {
308
+ return _mm256_ceil_ps(values);
309
+ }
310
+ Vectorized<float> floor() const {
311
+ return _mm256_floor_ps(values);
312
+ }
313
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
314
+ return Vectorized<float>(Sleef_hypotf8_u05(values, b));
315
+ }
316
+ Vectorized<float> i0() const {
317
+ return map(calc_i0);
318
+ }
319
+ Vectorized<float> i0e() const {
320
+ return map(calc_i0e);
321
+ }
322
+ Vectorized<float> digamma() const {
323
+ return map(calc_digamma);
324
+ }
325
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
326
+ __at_align__ float tmp[size()];
327
+ __at_align__ float tmp_x[size()];
328
+ store(tmp);
329
+ x.store(tmp_x);
330
+ for (const auto i : c10::irange(size())) {
331
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
332
+ }
333
+ return loadu(tmp);
334
+ }
335
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
336
+ __at_align__ float tmp[size()];
337
+ __at_align__ float tmp_x[size()];
338
+ store(tmp);
339
+ x.store(tmp_x);
340
+ for (const auto i : c10::irange(size())) {
341
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
342
+ }
343
+ return loadu(tmp);
344
+ }
345
+ Vectorized<float> neg() const {
346
+ return _mm256_xor_ps(_mm256_set1_ps(-0.f), values);
347
+ }
348
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
349
+ return Vectorized<float>(Sleef_nextafterf8(values, b));
350
+ }
351
+ Vectorized<float> round() const {
352
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
353
+ }
354
+ Vectorized<float> tan() const {
355
+ return Vectorized<float>(Sleef_tanf8_u10(values));
356
+ }
357
+ Vectorized<float> tanh() const {
358
+ return Vectorized<float>(Sleef_tanhf8_u10(values));
359
+ }
360
+ Vectorized<float> trunc() const {
361
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
362
+ }
363
+ Vectorized<float> lgamma() const {
364
+ return Vectorized<float>(Sleef_lgammaf8_u10(values));
365
+ }
366
+ Vectorized<float> sqrt() const {
367
+ return _mm256_sqrt_ps(values);
368
+ }
369
+ Vectorized<float> reciprocal() const {
370
+ return _mm256_div_ps(_mm256_set1_ps(1), values);
371
+ }
372
+ Vectorized<float> rsqrt() const {
373
+ return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values));
374
+ }
375
+ Vectorized<float> pow(const Vectorized<float> &b) const {
376
+ return Vectorized<float>(Sleef_powf8_u10(values, b));
377
+ }
378
+ // Comparison using the _CMP_**_OQ predicate.
379
+ // `O`: get false if an operand is NaN
380
+ // `Q`: do not raise if an operand is NaN
381
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
382
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
383
+ }
384
+
385
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
386
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
387
+ }
388
+
389
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
390
+ return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ);
391
+ }
392
+
393
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
394
+ return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ);
395
+ }
396
+
397
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
398
+ return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ);
399
+ }
400
+
401
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
402
+ return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ);
403
+ }
404
+
405
+ Vectorized<float> eq(const Vectorized<float>& other) const;
406
+ Vectorized<float> ne(const Vectorized<float>& other) const;
407
+ Vectorized<float> gt(const Vectorized<float>& other) const;
408
+ Vectorized<float> ge(const Vectorized<float>& other) const;
409
+ Vectorized<float> lt(const Vectorized<float>& other) const;
410
+ Vectorized<float> le(const Vectorized<float>& other) const;
411
+ };
412
+
413
+ template <>
414
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
415
+ return _mm256_add_ps(a, b);
416
+ }
417
+
418
+ template <>
419
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
420
+ return _mm256_sub_ps(a, b);
421
+ }
422
+
423
+ template <>
424
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
425
+ return _mm256_mul_ps(a, b);
426
+ }
427
+
428
+ template <>
429
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
430
+ return _mm256_div_ps(a, b);
431
+ }
432
+
433
+ // frac. Implement this here so we can use subtraction
434
+ inline Vectorized<float> Vectorized<float>::frac() const {
435
+ return *this - this->trunc();
436
+ }
437
+
438
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
439
+ // either input is a NaN.
440
+ template <>
441
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
442
+ Vectorized<float> max = _mm256_max_ps(a, b);
443
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
444
+ // Exploit the fact that all-ones is a NaN.
445
+ return _mm256_or_ps(max, isnan);
446
+ }
447
+
448
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
449
+ // either input is a NaN.
450
+ template <>
451
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
452
+ Vectorized<float> min = _mm256_min_ps(a, b);
453
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
454
+ // Exploit the fact that all-ones is a NaN.
455
+ return _mm256_or_ps(min, isnan);
456
+ }
457
+
458
+ template <>
459
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
460
+ return _mm256_min_ps(max, _mm256_max_ps(min, a));
461
+ }
462
+
463
+ template <>
464
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
465
+ return _mm256_min_ps(max, a);
466
+ }
467
+
468
+ template <>
469
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
470
+ return _mm256_max_ps(min, a);
471
+ }
472
+
473
+ template <>
474
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
475
+ return _mm256_and_ps(a, b);
476
+ }
477
+
478
+ template <>
479
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
480
+ return _mm256_or_ps(a, b);
481
+ }
482
+
483
+ template <>
484
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
485
+ return _mm256_xor_ps(a, b);
486
+ }
487
+
488
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
489
+ return (*this == other) & Vectorized<float>(1.0f);
490
+ }
491
+
492
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
493
+ return (*this != other) & Vectorized<float>(1.0f);
494
+ }
495
+
496
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
497
+ return (*this > other) & Vectorized<float>(1.0f);
498
+ }
499
+
500
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
501
+ return (*this >= other) & Vectorized<float>(1.0f);
502
+ }
503
+
504
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
505
+ return (*this < other) & Vectorized<float>(1.0f);
506
+ }
507
+
508
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
509
+ return (*this <= other) & Vectorized<float>(1.0f);
510
+ }
511
+
512
+ template <>
513
+ inline void convert(const float* src, float* dst, int64_t n) {
514
+ int64_t i;
515
+ #ifndef __msvc_cl__
516
+ #pragma unroll
517
+ #endif
518
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
519
+ _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
520
+ }
521
+ #ifndef __msvc_cl__
522
+ #pragma unroll
523
+ #endif
524
+ for (; i < n; i++) {
525
+ dst[i] = src[i];
526
+ }
527
+ }
528
+
529
+
530
+ template <>
531
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
532
+ return _mm256_fmadd_ps(a, b, c);
533
+ }
534
+
535
+ template <>
536
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
537
+ return _mm256_fmsub_ps(a, b, c);
538
+ }
539
+
540
+ // Used by Inductor CPP codegen
541
+ template<>
542
+ inline void transpose_mxn<float, 8, 8>(
543
+ const float* src,
544
+ int64_t ld_src,
545
+ float* dst,
546
+ int64_t ld_dst) {
547
+ // load from src to registers
548
+ // a: a0 a1 a2 a3 a4 a5 a6 a7
549
+ // b: b0 b1 b2 b3 b4 b5 b6 b7
550
+ // c: c0 c1 c2 c3 c4 c5 c6 c7
551
+ // d: d0 d1 d2 d3 d4 d5 d6 d7
552
+ // e: e0 e1 e2 e3 e4 e5 e6 e7
553
+ // f: f0 f1 f2 f3 f4 f5 f6 f7
554
+ // g: g0 g1 g2 g3 g4 g5 g6 g7
555
+ // h: h0 h1 h2 h3 h4 h5 h6 h7
556
+ __m256 a = _mm256_loadu_ps(&src[0 * ld_src]);
557
+ __m256 b = _mm256_loadu_ps(&src[1 * ld_src]);
558
+ __m256 c = _mm256_loadu_ps(&src[2 * ld_src]);
559
+ __m256 d = _mm256_loadu_ps(&src[3 * ld_src]);
560
+ __m256 e = _mm256_loadu_ps(&src[4 * ld_src]);
561
+ __m256 f = _mm256_loadu_ps(&src[5 * ld_src]);
562
+ __m256 g = _mm256_loadu_ps(&src[6 * ld_src]);
563
+ __m256 h = _mm256_loadu_ps(&src[7 * ld_src]);
564
+
565
+ __m256 ta, tb, tc, td, te, tf, tg, th;
566
+ // unpacking and interleaving 32-bit elements
567
+ // a0 b0 a1 b1 a4 b4 a5 b5
568
+ // a2 b2 a3 b3 a6 b6 a7 b7
569
+ // c0 d0 c1 d1 ...
570
+ // c2 d2 c3 d3 ...
571
+ // e0 f0 e1 f1 ...
572
+ // e2 f2 e3 f3 ...
573
+ // g0 h0 g1 h1 ...
574
+ // g2 h2 g3 h3 ...
575
+ ta = _mm256_unpacklo_ps(a, b);
576
+ tb = _mm256_unpackhi_ps(a, b);
577
+ tc = _mm256_unpacklo_ps(c, d);
578
+ td = _mm256_unpackhi_ps(c, d);
579
+ te = _mm256_unpacklo_ps(e, f);
580
+ tf = _mm256_unpackhi_ps(e, f);
581
+ tg = _mm256_unpacklo_ps(g, h);
582
+ th = _mm256_unpackhi_ps(g, h);
583
+
584
+ // unpacking and interleaving 64-bit elements
585
+ // a0 b0 c0 d0 a4 b4 c4 d4
586
+ // a1 b1 c1 d1 ...
587
+ // a2 b2 c2 d2 ...
588
+ // a3 b3 c3 d3 ...
589
+ // e0 f0 g0 h0 e4 f4 g4 h4
590
+ // e1 f1 g1 h1 ...
591
+ // e2 f2 g2 h2 ...
592
+ // e3 f3 g3 h3 ...
593
+ a = _mm256_castpd_ps(
594
+ _mm256_unpacklo_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
595
+ b = _mm256_castpd_ps(
596
+ _mm256_unpackhi_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
597
+ c = _mm256_castpd_ps(
598
+ _mm256_unpacklo_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
599
+ d = _mm256_castpd_ps(
600
+ _mm256_unpackhi_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
601
+ e = _mm256_castpd_ps(
602
+ _mm256_unpacklo_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
603
+ f = _mm256_castpd_ps(
604
+ _mm256_unpackhi_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
605
+ g = _mm256_castpd_ps(
606
+ _mm256_unpacklo_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
607
+ h = _mm256_castpd_ps(
608
+ _mm256_unpackhi_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
609
+
610
+ // shuffle 128-bits (composed of 4 32-bit elements)
611
+ // a0 b0 c0 d0 e0 f0 g0 h0
612
+ // a1 b1 c1 d1 ...
613
+ // a2 b2 c2 d2 ...
614
+ // a3 b3 c3 d3 ...
615
+ // a4 b4 c4 d4 ...
616
+ // a5 b5 c5 d5 ...
617
+ // a6 b6 c6 d6 ...
618
+ // a7 b7 c7 d7 ...
619
+ ta = _mm256_permute2f128_ps(a, e, 0x20);
620
+ tb = _mm256_permute2f128_ps(b, f, 0x20);
621
+ tc = _mm256_permute2f128_ps(c, g, 0x20);
622
+ td = _mm256_permute2f128_ps(d, h, 0x20);
623
+ te = _mm256_permute2f128_ps(a, e, 0x31);
624
+ tf = _mm256_permute2f128_ps(b, f, 0x31);
625
+ tg = _mm256_permute2f128_ps(c, g, 0x31);
626
+ th = _mm256_permute2f128_ps(d, h, 0x31);
627
+
628
+ // store from registers to dst
629
+ _mm256_storeu_ps(&dst[0 * ld_dst], ta);
630
+ _mm256_storeu_ps(&dst[1 * ld_dst], tb);
631
+ _mm256_storeu_ps(&dst[2 * ld_dst], tc);
632
+ _mm256_storeu_ps(&dst[3 * ld_dst], td);
633
+ _mm256_storeu_ps(&dst[4 * ld_dst], te);
634
+ _mm256_storeu_ps(&dst[5 * ld_dst], tf);
635
+ _mm256_storeu_ps(&dst[6 * ld_dst], tg);
636
+ _mm256_storeu_ps(&dst[7 * ld_dst], th);
637
+ }
638
+
639
+ template<>
640
+ inline void transpose_mxn<float, 16, 16>(
641
+ const float* src,
642
+ int64_t ld_src,
643
+ float* dst,
644
+ int64_t ld_dst) {
645
+ transpose_mxn<float, 8, 8>(
646
+ src , ld_src, dst, ld_dst);
647
+ transpose_mxn<float, 8, 8>(
648
+ src + 8, ld_src, dst + 8 * ld_dst, ld_dst);
649
+ transpose_mxn<float, 8, 8>(
650
+ src + 8 * ld_src, ld_src, dst + 8, ld_dst);
651
+ transpose_mxn<float, 8, 8>(
652
+ src + 8 * ld_src + 8, ld_src, dst + 8 * ld_dst + 8, ld_dst);
653
+ }
654
+ #endif
655
+
656
+ }} // namespace at::vec::CPU_CAPABILITY