diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..23cf15067f6d50fddd3ca45639566958d4262ce8
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/CUDAFunctions.h
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable std::optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CUDAFunctions_inl.h>
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..4278ea0820295f0285b30359a5b26e6100d257ef
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable std::optional<Tensor> arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h>
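// Illustrative sketch (not part of the vendored diff): the dummy-header
// pattern the note above describes, using hypothetical MyBackend names.
// The outer header pins the include order; the _inl header carries the
// fastpath declarations that need the full Tensor definition.
//
//   // MyBackendFunctions.h -- safe for any file to include directly
//   #include <ATen/core/TensorBody.h>         // finish defining Tensor first
//   #include <ATen/MyBackendFunctions_inl.h>  // then pull in the API
//
//   // MyBackendFunctions_inl.h -- assumes Tensor is fully defined
//   namespace at { namespace mybackend {
//   Tensor add(const Tensor& self, const Tensor& other);
//   } // namespace mybackend
//   } // namespace at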
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/Config.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/Config.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7a964bb4e403711ff9c642d95721d9c7a99b274
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/Config.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's
+// obvious if you forgot to include Config.h
+// c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
+//
+// DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h
+
+#define AT_MKLDNN_ENABLED() 1
+#define AT_MKLDNN_ACL_ENABLED() 0
+#define AT_MKL_ENABLED() 1
+#define AT_MKL_SEQUENTIAL() 0
+#define AT_POCKETFFT_ENABLED() 0
+#define AT_NNPACK_ENABLED() 1
+#define CAFFE2_STATIC_LINK_CUDA() 0
+#define AT_BUILD_WITH_BLAS() 1
+#define AT_BUILD_WITH_LAPACK() 1
+#define AT_PARALLEL_OPENMP 1
+#define AT_PARALLEL_NATIVE 0
+#define AT_BLAS_F2C() 0
+#define AT_BLAS_USE_CBLAS_DOT() 0
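// Illustrative sketch (not part of the vendored diff): why the header above
// says to test these with #if AT_MKL_ENABLED() rather than #ifdef. If
// Config.h is not included, the undefined identifier evaluates to 0 in the
// conditional and the trailing "()" leaves an invalid expression, so the
// #if is a hard preprocessor error instead of silently taking the
// "disabled" branch the way #ifdef would.
#include <ATen/Config.h>

#if AT_MKL_ENABLED()
constexpr bool kAtenHasMkl = true;   // MKL-specific code paths go here
#else
constexpr bool kAtenHasMkl = false;  // portable fallback paths go here
#endif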
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/Dimname.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/Dimname.h
new file mode 100644
index 0000000000000000000000000000000000000000..71836a9e25d3d82d9cd5024b2f33e147e14bf87e
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/Dimname.h
@@ -0,0 +1 @@
+#include <ATen/core/Dimname.h>
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..e34be30f960712a9a6306383f9ffcfbde29d32a2
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/EmptyTensor.h
@@ -0,0 +1,166 @@
+#pragma once
+#include <ATen/core/TensorBase.h>
+
+namespace at::detail {
+
+inline void check_size_nonnegative(ArrayRef<int64_t> size) {
+  for (const auto& x : size) {
+    TORCH_CHECK(
+        x >= 0,
+        "Trying to create tensor with negative dimension ",
+        x,
+        ": ",
+        size);
+  }
+}
+
+inline void check_size_nonnegative(ArrayRef<c10::SymInt> size) {
+  for (const auto& x : size) {
+    TORCH_CHECK(
+        x.expect_size(__FILE__, __LINE__),
+        "Trying to create tensor with negative dimension ",
+        x,
+        ": ",
+        size);
+  }
+}
+
+TORCH_API size_t computeStorageNbytesContiguous(
+    IntArrayRef sizes,
+    size_t itemsize,
+    size_t storage_offset = 0);
+TORCH_API SymInt computeStorageNbytesContiguous(
+    SymIntArrayRef sizes,
+    const SymInt& itemsize,
+    const SymInt& storage_offset = 0);
+TORCH_API size_t computeStorageNbytes(
+    IntArrayRef sizes,
+    IntArrayRef strides,
+    size_t itemsize,
+    size_t storage_offset = 0);
+TORCH_API SymInt computeStorageNbytes(
+    SymIntArrayRef sizes,
+    SymIntArrayRef strides,
+    const SymInt& itemsize,
+    const SymInt& storage_offset = 0);
+
+TORCH_API TensorBase empty_generic(
+    IntArrayRef size,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type,
+    std::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_generic_symint(
+    SymIntArrayRef size,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type,
+    std::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_strided_generic(
+    IntArrayRef size,
+    IntArrayRef stride,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type);
+
+TORCH_API TensorBase empty_strided_symint_generic(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    c10::Allocator* allocator,
+    c10::DispatchKeySet ks,
+    ScalarType scalar_type);
+
+TORCH_API TensorBase empty_cpu(
+    IntArrayRef size,
+    ScalarType dtype,
+    bool pin_memory = false,
+    std::optional<c10::MemoryFormat> memory_format_opt = std::nullopt);
+
+TORCH_API TensorBase empty_cpu(
+    IntArrayRef size,
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);
+
+TORCH_API TensorBase empty_strided_cpu(
+    IntArrayRef size,
+    IntArrayRef stride,
+    ScalarType dtype,
+    bool pin_memory = false);
+
+TORCH_API TensorBase empty_strided_cpu(
+    IntArrayRef size,
+    IntArrayRef stride,
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt);
+
+TORCH_API TensorBase empty_strided_cpu(
+    IntArrayRef size,
+    IntArrayRef stride,
+    const TensorOptions& options);
+
+TORCH_API TensorBase empty_meta(
+    IntArrayRef size,
+    ScalarType dtype,
+    std::optional<c10::MemoryFormat> memory_format_opt = std::nullopt);
+
+TORCH_API TensorBase empty_meta(
+    IntArrayRef size,
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_symint_meta(
+    SymIntArrayRef size,
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt,
+    std::optional<c10::MemoryFormat> memory_format_opt);
+
+TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);
+
+TORCH_API TensorBase
+empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);
+
+TORCH_API TensorBase empty_strided_meta(
+    IntArrayRef size,
+    IntArrayRef stride,
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt,
+    std::optional<bool> pin_memory_opt);
+
+TORCH_API TensorBase empty_strided_meta(
+    IntArrayRef size,
+    IntArrayRef stride,
+    const TensorOptions& options);
+
+TORCH_API TensorBase empty_strided_symint_meta(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    ScalarType dtype);
+
+TORCH_API TensorBase empty_strided_symint_meta(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    std::optional<ScalarType> dtype_opt,
+    std::optional<Layout> layout_opt,
+    std::optional<Device> device_opt);
+
+TORCH_API TensorBase empty_strided_symint_meta(
+    SymIntArrayRef size,
+    SymIntArrayRef stride,
+    const TensorOptions& options);
+
+} // namespace at::detail
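// Illustrative sketch (not part of the vendored diff): the arithmetic behind
// computeStorageNbytes above. A strided tensor needs enough storage to reach
// its largest addressable element: offset + sum_i (size_i - 1) * stride_i,
// plus one element, times the itemsize; any zero-length dimension means no
// storage is touched at all. The function name below is hypothetical.
#include <cstddef>
#include <cstdint>
#include <vector>

size_t storage_nbytes_sketch(
    const std::vector<int64_t>& sizes,
    const std::vector<int64_t>& strides,
    size_t itemsize,
    size_t storage_offset) {
  size_t max_index = storage_offset;
  for (size_t i = 0; i < sizes.size(); ++i) {
    if (sizes[i] == 0) {
      return 0;  // empty tensor: no reachable elements, no bytes needed
    }
    max_index +=
        static_cast<size_t>(sizes[i] - 1) * static_cast<size_t>(strides[i]);
  }
  return (max_index + 1) * itemsize;
}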
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h
new file mode 100644
index 0000000000000000000000000000000000000000..b648ef616284f971ab51e2bf9d8e7dd557237bb7
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/FuncTorchTLS.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+#include <memory>
+
+namespace at::functorch {
+
+// NOTE [functorch TLS in pytorch/pytorch]
+//
+// functorch lives out-of-tree. However, it has some TLS that needs to be
+// propagated. The solution for that is we store a pointer to the TLS
+// inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to
+// include whatever functorch needs.
+//
+// We need to store a pointer due to the indirection:
+// inside functorch, we will create a subclass of FuncTorchTLSBase called
+// FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack.
+// FuncTorchTLSBase doesn't have any metadata because it hasn't been defined
+// yet.
+//
+// Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside
+// functorch, we will assign a FuncTorchTLSImpl* to the FuncTorchTLSBase*.
+// We can't directly pass around FuncTorchTLSBase (without a pointer) because
+// FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having
+// more elements.
+struct TORCH_API FuncTorchTLSBase {
+  virtual ~FuncTorchTLSBase() = default;
+  virtual std::unique_ptr<FuncTorchTLSBase> deepcopy() const = 0;
+
+  virtual int64_t checkSupportsSingleLevelAutogradFunction() const = 0;
+  virtual void checkSupportsCppAutogradFunction() const = 0;
+  virtual void checkSupportsInplaceRequiresGrad() const = 0;
+  virtual void checkSupportsRetainGrad() const = 0;
+};
+
+// returns deepcopy of the functorch tls
+TORCH_API std::unique_ptr<FuncTorchTLSBase> getCopyOfFuncTorchTLS();
+
+// sets the functorch tls. always does a deep copy.
+TORCH_API void setFuncTorchTLS(
+    const std::shared_ptr<const FuncTorchTLSBase>& state);
+
+// get a mutable reference to the functorch tls
+TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor();
+
+} // namespace at::functorch
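// Illustrative sketch (not part of the vendored diff): how an out-of-tree
// consumer can hang its state off the hook above. MyTLSImpl and its `stack`
// member are hypothetical stand-ins for functorch's real FuncTorchTLSImpl
// and DynamicLayerStack.
#include <ATen/FuncTorchTLS.h>

#include <cstdint>
#include <memory>
#include <vector>

namespace my_functorch {

struct MyTLSImpl : at::functorch::FuncTorchTLSBase {
  std::vector<int64_t> stack;  // whatever per-thread state the library needs

  std::unique_ptr<at::functorch::FuncTorchTLSBase> deepcopy() const override {
    // honor the deep-copy contract documented on the base class
    return std::make_unique<MyTLSImpl>(*this);
  }
  int64_t checkSupportsSingleLevelAutogradFunction() const override {
    return 0;
  }
  void checkSupportsCppAutogradFunction() const override {}
  void checkSupportsInplaceRequiresGrad() const override {}
  void checkSupportsRetainGrad() const override {}
};

} // namespace my_functorch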
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h
new file mode 100644
index 0000000000000000000000000000000000000000..4617afd0b72c7ce286e61a4d1abe2cc89743024c
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/LinalgBackend.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include <c10/util/Exception.h>
+
+#include <ostream>
+#include <string>
+
+namespace at {
+
+enum class LinalgBackend : int8_t { Default, Cusolver, Magma };
+
+inline std::string LinalgBackendToString(at::LinalgBackend backend) {
+  switch (backend) {
+    case LinalgBackend::Default:
+      return "at::LinalgBackend::Default";
+    case LinalgBackend::Cusolver:
+      return "at::LinalgBackend::Cusolver";
+    case LinalgBackend::Magma:
+      return "at::LinalgBackend::Magma";
+    default:
+      TORCH_CHECK(false, "Unknown linalg backend");
+  }
+}
+
+inline std::ostream& operator<<(
+    std::ostream& stream,
+    at::LinalgBackend backend) {
+  return stream << LinalgBackendToString(backend);
+}
+
+} // namespace at
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h
new file mode 100644
index 0000000000000000000000000000000000000000..84e744ba10b10af06a234ade767c2a1caa34d9fa
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/ParallelOpenMP.h
@@ -0,0 +1,54 @@
+#pragma once
+
+#include <algorithm>
+#include <atomic>
+#include <cstddef>
+#include <exception>
+
+#ifdef _OPENMP
+#define INTRA_OP_PARALLEL
+
+#include <omp.h>
+#endif
+
+#ifdef _OPENMP
+namespace at::internal {
+template <typename F>
+inline void invoke_parallel(
+    int64_t begin,
+    int64_t end,
+    int64_t grain_size,
+    const F& f) {
+  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
+  std::exception_ptr eptr;
+
+#pragma omp parallel
+  {
+    // choose number of tasks based on grain size and number of threads
+    // can't use num_threads clause due to bugs in GOMP's thread pool (See
+    // #32008)
+    int64_t num_threads = omp_get_num_threads();
+    if (grain_size > 0) {
+      num_threads = std::min(num_threads, divup((end - begin), grain_size));
+    }
+
+    int64_t tid = omp_get_thread_num();
+    int64_t chunk_size = divup((end - begin), num_threads);
+    int64_t begin_tid = begin + tid * chunk_size;
+    if (begin_tid < end) {
+      try {
+        internal::ThreadIdGuard tid_guard(tid);
+        f(begin_tid, std::min(end, chunk_size + begin_tid));
+      } catch (...) {
+        if (!err_flag.test_and_set()) {
+          eptr = std::current_exception();
+        }
+      }
+    }
+  }
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  }
+}
+} // namespace at::internal
+#endif // _OPENMP
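// Illustrative sketch (not part of the vendored diff): the grain-size policy
// invoke_parallel above implements, in self-contained form. Workers are
// capped so every chunk holds at least grain_size items; divup is the
// ceiling division the header relies on. parallel_fill is a hypothetical demo.
#include <algorithm>
#include <cstdint>

#ifdef _OPENMP
#include <omp.h>
#endif

inline int64_t divup(int64_t x, int64_t y) {
  return (x + y - 1) / y;
}

void parallel_fill(int64_t* out, int64_t begin, int64_t end, int64_t grain_size) {
#ifdef _OPENMP
#pragma omp parallel
  {
    // fewer, larger chunks when the range is small relative to grain_size
    int64_t num_threads = omp_get_num_threads();
    if (grain_size > 0) {
      num_threads = std::min(num_threads, divup(end - begin, grain_size));
    }
    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup(end - begin, num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {  // surplus threads fall out of range and idle
      int64_t end_tid = std::min(end, begin_tid + chunk_size);
      for (int64_t i = begin_tid; i < end_tid; ++i) {
        out[i] = i;  // demo workload
      }
    }
  }
#else
  for (int64_t i = begin; i < end; ++i) {
    out[i] = i;  // serial fallback without OpenMP
  }
#endif
}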
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a81dc280e2424871d5188af637af679b269aea2
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/TensorUtils.h
@@ -0,0 +1,190 @@
+#pragma once
+
+#include <ATen/DimVector.h>
+#include <ATen/EmptyTensor.h>
+#include <ATen/Tensor.h>
+#include <ATen/TensorGeometry.h>
+#include <ATen/Utils.h>
+
+#include <utility>
+
+// These functions are NOT in Utils.h, because this file has a dep on Tensor.h
+
+#define TORCH_CHECK_TENSOR_ALL(cond, ...) \
+  TORCH_CHECK((cond)._is_all_true().item<bool>(), __VA_ARGS__);
+
+namespace at {
+
+// The following are utility functions for checking that arguments
+// make sense. These are particularly useful for native functions,
+// which do NO argument checking by default.
+
+struct TORCH_API TensorArg {
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+  const Tensor& tensor;
+  const char* name;
+  int pos; // 1-indexed
+  TensorArg(const Tensor& tensor, const char* name, int pos)
+      : tensor(tensor), name(name), pos(pos) {}
+  // Try to mitigate any possibility of dangling reference to temporaries.
+  // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
+  TensorArg(Tensor&& tensor, const char* name, int pos) = delete;
+  const Tensor* operator->() const {
+    return &tensor;
+  }
+  const Tensor& operator*() const {
+    return tensor;
+  }
+};
+
+struct TORCH_API TensorGeometryArg {
+  TensorGeometry tensor;
+  const char* name;
+  int pos; // 1-indexed
+  /* implicit */ TensorGeometryArg(TensorArg arg)
+      : tensor(TensorGeometry{arg.tensor}), name(arg.name), pos(arg.pos) {}
+  TensorGeometryArg(TensorGeometry tensor, const char* name, int pos)
+      : tensor(std::move(tensor)), name(name), pos(pos) {}
+  const TensorGeometry* operator->() const {
+    return &tensor;
+  }
+  const TensorGeometry& operator*() const {
+    return tensor;
+  }
+};
+
+// A string describing which function did checks on its input
+// arguments.
+// TODO: Consider generalizing this into a call stack.
+using CheckedFrom = const char*;
+
+// The undefined convention: singular operators assume their arguments
+// are defined, but functions which take multiple tensors will
+// implicitly filter out undefined tensors (to make it easier to perform
+// tests which should apply if the tensor is defined, and should not
+// otherwise.)
+//
+// NB: This means that the n-ary operators take lists of TensorArg,
+// not TensorGeometryArg, because the Tensor to TensorGeometry
+// conversion will blow up if you have undefined tensors.
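// Illustrative sketch (not part of the vendored diff): how a native function
// might use TensorArg with the check* helpers declared below. my_conv_stub
// and the argument positions are hypothetical.
#include <ATen/TensorUtils.h>

namespace {

void my_conv_stub(const at::Tensor& input, const at::Tensor& weight) {
  // Name each argument once; every failed check then reports the argument
  // name, its 1-indexed position, and the function doing the checking.
  at::TensorArg input_arg{input, "input", 1}, weight_arg{weight, "weight", 2};
  at::CheckedFrom c = "my_conv_stub";
  at::checkDim(c, input_arg, 4);  // e.g. expect an NCHW input
  at::checkSameType(c, input_arg, weight_arg);
  at::checkAllContiguous(c, {input_arg, weight_arg});
}

} // namespace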
+
+TORCH_API std::ostream& operator<<(
+    std::ostream& out,
+    const TensorGeometryArg& t);
+TORCH_API void checkDim(
+    CheckedFrom c,
+    const Tensor& tensor,
+    const char* name,
+    int pos, // 1-indexed
+    int64_t dim);
+TORCH_API void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim);
+// NB: this is an inclusive-exclusive range
+TORCH_API void checkDimRange(
+    CheckedFrom c,
+    const TensorGeometryArg& t,
+    int64_t dim_start,
+    int64_t dim_end);
+TORCH_API void checkSameDim(
+    CheckedFrom c,
+    const TensorGeometryArg& t1,
+    const TensorGeometryArg& t2);
+TORCH_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
+TORCH_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorGeometryArg> ts);
+TORCH_API void checkSize(
+    CheckedFrom c,
+    const TensorGeometryArg& t,
+    IntArrayRef sizes);
+TORCH_API void checkSize_symint(
+    CheckedFrom c,
+    const TensorGeometryArg& t,
+    c10::SymIntArrayRef sizes);
+TORCH_API void checkSize(
+    CheckedFrom c,
+    const TensorGeometryArg& t,
+    int64_t dim,
+    int64_t size);
+TORCH_API void checkSize_symint(
+    CheckedFrom c,
+    const TensorGeometryArg& t,
+    int64_t dim,
+    const c10::SymInt& size);
+TORCH_API void checkNumel(
+    CheckedFrom c,
+    const TensorGeometryArg& t,
+    int64_t numel);
+TORCH_API void checkSameNumel(
+    CheckedFrom c,
+    const TensorArg& t1,
+    const TensorArg& t2);
+TORCH_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType s);
+TORCH_API void checkScalarTypes(
+    CheckedFrom c,
+    const TensorArg& t,
+    at::ArrayRef<ScalarType> l);
+TORCH_API void checkSameGPU(
+    CheckedFrom c,
+    const TensorArg& t1,
+    const TensorArg& t2);
+TORCH_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkSameType(
+    CheckedFrom c,
+    const TensorArg& t1,
+    const TensorArg& t2);
+TORCH_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkSameSize(
+    CheckedFrom c,
+    const TensorArg& t1,
+    const TensorArg& t2);
+TORCH_API void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors);
+TORCH_API void checkDefined(CheckedFrom c, const TensorArg& t);
+TORCH_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);
+
+// FixMe: does TensorArg slow things down?
+TORCH_API void checkBackend(
+    CheckedFrom c,
+    at::ArrayRef<Tensor> t,
+    at::Backend backend);
+
+TORCH_API void checkDeviceType(
+    CheckedFrom c,
+    at::ArrayRef<Tensor> tensors,
+    at::DeviceType device_type);
+
+TORCH_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);
+
+TORCH_API void checkLayout(
+    CheckedFrom c,
+    at::ArrayRef<Tensor> tensors,
+    at::Layout layout);
+
+// Methods for getting data_ptr if tensor is defined
+TORCH_API void* maybe_data_ptr(const Tensor& tensor);
+TORCH_API void* maybe_data_ptr(const TensorArg& tensor);
+
+TORCH_API void check_dim_size(
+    const Tensor& tensor,
+    int64_t dim,
+    int64_t dim_size,
+    int64_t size);
+
+namespace detail {
+TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);
+
+TORCH_API std::optional<std::vector<int64_t>> computeStride(
+    IntArrayRef oldshape,
+    IntArrayRef oldstride,
+    IntArrayRef newshape);
+
+TORCH_API std::optional<SymDimVector> computeStride(
+    c10::SymIntArrayRef oldshape,
+    c10::SymIntArrayRef oldstride,
+    c10::SymIntArrayRef newshape);
+
+TORCH_API std::optional<DimVector> computeStride(
+    IntArrayRef oldshape,
+    IntArrayRef oldstride,
+    const DimVector& newshape);
+
+} // namespace detail
+} // namespace at
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b0bb5c4d2e79a6bb73b7092fc7761cea2340368
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/VmapGeneratedPlumbing.h
@@ -0,0 +1,27723 @@
+
+#pragma once
+#include <ATen/Operators.h>
+#include <ATen/functorch/PlumbingHelper.h>
+
+namespace at { namespace functorch {
+
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_cast_Byte::call(self, non_blocking);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, non_blocking);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_cast_Char::call(self, non_blocking);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, non_blocking);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_cast_Double::call(self, non_blocking);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, non_blocking);
+  return makeBatched(std::get<0>(results),
std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Float::call(self, non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Int::call(self, non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Long::call(self, non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Short::call(self, non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_cast_Half::call(self, non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const ::std::optional & gradient, ::std::optional retain_graph, bool create_graph) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) { + return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + std::optional gradient_value; + std::optional gradient_bdim; + if (gradient) { + std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level); + } + batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph); +} +template +void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) { + return at::_ops::set_data::call(self, new_data); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [new_data_value, new_data_bdim] = unwrapTensorAtLevel(new_data, cur_level); + batch_rule(self_value, self_bdim, new_data_value, new_data_bdim); +} +template +at::Tensor data_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::data::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::requires_grad_::call(self, requires_grad); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, requires_grad); + return self; +} +template +void retain_grad_generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::retain_grad::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); +} +template +at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fw_primal::call(self, level); + } + auto [self_value, self_bdim] = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) { + return at::_ops::_make_dual::call(primal, tangent, level); + } + auto [primal_value, primal_bdim] = unwrapTensorAtLevel(primal, cur_level); + auto [tangent_value, tangent_bdim] = unwrapTensorAtLevel(tangent, cur_level); + auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dual, cur_level)) { + return at::_ops::_unpack_dual::call(dual, level); + } + auto [dual_value, dual_bdim] = unwrapTensorAtLevel(dual, cur_level); + auto results = batch_rule(dual_value, dual_bdim, level); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rename_generated_plumbing(const at::Tensor & self, ::std::optional names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rename::call(self, names); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::align_to::call(self, names); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::align_as::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector align_tensors_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::align_tensors::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _assert_async_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_assert_async::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); +} +template +void _assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_assert_async_msg::call(self, assert_msg); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, assert_msg); +} +template +at::Tensor _functional_assert_scalar_generated_plumbing(const at::Scalar & self, c10::string_view 
assert_msg, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_assert_scalar::call(self, assert_msg, dep_token); + } + auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(self, assert_msg, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _functional_assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_assert_async_msg::call(self, assert_msg, dep_token); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(self_value, self_bdim, assert_msg, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(a, cur_level)) { + return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype); + } + auto [a_value, a_bdim] = unwrapTensorAtLevel(a, cur_level); + batch_rule(a_value, a_bdim, size, stride, dtype); +} +template +at::Tensor _functional_sym_constrain_range_generated_plumbing(const at::Scalar & size, ::std::optional min, ::std::optional max, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_sym_constrain_range::call(size, min, max, dep_token); + } + auto [dep_token_value, dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _functional_sym_constrain_range_for_size_generated_plumbing(const at::Scalar & size, ::std::optional min, ::std::optional max, const at::Tensor & dep_token) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dep_token, cur_level)) { + return at::_ops::_functional_sym_constrain_range_for_size::call(size, min, max, dep_token); + } + auto [dep_token_value, 
dep_token_bdim] = unwrapTensorAtLevel(dep_token, cur_level); + auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::refine_names::call(self, names); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) { + return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level); + auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) { + return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level); + auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level); + auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level); + auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level); + auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), 
std::get<3>(results), cur_level)); +} +template +at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight_arr, cur_level)) { + return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } + + auto results = batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional & weight_buf, const at::Tensor & hx, const ::std::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional & dropout_state) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) { + return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level); + std::optional weight_buf_value; + std::optional weight_buf_bdim; + if (weight_buf) { + std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level); + } + std::optional cx_value; + std::optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + std::optional dropout_state_value; + std::optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t 
weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) { + return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level); + auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level); + std::optional cx_value; + std::optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + std::optional grad_output_value; + std::optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + std::optional grad_hy_value; + std::optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + std::optional grad_cy_value; + std::optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + std::optional dropout_state_value; + std::optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple 
_fused_dropout_generated_plumbing(const at::Tensor & self, double p, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fused_dropout::call(self, p, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, generator); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_masked_scale::call(self, mask, scale); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple native_dropout_generated_plumbing(const at::Tensor & input, double p, ::std::optional train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::native_dropout::call(input, p, train); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::native_dropout_backward::call(grad_output, mask, scale); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level 
= maybe_layer->layerId(); + if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) { + return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype); + } + auto [quasi_value, quasi_bdim] = unwrapTensorAtLevel(quasi, cur_level); + auto [sobolstate_value, sobolstate_bdim] = unwrapTensorAtLevel(sobolstate, cur_level); + auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) { + return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [sobolstate_value, sobolstate_bdim] = unwrapTensorAtLevel(sobolstate, cur_level); + batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated); + return self; +} +template +at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) { + return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [ltm_value, ltm_bdim] = unwrapTensorAtLevel(ltm, cur_level); + batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension); + return self; +} +template +at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sobol_engine_initialize_state_::call(self, dimension); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dimension); + return self; +} +template +at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) { + return at::_ops::_reshape_from_tensor::call(self, shape); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [shape_value, shape_bdim] = unwrapTensorAtLevel(shape, cur_level); + auto results = 
batch_rule(self_value, self_bdim, shape_value, shape_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_shape_as_tensor::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::dropout::call(input, p, train); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dropout_::call(self, p, train); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::feature_dropout::call(input, p, train); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::feature_dropout_::call(self, p, train); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
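+  // Editorial note: this guard is the common fast path. When no argument is
+  // batched at the current vmap level, the wrapper falls straight through to
+  // the plain ATen operator (at::_ops::...::call) with no unwrapping at all,
+  // so un-vmapped calls pay essentially nothing for this plumbing layer.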
(!isBatchedAtLevel(input, cur_level)) { + return at::_ops::alpha_dropout::call(input, p, train); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::alpha_dropout_::call(self, p, train); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::feature_alpha_dropout::call(input, p, train); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, p, train); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::feature_alpha_dropout_::call(self, p, train); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, p, train); + return self; +} +template +at::Tensor abs_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::abs::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & abs__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::abs_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor absolute_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
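+  // Editorial note: wrappers tagged "gen_vmap_inplace_plumbing" (the
+  // trailing-underscore variants such as dropout_ and abs_ above) differ
+  // from the functional ones: the batch_rule is invoked purely for its
+  // in-place effect on the unwrapped value, and the original `self`
+  // reference is returned without any re-wrapping.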
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::absolute::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & absolute__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::absolute_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor angle_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::angle::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_as_real::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_as_complex::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sgn_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sgn::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sgn__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sgn_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor chalf_generated_plumbing(const at::Tensor & self, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::chalf::call(self, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor real_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::real::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor imag_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::imag::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _conj_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_conj::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conj_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::conj::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
cur_level)) { + return at::_ops::_conj_physical::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::conj_physical::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::conj_physical_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::resolve_conj::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::resolve_neg::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_neg_view::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor acos_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + 
return at::_ops::acos::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & acos__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::acos_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arccos_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccos::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arccos__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccos_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_avg_pool1d::call(self, output_size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adaptive_max_pool1d::call(self, output_size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, output_size); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::add_Tensor::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::add__Tensor::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return self; +} +template +at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_add_relu_Tensor::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_add_relu__Tensor::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, 
cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim, alpha); + return self; +} +template +at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_add_relu_Scalar::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_add_relu__Scalar::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, alpha); + return self; +} +template +at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::add_Scalar::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::add__Scalar::call(self, other, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other, alpha); + return self; +} +template +at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) { + return at::_ops::addmv::call(self, mat, vec, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat_value, mat_bdim] = unwrapTensorAtLevel(mat, cur_level); + auto [vec_value, vec_bdim] = unwrapTensorAtLevel(vec, 
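+  // Editorial note: for multi-tensor ops such as addmv here and addr below,
+  // every tensor argument is unwrapped independently once any of them is
+  // batched at this level; an input that is not batched here comes back with
+  // an empty bdim, and it is the batch rule's job to broadcast such
+  // unbatched operands against the batched ones.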
cur_level); + auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) { + return at::_ops::addmv_::call(self, mat, vec, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat_value, mat_bdim] = unwrapTensorAtLevel(mat, cur_level); + auto [vec_value, vec_bdim] = unwrapTensorAtLevel(vec, cur_level); + batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha); + return self; +} +template +at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) { + return at::_ops::addr::call(self, vec1, vec2, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [vec1_value, vec1_bdim] = unwrapTensorAtLevel(vec1, cur_level); + auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level); + auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) { + return at::_ops::addr_::call(self, vec1, vec2, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [vec1_value, vec1_bdim] = unwrapTensorAtLevel(vec1, cur_level); + auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level); + batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha); + return self; +} +template +at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(theta, cur_level)) { + return at::_ops::affine_grid_generator::call(theta, size, align_corners); + } + auto [theta_value, theta_bdim] = 
unwrapTensorAtLevel(theta, cur_level); + auto results = batch_rule(theta_value, theta_bdim, size, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level)) { + return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(grad_value, grad_bdim, size, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_is_all_true::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_is_any_true::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_check_tensor::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_functorch_fallback_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_test_functorch_fallback::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor all_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all_dims::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::all_dimname::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::any_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::any_dims::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::any_dimname::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(like, cur_level)) { + return at::_ops::_dim_arange::call(like, dim); + } + auto [like_value, like_bdim] = unwrapTensorAtLevel(like, cur_level); + auto results = batch_rule(like_value, like_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor argmax_generated_plumbing(const at::Tensor & self, ::std::optional dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::argmax::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor argmin_generated_plumbing(const at::Tensor & self, ::std::optional dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::argmin::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor acosh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::acosh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & acosh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::acosh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arccosh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccosh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arccosh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arccosh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor asinh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asinh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & asinh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asinh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsinh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsinh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor atanh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
cur_level)) { + return at::_ops::atanh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & atanh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atanh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arctanh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctanh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arctanh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctanh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::as_strided::call(self, size, stride, storage_offset); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor asin_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asin::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & asin__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::asin_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arcsin_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsin::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arcsin__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arcsin_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor atan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atan::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & atan__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atan_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor arctan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctan::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & arctan__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::arctan_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor 
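+// Editorial note: the atleast_*d family below also comes in TensorList
+// ("Sequence") overloads. Those check the whole list with
+// isBatchedAtLevel(tensors, cur_level), hand the list to the batch rule
+// unchanged, and re-wrap the resulting vector of (value, bdim) pairs with
+// makeBatchedVector rather than makeBatched.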
atleast_1d_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atleast_1d::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::atleast_1d_Sequence::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atleast_2d::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::atleast_2d_Sequence::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::atleast_3d::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::atleast_3d_Sequence::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const 
at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) { + return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level); + auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level); + auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) { + return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level); + auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level); + batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha); + return self; +} +template +at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, 
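+      // Editorial note: optional tensor arguments (weight, bias, running
+      // stats) are only unwrapped when actually present -- see the std::tie
+      // blocks above -- so an absent input reaches the batch rule as a pair
+      // of empty optionals rather than as an unwrapped tensor.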
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
+    return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_mean_value;
+  std::optional<int64_t> running_mean_bdim;
+  if (running_mean) {
+    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_var_value;
+  std::optional<int64_t> running_var_bdim;
+  if (running_var) {
+    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) {
+    return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
+  auto [var_value, var_bdim] = unwrapTensorAtLevel(var, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) {
+    return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [reservedSpace_value, reservedSpace_bdim] = unwrapTensorAtLevel(reservedSpace, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_mean_value;
+  std::optional<int64_t> running_mean_bdim;
+  if (running_mean) {
+    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_var_value;
+  std::optional<int64_t> running_var_bdim;
+  if (running_var) {
+    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
+  }
+  std::optional<at::Tensor> save_mean_value;
+  std::optional<int64_t> save_mean_bdim;
+  if (save_mean) {
+    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> save_var_transform_value;
+  std::optional<int64_t> save_var_transform_bdim;
+  if (save_var_transform) {
+    std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level);
+  }
+  auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bernoulli::call(self, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
+    return at::_ops::bernoulli__Tensor::call(self, p, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [p_value, p_bdim] = unwrapTensorAtLevel(p, cur_level);
+  batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bernoulli__float::call(self, p, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, p, generator);
+  return self;
+}
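+// NOTE (illustrative, not part of the generated file): ops with optional
+// tensor arguments (batch_norm above is the densest example) repeat the same
+// unwrap-if-present dance per argument. A hypothetical helper capturing the
+// pattern; the generator inlines it instead of abstracting it away:
+namespace vmap_plumbing_examples {
+inline std::pair<std::optional<at::Tensor>, std::optional<int64_t>> unwrap_optional_tensor_example(const ::std::optional<at::Tensor>& t, int64_t cur_level) {
+  std::optional<at::Tensor> value;
+  std::optional<int64_t> bdim;
+  if (t) {
+    // unwrapTensorAtLevel peels one layer of batching at `cur_level`.
+    std::tie(value, bdim) = unwrapTensorAtLevel(t.value(), cur_level);
+  }
+  return {value, bdim};
+}
+} // namespace vmap_plumbing_examples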
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bernoulli_p::call(self, p, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::bilinear::call(input1, input2, weight, bias);
+  }
+  auto [input1_value, input1_bdim] = unwrapTensorAtLevel(input1, cur_level);
+  auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(pos_weight, cur_level)) {
+    return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  std::optional<at::Tensor> pos_weight_value;
+  std::optional<int64_t> pos_weight_bdim;
+  if (pos_weight) {
+    std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bincount_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
+    return at::_ops::bincount::call(self, weights, minlength);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> weights_value;
+  std::optional<int64_t> weights_bdim;
+  if (weights) {
+    std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_not::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_not_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::copysign_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::copysign__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::copysign_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::copysign__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _lazy_clone_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_lazy_clone::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logical_not_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::logical_not::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & logical_not__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::logical_not_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logical_xor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logical_xor_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
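+// NOTE (illustrative, not part of the generated file): the in-place variants
+// above (the `*__generated_plumbing` overloads returning at::Tensor &) invoke
+// batch_rule purely for its side effect on the unwrapped value, then hand the
+// original wrapper back. A hypothetical sketch of a matching rule:
+namespace vmap_plumbing_examples {
+inline void inplace_pointwise_batch_rule_example(at::Tensor& self, std::optional<int64_t> /*self_bdim*/) {
+  // The unwrapped value aliases the wrapper's storage, so mutating it here is
+  // observed through the batched tensor the plumbing returns.
+  self.bitwise_not_();
+}
+} // namespace vmap_plumbing_examples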
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logical_and::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logical_and_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logical_or::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::logical_or_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::bmm::call(self, mat2);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> broadcast_tensors_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::broadcast_tensors::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::broadcast_to::call(self, size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_broadcast_to::call(self, size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::cat::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::cat_names::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::concat::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::concat_names::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
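+// NOTE (illustrative, not part of the generated file): binary pointwise ops
+// (the logical_* and copysign families above) need both operands' batch dims
+// reconciled before the underlying op broadcasts. A simplified sketch that
+// assumes both operands have the same logical rank; real rules also align
+// ranks before broadcasting:
+namespace vmap_plumbing_examples {
+inline std::tuple<at::Tensor, std::optional<int64_t>> binary_pointwise_batch_rule_example(const at::Tensor& self, std::optional<int64_t> self_bdim, const at::Tensor& other, std::optional<int64_t> other_bdim) {
+  // Move each present batch dim to the front; give an unbatched operand a
+  // size-1 leading dim so ordinary broadcasting does the rest. (The plumbing
+  // guarantees at least one operand is batched here.)
+  auto self_ = self_bdim ? self.movedim(*self_bdim, 0) : self.unsqueeze(0);
+  auto other_ = other_bdim ? other.movedim(*other_bdim, 0) : other.unsqueeze(0);
+  return std::make_tuple(at::logical_and(self_, other_), std::optional<int64_t>(0));
+}
+} // namespace vmap_plumbing_examples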
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::concatenate::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::concatenate_names::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor block_diag_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::block_diag::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ceil_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ceil::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & ceil__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ceil_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(matrices, cur_level)) {
+    return at::_ops::chain_matmul::call(matrices);
+  }
+
+  auto results = batch_rule(matrices);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::unsafe_chunk::call(self, chunks, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, chunks, dim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::chunk::call(self, chunks, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, chunks, dim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tensor_split_sections::call(self, sections, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, sections, dim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tensor_split_indices::call(self, indices, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, indices, dim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) {
+    return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [tensor_indices_or_sections_value, tensor_indices_or_sections_bdim] = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level);
+  auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
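+// NOTE (illustrative, not part of the generated file): ops returning several
+// tensors that all keep the input's batch dim (chunk and tensor_split above)
+// come back through makeBatchedVector. A hypothetical sketch of such a rule,
+// assuming `dim` is already non-negative:
+namespace vmap_plumbing_examples {
+inline std::tuple<::std::vector<at::Tensor>, std::optional<int64_t>> chunk_batch_rule_example(const at::Tensor& self, std::optional<int64_t> self_bdim, int64_t chunks, int64_t dim) {
+  // Logical dims at or after the batch dim sit one slot further right in the
+  // unwrapped (physical) tensor.
+  int64_t physical_dim = (self_bdim && dim >= *self_bdim) ? dim + 1 : dim;
+  // Splitting never touches the batch dim, so every chunk keeps it as-is.
+  return std::make_tuple(self.chunk(chunks, physical_dim), self_bdim);
+}
+} // namespace vmap_plumbing_examples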
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clamp_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clamp::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, min, max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
+    return at::_ops::clamp_Tensor::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> min_value;
+  std::optional<int64_t> min_bdim;
+  if (min) {
+    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
+  }
+  std::optional<at::Tensor> max_value;
+  std::optional<int64_t> max_bdim;
+  if (max) {
+    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clamp__generated_plumbing(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clamp_::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, min, max);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
+    return at::_ops::clamp__Tensor::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> min_value;
+  std::optional<int64_t> min_bdim;
+  if (min) {
+    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
+  }
+  std::optional<at::Tensor> max_value;
+  std::optional<int64_t> max_bdim;
+  if (max) {
+    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
+  }
+  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clamp_max::call(self, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
+    return at::_ops::clamp_max_Tensor::call(self, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [max_value, max_bdim] = unwrapTensorAtLevel(max, cur_level);
+  auto results = batch_rule(self_value, self_bdim, max_value, max_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clamp_max_::call(self, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, max);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
+    return at::_ops::clamp_max__Tensor::call(self, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [max_value, max_bdim] = unwrapTensorAtLevel(max, cur_level);
+  batch_rule(self_value, self_bdim, max_value, max_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clamp_min::call(self, min);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, min);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
+    return at::_ops::clamp_min_Tensor::call(self, min);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [min_value, min_bdim] = unwrapTensorAtLevel(min, cur_level);
+  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clamp_min_::call(self, min);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, min);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
+    return at::_ops::clamp_min__Tensor::call(self, min);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [min_value, min_bdim] = unwrapTensorAtLevel(min, cur_level);
+  batch_rule(self_value, self_bdim, min_value, min_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clip_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clip::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, min, max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
+    return at::_ops::clip_Tensor::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> min_value;
+  std::optional<int64_t> min_bdim;
+  if (min) {
+    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
+  }
+  std::optional<at::Tensor> max_value;
+  std::optional<int64_t> max_bdim;
+  if (max) {
+    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clip__generated_plumbing(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clip_::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, min, max);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const ::std::optional<at::Tensor> & min, const ::std::optional<at::Tensor> & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
+    return at::_ops::clip__Tensor::call(self, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> min_value;
+  std::optional<int64_t> min_bdim;
+  if (min) {
+    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
+  }
+  std::optional<at::Tensor> max_value;
+  std::optional<int64_t> max_bdim;
+  if (max) {
+    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
+  }
+  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) {
+    return at::_ops::complex::call(real, imag);
+  }
+  auto [real_value, real_bdim] = unwrapTensorAtLevel(real, cur_level);
+  auto [imag_value, imag_bdim] = unwrapTensorAtLevel(imag, cur_level);
+  auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) {
+    return at::_ops::polar::call(abs, angle);
+  }
+  auto [abs_value, abs_bdim] = unwrapTensorAtLevel(abs, cur_level);
+  auto [angle_value, angle_bdim] = unwrapTensorAtLevel(angle, cur_level);
+  auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::constant_pad_nd::call(self, pad, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, pad, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
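+// NOTE (illustrative, not part of the generated file): clamp/clip show why
+// Scalar and Tensor overloads get separate plumbing: only tensor arguments
+// participate in unwrapping, so a Scalar overload's rule can forward min/max
+// untouched. Hypothetical sketch:
+namespace vmap_plumbing_examples {
+inline std::tuple<at::Tensor, std::optional<int64_t>> clamp_scalar_batch_rule_example(const at::Tensor& self, std::optional<int64_t> self_bdim, const ::std::optional<at::Scalar>& min, const ::std::optional<at::Scalar>& max) {
+  // Pointwise with scalar bounds: the batch dim passes straight through.
+  return std::make_tuple(at::clamp(self, min, max), self_bdim);
+}
+} // namespace vmap_plumbing_examples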
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::contiguous::call(self, memory_format);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
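+// NOTE (illustrative, not part of the generated file): convolutions cannot
+// treat the vmap dim pointwise. For the common case where only `input` is
+// batched, a rule can fold the vmap dim into the convolution's own batch dim
+// and split it back out. A simplified sketch using plain IntArrayRef
+// arguments (the plumbing above is SymInt-based); all names are assumptions:
+namespace vmap_plumbing_examples {
+inline std::tuple<at::Tensor, std::optional<int64_t>> conv2d_input_batched_example(const at::Tensor& input, int64_t input_bdim, const at::Tensor& weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
+  auto in = input.movedim(input_bdim, 0);  // [B, N, C, H, W]
+  const auto B = in.size(0);
+  const auto N = in.size(1);
+  // Fold vmap dim B into the conv batch dim, run the op, then unfold.
+  auto out = at::conv2d(in.flatten(0, 1), weight, /*bias=*/{}, stride, padding, dilation, groups);
+  return std::make_tuple(out.unflatten(0, {B, N}), std::optional<int64_t>(0));
+}
+} // namespace vmap_plumbing_examples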
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_generated_plumbing(const ::std::optional<at::Tensor> & ggI, const ::std::optional<at::Tensor> & ggW, const ::std::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  }
+  auto [gO_value, gO_bdim] = unwrapTensorAtLevel(gO, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> ggI_value;
+  std::optional<int64_t> ggI_bdim;
+  if (ggI) {
+    std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level);
+  }
+  std::optional<at::Tensor> ggW_value;
+  std::optional<int64_t> ggW_bdim;
+  if (ggW) {
+    std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level);
+  }
+  std::optional<at::Tensor> ggb_value;
+  std::optional<int64_t> ggb_bdim;
+  if (ggb) {
+    std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level);
+  }
+  auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv_tbc::call(self, weight, bias, pad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level);
+  auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::copy::call(self, src, non_blocking);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::copy_::call(self, src, non_blocking);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
+  return self;
+}
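+// For in-place ops such as copy_ above, the "gen_vmap_inplace_plumbing"
+// variant calls batch_rule purely for its side effect on self_value and then
+// returns the original `self` reference; no re-wrapping is needed because the
+// batched wrapper already aliases the tensor that was mutated.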
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
+    return at::_ops::_copy_from::call(self, dst, non_blocking);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [dst_value, dst_bdim] = unwrapTensorAtLevel(dst, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
+    return at::_ops::_copy_from_and_resize::call(self, dst);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [dst_value, dst_bdim] = unwrapTensorAtLevel(dst, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cos_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cos::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cos__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cos_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cosh_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cosh::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cosh__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cosh_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
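+// For illustration only (the hand-written rules live in the functorch
+// BatchRules* sources, not in this generated header): a batch_rule that could
+// instantiate the unary plumbing above would look roughly like
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>>
+//   cos_batch_rule(const at::Tensor & self, std::optional<int64_t> self_bdim) {
+//     // pointwise op: compute on the unwrapped value, batch dim is unchanged
+//     return std::make_tuple(at::cos(self), self_bdim);
+//   }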
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
+  }
+  auto [input1_value, input1_bdim] = unwrapTensorAtLevel(input1, cur_level);
+  auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::count_nonzero_dim_IntList::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::count_nonzero::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const ::std::optional<at::Tensor> & fweights, const ::std::optional<at::Tensor> & aweights) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
+    return at::_ops::cov::call(self, correction, fweights, aweights);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> fweights_value;
+  std::optional<int64_t> fweights_bdim;
+  if (fweights) {
+    std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level);
+  }
+  std::optional<at::Tensor> aweights_value;
+  std::optional<int64_t> aweights_bdim;
+  if (aweights) {
+    std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::corrcoef::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(theta, cur_level)) {
+    return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
+  }
+  auto [theta_value, theta_bdim] = unwrapTensorAtLevel(theta, cur_level);
+  auto results = batch_rule(theta_value, theta_bdim, N, C, H, W);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level)) {
+    return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, N, C, H, W);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
+    return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_mean_value;
+  std::optional<int64_t> running_mean_bdim;
+  if (running_mean) {
+    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_var_value;
+  std::optional<int64_t> running_var_bdim;
+  if (running_var) {
+    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) {
+    return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [reserveSpace_value, reserveSpace_bdim] = unwrapTensorAtLevel(reserveSpace, cur_level);
+  std::optional<at::Tensor> running_mean_value;
+  std::optional<int64_t> running_mean_bdim;
+  if (running_mean) {
+    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_var_value;
+  std::optional<int64_t> running_var_bdim;
+  if (running_var) {
+    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
+  }
+  std::optional<at::Tensor> save_mean_value;
+  std::optional<int64_t> save_mean_bdim;
+  if (save_mean) {
+    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> save_var_value;
+  std::optional<int64_t> save_var_bdim;
+  if (save_var) {
+    std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional<at::Scalar> & alpha, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [z_value, z_bdim] = unwrapTensorAtLevel(z, cur_level);
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
+    return at::_ops::cudnn_grid_sampler::call(self, grid);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
+  auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level);
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim, grad_output_value, grad_output_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> cummax_generated_plumbing(const at::Tensor & self, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cummax::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cummax_dimname::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::_cummax_helper::call(self, values, indices, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> cummin_generated_plumbing(const at::Tensor & self, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cummin::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cummin_dimname::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::_cummin_helper::call(self, values, indices, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
+}
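+// Two variations on the pattern are visible above. For multi-output ops like
+// cummax/cummin, the batch_rule returns a flat tuple that interleaves values
+// and batch dims, (values, values_bdim, indices, indices_bdim), and each
+// adjacent pair is re-wrapped with makeBatched before the public tuple is
+// assembled. For void helpers like _cummax_helper/_cummin_helper, which
+// mutate `values` and `indices` in place, the "gen_vmap_plumbing_no_returns"
+// variant simply invokes batch_rule for its side effects; there is nothing to
+// re-wrap.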
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumprod::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumprod_::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, dim, dtype);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumprod_dimname::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumprod__dimname::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, dim, dtype);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
+    return at::_ops::cumprod_backward::call(grad, input, dim, output);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumsum::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumsum_::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, dim, dtype);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumsum_dimname::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cumsum__dimname::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, dim, dtype);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
+    return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
+  }
+  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
+  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(y, cur_level)) {
+    return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
+  }
+  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
+  auto results = batch_rule(y_value, y_bdim, dx, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
+    return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
+  }
+  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
+  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
+  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
+    return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
+  }
+  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
+  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
+  auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level);
+  auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level);
+  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
+    return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
+  }
+  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
+  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
+  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
+    return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
+  }
+  auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level);
+  auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level);
+  auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level);
+  auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level);
+  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) { + return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [log_probs_value, log_probs_bdim] = unwrapTensorAtLevel(log_probs, cur_level); + auto [targets_value, targets_bdim] = unwrapTensorAtLevel(targets, cur_level); + auto [input_lengths_value, input_lengths_bdim] = unwrapTensorAtLevel(input_lengths, cur_level); + auto [target_lengths_value, target_lengths_bdim] = unwrapTensorAtLevel(target_lengths, cur_level); + auto [neg_log_likelihood_value, neg_log_likelihood_bdim] = unwrapTensorAtLevel(neg_log_likelihood, cur_level); + auto [log_alpha_value, log_alpha_bdim] = unwrapTensorAtLevel(log_alpha, cur_level); + auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diag_embed_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diag_embed::call(self, offset, dim1, dim2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diagflat::call(self, offset); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, offset); + return makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level); +} +template +at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diagonal::call(self, offset, dim1, dim2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(A, cur_level)) { + return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2); + } + auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level); + auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fill_diagonal_::call(self, fill_value, wrap); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, fill_value, wrap); + return self; +} +template +at::Tensor 
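+
+// Note [Instantiating generated plumbing]
+// Each *_generated_plumbing template above unwraps batched tensors at the
+// current vmap level, forwards the plain values (plus their batch-dim indices)
+// to a batch rule, and re-wraps the result. A minimal sketch of how a plumbing
+// template is paired with a batch rule; diag_embed_batch_rule below is a
+// hypothetical illustration (it ignores negative-dim wrapping), not a function
+// defined in this header:
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>> diag_embed_batch_rule(
+//       const at::Tensor& self, std::optional<int64_t> self_bdim,
+//       int64_t offset, int64_t dim1, int64_t dim2) {
+//     auto self_ = moveBatchDimToFront(self, self_bdim);
+//     // the logical dims shift by one because the batch dim is now dim 0
+//     return std::make_tuple(at::diag_embed(self_, offset, dim1 + 1, dim2 + 1), 0);
+//   }
+//
+// Registering it via functorch's VMAP_SUPPORT(diag_embed, diag_embed_batch_rule)
+// expands to roughly:
+//
+//   m.impl("diag_embed",
+//          diag_embed_generated_plumbing<decltype(&diag_embed_batch_rule),
+//                                        &diag_embed_batch_rule>);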
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional<at::Tensor> & prepend, const ::std::optional<at::Tensor> & append) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) {
+    return at::_ops::diff::call(self, n, dim, prepend, append);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> prepend_value;
+  std::optional<int64_t> prepend_bdim;
+  if (prepend) {
+    std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level);
+  }
+  std::optional<Tensor> append_value;
+  std::optional<int64_t> append_bdim;
+  if (append) {
+    std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_scalarint_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gradient_array::call(self, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
+    return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
+    return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
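+
+// Note [Vector-returning plumbing]
+// The gradient overloads above return ::std::vector<at::Tensor>, so their
+// plumbing re-wraps with makeBatchedVector instead of makeBatched: the batch
+// rule is expected to return all outputs sharing a single batch-dim index.
+// A sketch of the expected batch-rule shape (hypothetical, for illustration
+// only; not declared in this header):
+//
+//   std::tuple<std::vector<at::Tensor>, std::optional<int64_t>>
+//   gradient_array_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim,
+//                             at::IntArrayRef dim, int64_t edge_order);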
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::div_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::div__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::div__Tensor_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::div_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::div__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::div__Scalar_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other, rounding_mode);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::divide_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::divide__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::divide_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::divide__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other, rounding_mode);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::true_divide_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::true_divide__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::true_divide_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::true_divide__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) {
+    return at::_ops::dot::call(self, tensor);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [tensor_value, tensor_bdim] = unwrapTensorAtLevel(tensor, cur_level);
+  auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
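+
+// Note [In-place plumbing]
+// Plumbing checked with "gen_vmap_inplace_plumbing" (the *_ variants above)
+// does not re-wrap results: the batch rule mutates the unwrapped value in
+// place, and the original self is returned. A minimal sketch of the expected
+// batch-rule shape (hypothetical, for illustration only):
+//
+//   void div__Tensor_batch_rule(
+//       at::Tensor& self, std::optional<int64_t> self_bdim,
+//       const at::Tensor& other, std::optional<int64_t> other_bdim) {
+//     auto self_ = moveBatchDimToFront(self, self_bdim);
+//     auto other_ = moveBatchDimToFront(other, other_bdim);
+//     self_.div_(other_);  // writes through to the storage self views
+//   }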
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::vdot::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::einsum::call(equation, tensors, path);
+  }
+
+  auto results = batch_rule(equation, tensors, path);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor row_stack_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::row_stack::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, ::std::optional<int64_t> padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
+  auto [bag_size_value, bag_size_bdim] = unwrapTensorAtLevel(bag_size, cur_level);
+  auto [maximum_indices_value, maximum_indices_bdim] = unwrapTensorAtLevel(maximum_indices, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
+  auto [bag_size_value, bag_size_bdim] = unwrapTensorAtLevel(bag_size, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
+    return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
+  auto [bag_size_value, bag_size_bdim] = unwrapTensorAtLevel(bag_size, cur_level);
+  auto [maximum_indices_value, maximum_indices_bdim] = unwrapTensorAtLevel(maximum_indices, cur_level);
+  std::optional<Tensor> per_sample_weights_value;
+  std::optional<int64_t> per_sample_weights_bdim;
+  if (per_sample_weights) {
+    std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
+  }
+  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) {
+    return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto [offset2bag_value, offset2bag_bdim] = unwrapTensorAtLevel(offset2bag, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
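+
+// Note [Factory-style plumbing]
+// For the new_empty/new_empty_strided (and new_full/new_zeros/new_ones below)
+// plumbing, only self is unwrapped; size describes a single logical example,
+// so a batch rule typically prepends the vmap batch size taken from self.
+// Rough sketch (hypothetical, eliding the dtype/layout/device/pin_memory
+// pass-through):
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>> new_zeros_batch_rule(
+//       const at::Tensor& self, std::optional<int64_t> self_bdim,
+//       c10::SymIntArrayRef size) {
+//     c10::SymDimVector batched_size;
+//     if (self_bdim) {
+//       batched_size.push_back(self.sym_size(*self_bdim));  // new leading bdim
+//     }
+//     batched_size.insert(batched_size.end(), size.begin(), size.end());
+//     auto out = at::zeros_symint(batched_size, self.options());
+//     return std::make_tuple(std::move(out),
+//                            self_bdim.has_value() ? std::optional<int64_t>(0) : std::nullopt);
+//   }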
::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) { + return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); + } + auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level); + auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level); + auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, 
at::Device device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_resize_output_::call(self, size, device); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, size, device); + return self; +} +template +at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(qtensor, cur_level)) { + return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format); + } + auto [qtensor_value, qtensor_bdim] = unwrapTensorAtLevel(qtensor, cur_level); + auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor empty_like_generated_plumbing(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor erf_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erf::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & erf__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erf_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor erfc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erfc::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & erfc__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::erfc_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor exp_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & exp__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor exp2_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp2::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & exp2__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exp2_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +}
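+// How these plumbings get wired up (a sketch, not verbatim from this header: the macro shape +// and erf_batch_rule are assumptions for illustration, following the VMAP_SUPPORT convention +// in functorch's BatchRulesHelper.h): +// std::tuple<Tensor, std::optional<int64_t>> erf_batch_rule(const Tensor & self, std::optional<int64_t> self_bdim); +// VMAP_SUPPORT(erf, erf_batch_rule); // registers erf_generated_plumbing<decltype(&erf_batch_rule), &erf_batch_rule>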
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor expm1_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expm1::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & expm1__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expm1_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expand::call(self, size, implicit); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, implicit); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::expand_as::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_using_ints::call(self, start_dim, end_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, start_dim, end_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
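+// Batch rule contract implied by the plumbings here: each Tensor argument reaches the rule as +// an (unwrapped value, std::optional<int64_t> batch dim) pair, non-Tensor arguments pass +// through unchanged, and functional rules return a (result, result_bdim) tuple that +// makeBatched re-wraps at cur_level.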
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flatten_DimnameList::call(self, dims, out_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims, out_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unflatten_int::call(self, dim, sizes); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, sizes); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unflatten_Dimname::call(self, dim, sizes, names); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, sizes, names); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fill_Scalar::call(self, value); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) { +
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::fill_Tensor::call(self, value); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(self_value, self_bdim, value_value, value_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fill__Scalar::call(self, value); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, value); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::fill__Tensor::call(self, value); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + batch_rule(self_value, self_bdim, value_value, value_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor floor_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & floor__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::floor_divide::call(self, other); + } + auto [self_value, self_bdim] =
unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::floor_divide__Tensor::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor_divide_Scalar::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::floor_divide__Scalar::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor frac_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::frac::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & frac__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::frac_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat>
memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::gcd::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::gcd_::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::lcm::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::lcm_::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +}
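+// Binary plumbings (gcd/lcm above, floor_divide earlier): both operands are unwrapped at the +// same cur_level, and the fast path to at::_ops::*::call is taken only when neither input is +// batched at that level, so mixed batched/unbatched pairs still reach the batch rule.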
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); +
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
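+// Multi-return plumbings: the batch rule yields an interleaved tuple (value0, bdim0, value1, +// bdim1, ...); each (value, bdim) pair is re-wrapped independently with makeBatched and then +// packed into the returned ::std::tuple, as in grid_sampler_3d_backward below.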
+template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) { + return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grid_value, grid_bdim] = unwrapTensorAtLevel(grid, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional<Tensor> weight_value; + std::optional<int64_t> weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional<Tensor> bias_value; + std::optional<int64_t> bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
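+// Optional Tensor arguments (weight/bias in the norm ops below) are unwrapped only when +// present: the plumbing declares std::optional<Tensor>/std::optional<int64_t> slots, fills +// them via std::tie, and a nullopt value signals "no tensor" to the batch rule.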
+template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional<Tensor> weight_value; + std::optional<int64_t> weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional<Tensor> bias_value; + std::optional<int64_t> bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [rstd_value, rstd_bdim] = unwrapTensorAtLevel(rstd, cur_level); + std::optional<Tensor> weight_value; + std::optional<int64_t> weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fft_r2c::call(self, dim, normalization, onesided); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fft_c2c::call(self, dim, normalization, forward); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, normalization, forward); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) { + return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz); + } + auto [compressed_idx_value, compressed_idx_bdim] = unwrapTensorAtLevel(compressed_idx, cur_level); + auto [plain_idx_value, plain_idx_bdim] = unwrapTensorAtLevel(plain_idx, cur_level); + batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::index_Tensor::call(self, indices); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _unsafe_index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::_unsafe_index_Tensor::call(self, indices); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +}
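+// The c10::List<::std::optional<at::Tensor>> indices argument is checked with +// isBatchedAtLevel but passed to the batch rule as-is; unwrapping batched entries inside the +// list is left to the index batch rules themselves.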
+template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _unsafe_masked_index_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::_unsafe_masked_index::call(self, mask, indices, fill); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, indices, fill); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _unsafe_masked_index_put_accumulate_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_unsafe_masked_index_put_accumulate::call(self, mask, indices, values); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level); + auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, indices, values_value, values_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy_::call(self, dim, index, source); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level); + auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy::call(self, dim, index, source); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level); + auto [source_value, source_bdim] =
unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy__dimname::call(self, dim, index, source); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level); + auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level); + batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::index_copy_dimname::call(self, dim, index, source); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level); + auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::index_put_::call(self, indices, values, accumulate); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level); + batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::index_put::call(self,
indices, values, accumulate); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor _unsafe_index_put_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) { + return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level); + batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe); + return self; +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional<Tensor> weight_value; + std::optional<int64_t> weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional<Tensor> bias_value; + std::optional<int64_t> bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + std::optional<Tensor> running_mean_value; + std::optional<int64_t> running_mean_bdim; + if (running_mean) { +
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional<Tensor> running_var_value; + std::optional<int64_t> running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::isclose::call(self, other, rtol, atol, equal_nan); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) { + return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert); + } + auto [elements_value, elements_bdim] = unwrapTensorAtLevel(elements, cur_level); + auto [test_elements_value, test_elements_bdim] = unwrapTensorAtLevel(test_elements, cur_level); + auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(elements, cur_level)) { + return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert); + } + auto [elements_value, elements_bdim] = unwrapTensorAtLevel(elements, cur_level); + auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); +
int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(test_elements, cur_level)) { + return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert); + } + auto [test_elements_value, test_elements_bdim] = unwrapTensorAtLevel(test_elements, cur_level); + auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor isnan_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::isnan::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor isreal_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::isreal::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::kl_div::call(self, target, reduction, log_target); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::kron::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template <typename batch_rule_t, batch_rule_t batch_rule> +::std::tuple<at::Tensor,at::Tensor> kthvalue_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer,
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::kthvalue::call(self, k, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple kthvalue_dimname_generated_plumbing(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps, bool cudnn_enable) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), 
cur_level); + } + auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional & weight, const ::std::optional & bias, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [rstd_value, rstd_bdim] = unwrapTensorAtLevel(rstd, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor rms_norm_generated_plumbing(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional & weight, ::std::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::rms_norm::call(input, normalized_shape, weight, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, ::std::optional nan, ::std::optional posinf, ::std::optional neginf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nan_to_num::call(self, nan, posinf, neginf); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, ::std::optional nan, ::std::optional posinf, ::std::optional neginf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nan_to_num_::call(self, nan, posinf, neginf); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, nan, posinf, neginf); + return self; +} +template +at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::linear::call(input, weight, bias); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::linear_backward::call(self, grad_output, weight, output_mask); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::mkldnn_linear::call(self, weight, bias); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mkldnn_linear_backward::call(self, 
grad_output, weight, output_mask); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _cslt_compress_generated_plumbing(const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_cslt_compress::call(input); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cslt_sparse_mm_generated_plumbing(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional & bias, const ::std::optional & alpha, ::std::optional out_dtype, bool transpose_result, int64_t alg_id) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(compressed_A, cur_level) && !isBatchedAtLevel(dense_B, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(alpha, cur_level)) { + return at::_ops::_cslt_sparse_mm::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id); + } + auto [compressed_A_value, compressed_A_bdim] = unwrapTensorAtLevel(compressed_A, cur_level); + auto [dense_B_value, dense_B_bdim] = unwrapTensorAtLevel(dense_B, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + std::optional alpha_value; + std::optional alpha_bdim; + if (alpha) { + std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha.value(), cur_level); + } + auto results = batch_rule(compressed_A_value, compressed_A_bdim, dense_B_value, dense_B_bdim, bias_value, bias_bdim, alpha_value, alpha_bdim, out_dtype, transpose_result, alg_id); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _sparse_semi_structured_tile_generated_plumbing(const at::Tensor & input, c10::string_view algorithm, bool use_cutlass) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::_sparse_semi_structured_tile::call(input, algorithm, use_cutlass); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, algorithm, use_cutlass); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple _sparse_semi_structured_apply_generated_plumbing(const at::Tensor & input, const at::Tensor & thread_masks) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(thread_masks, cur_level)) { + return at::_ops::_sparse_semi_structured_apply::call(input, thread_masks); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [thread_masks_value, thread_masks_bdim] = unwrapTensorAtLevel(thread_masks, cur_level); + auto results = batch_rule(input_value, input_bdim, thread_masks_value, thread_masks_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _sparse_semi_structured_apply_dense_generated_plumbing(const at::Tensor & input, const at::Tensor & thread_masks) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(thread_masks, cur_level)) { + return at::_ops::_sparse_semi_structured_apply_dense::call(input, thread_masks); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [thread_masks_value, thread_masks_bdim] = unwrapTensorAtLevel(thread_masks, cur_level); + auto results = batch_rule(input_value, input_bdim, thread_masks_value, thread_masks_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_semi_structured_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional & bias, ::std::optional activation, ::std::optional out_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(meta, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_sparse_semi_structured_linear::call(input, weight, meta, bias, activation, out_dtype); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto [meta_value, meta_bdim] = unwrapTensorAtLevel(meta, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, meta_value, meta_bdim, bias_value, bias_bdim, activation, out_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_semi_structured_mm_generated_plumbing(const at::Tensor & mat1, 
const at::Tensor & mat1_meta, const at::Tensor & mat2, ::std::optional out_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat1_meta, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::_sparse_semi_structured_mm::call(mat1, mat1_meta, mat2, out_dtype); + } + auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level); + auto [mat1_meta_value, mat1_meta_bdim] = unwrapTensorAtLevel(mat1_meta, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(mat1_value, mat1_bdim, mat1_meta_value, mat1_meta_bdim, mat2_value, mat2_bdim, out_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_semi_structured_addmm_generated_plumbing(const at::Tensor & input, const at::Tensor & mat1, const at::Tensor & mat1_meta, const at::Tensor & mat2, const at::Scalar & alpha, const at::Scalar & beta, ::std::optional out_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat1_meta, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::_sparse_semi_structured_addmm::call(input, mat1, mat1_meta, mat2, alpha, beta, out_dtype); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level); + auto [mat1_meta_value, mat1_meta_bdim] = unwrapTensorAtLevel(mat1_meta, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(input_value, input_bdim, mat1_value, mat1_bdim, mat1_meta_value, mat1_meta_bdim, mat2_value, mat2_bdim, alpha, beta, out_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mixed_dtypes_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const ::std::optional & bias, ::std::optional activation) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_mixed_dtypes_linear::call(input, weight, scale, bias, activation); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, scale_value, scale_bdim, bias_value, bias_bdim, activation); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template 
+at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto [packed_value, packed_bdim] = unwrapTensorAtLevel(packed, cur_level); + auto [col_offsets_value, col_offsets_bdim] = unwrapTensorAtLevel(col_offsets, cur_level); + auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto [packed_value, packed_bdim] = unwrapTensorAtLevel(packed, cur_level); + auto [col_offsets_value, col_offsets_bdim] = unwrapTensorAtLevel(col_offsets, cur_level); + auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, 
cur_level); + auto results = batch_rule(input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _wrapped_linear_prepack_generated_plumbing(const at::Tensor & weight, const at::Tensor & weight_scale, const at::Tensor & weight_zero_point, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_scale, cur_level) && !isBatchedAtLevel(weight_zero_point, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_wrapped_linear_prepack::call(weight, weight_scale, weight_zero_point, bias); + } + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto [weight_scale_value, weight_scale_bdim] = unwrapTensorAtLevel(weight_scale, cur_level); + auto [weight_zero_point_value, weight_zero_point_bdim] = unwrapTensorAtLevel(weight_zero_point, cur_level); + auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(weight_value, weight_bdim, weight_scale_value, weight_scale_bdim, weight_zero_point_value, weight_zero_point_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _wrapped_quantized_linear_prepacked_generated_plumbing(const at::Tensor & input, const at::Tensor & input_scale, const at::Tensor & input_zero_point, const at::Tensor & packed_weight, const at::Tensor & output_scale, const at::Tensor & output_zero_point, int64_t out_channel) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(input_scale, cur_level) && !isBatchedAtLevel(input_zero_point, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(output_scale, cur_level) && !isBatchedAtLevel(output_zero_point, cur_level)) { + return at::_ops::_wrapped_quantized_linear_prepacked::call(input, input_scale, input_zero_point, packed_weight, output_scale, output_zero_point, out_channel); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [input_scale_value, input_scale_bdim] = unwrapTensorAtLevel(input_scale, cur_level); + auto [input_zero_point_value, input_zero_point_bdim] = unwrapTensorAtLevel(input_zero_point, cur_level); + auto [packed_weight_value, packed_weight_bdim] = unwrapTensorAtLevel(packed_weight, cur_level); + auto [output_scale_value, output_scale_bdim] = unwrapTensorAtLevel(output_scale, cur_level); + auto [output_zero_point_value, output_zero_point_bdim] = unwrapTensorAtLevel(output_zero_point, cur_level); + auto results = batch_rule(input_value, input_bdim, input_scale_value, input_scale_bdim, input_zero_point_value, input_zero_point_bdim, packed_weight_value, packed_weight_bdim, output_scale_value, output_scale_bdim, output_zero_point_value, output_zero_point_bdim, out_channel); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + 
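+  // [Editorial sketch, not part of the upstream generated header] Every
+  // *_generated_plumbing wrapper in this file follows one codegen pattern:
+  //   1. exclude the FuncTorchBatched dispatch key so the redispatch below
+  //      cannot re-enter this wrapper;
+  //   2. if no argument is batched at the current dynamic layer, fall through
+  //      to the plain operator via at::_ops::<op>::call, unchanged;
+  //   3. otherwise unwrap each batched tensor into a (value, bdim) pair,
+  //      invoke the registered batch_rule, and re-wrap the outputs with
+  //      makeBatched at the same level (in-place variants return self).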
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [packed_weight_value, packed_weight_bdim] = unwrapTensorAtLevel(packed_weight, cur_level); + auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [packed_weight_value, packed_weight_bdim] = unwrapTensorAtLevel(packed_weight, cur_level); + auto [bias_value, bias_bdim] = unwrapTensorAtLevel(bias, cur_level); + auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::fbgemm_pack_quantized_matrix::call(input); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, K, N); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t 
cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ldexp_Tensor::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::ldexp_::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor linspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) { + return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); + } + auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level); + auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level)) { + return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory); + } + auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level); + auto results = batch_rule(start_value, start_bdim, end, steps, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor linspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(end, cur_level)) { + return 
at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); + } + auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start, end_value, end_bdim, steps, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor log10_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log10::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log10__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log10_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor log1p_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log1p::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log1p__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log1p_::call(self); + } + auto [self_value, 
self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor log2_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log2::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & log2__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log2_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logaddexp::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::logaddexp2::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::xlogy_Tensor::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::xlogy_Scalar_Self::call(self, other); + } + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::xlogy_Scalar_Other::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::xlogy__Tensor::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::xlogy__Scalar_Other::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor logspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) { + return at::_ops::logspace_Tensor_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory); + } + auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level); + auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +at::Tensor logspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(start, cur_level)) { + return at::_ops::logspace_Tensor_Scalar::call(start, end, steps, base, dtype, layout, device, pin_memory); + } + auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level); + auto results = batch_rule(start_value, start_bdim, end, steps, base, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(end, cur_level)) { + return at::_ops::logspace_Scalar_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory); + } + auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level); + auto results = batch_rule(start, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_softmax_int::call(self, dim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_softmax_Dimname::call(self, dim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::_log_softmax::call(self, dim, half_to_float); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, half_to_float); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_logcumsumexp::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logcumsumexp::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logcumsumexp_dimname::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logsumexp::call(self, dim, 
keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logsumexp_names::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction); + } + auto [input1_value, input1_bdim] = unwrapTensorAtLevel(input1, cur_level); + auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::matmul::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple matmul_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::matmul_backward::call(grad, self, other, mask); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = 
batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::matrix_power::call(self, n); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::matrix_exp::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) { + return at::_ops::matrix_exp_backward::call(self, grad); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _aminmax_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_aminmax::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_aminmax_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return 
+      std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> aminmax_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::aminmax::call(self, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) {
+    return at::_ops::_compute_linear_combination::call(input, coefficients);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [coefficients_value, coefficients_bdim] = unwrapTensorAtLevel(coefficients, cur_level);
+  auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::max_dim::call(self, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::max_names_dim::call(self, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
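+  // The next three lines are the standard plumbing prologue: look up the current
+  // functorch dynamic layer, check that no batched tensor has escaped its vmap
+  // scope, and record the level against which arguments are unwrapped below.
+  auto maybe_layer =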
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) { + return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level); + auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::amax::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) { + return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) { + return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor quantized_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::quantized_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mean_generated_plumbing(const at::Tensor & self, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mean::call(self, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mean_dim::call(self, dim, keepdim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmean::call(self, dim, keepdim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor median_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::median::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::median_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::median_names_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmedian::call(self); + } + auto 
[self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmedian_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::nanmedian_names_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::min_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::min_names_dim::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::amin::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return 
at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) { + return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight0_value, weight0_bdim] = unwrapTensorAtLevel(weight0, cur_level); + auto [weight1_value, weight1_bdim] = unwrapTensorAtLevel(weight1, cur_level); + auto [weight2_value, weight2_bdim] = unwrapTensorAtLevel(weight2, cur_level); + auto [weight3_value, weight3_bdim] = unwrapTensorAtLevel(weight3, cur_level); + auto [hx__value, hx__bdim] = unwrapTensorAtLevel(hx_, cur_level); + auto [cx__value, cx__bdim] = unwrapTensorAtLevel(cx_, cur_level); + auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +::std::tuple mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) { + return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight1_value, weight1_bdim] = unwrapTensorAtLevel(weight1, cur_level); + auto [weight2_value, weight2_bdim] = unwrapTensorAtLevel(weight2, cur_level); + auto [weight3_value, weight3_bdim] = unwrapTensorAtLevel(weight3, cur_level); + auto [weight4_value, weight4_bdim] = unwrapTensorAtLevel(weight4, cur_level); + auto [hx__value, hx__bdim] = unwrapTensorAtLevel(hx_, cur_level); + auto [cx_tmp_value, cx_tmp_bdim] = unwrapTensorAtLevel(cx_tmp, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [hy__value, hy__bdim] = unwrapTensorAtLevel(hy_, cur_level); + auto [cy__value, cy__bdim] = unwrapTensorAtLevel(cy_, cur_level); + auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level); + std::optional grad_output_value; + std::optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + std::optional grad_hy_value; + std::optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + std::optional grad_cy_value; + std::optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level)); +} +template +::std::tuple miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, 
const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, double epsilon) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) { + return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + 
} + std::optional save_mean_value; + std::optional save_mean_bdim; + if (save_mean) { + std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level); + } + std::optional save_var_value; + std::optional save_var_bdim; + if (save_var) { + std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic); + return makeBatched(std::get<0>(results), 
std::get<1>(results), cur_level); +} +template +at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const ::std::optional & alpha, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = 
unwrapTensorAtLevel(weight, cur_level); + auto [z_value, z_bdim] = unwrapTensorAtLevel(z, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const ::std::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) { + return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level); + std::optional cx_value; + std::optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + std::optional dropout_state_value; + std::optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +::std::tuple> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && 
!isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) { + return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level); + auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level); + std::optional cx_value; + std::optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + std::optional grad_output_value; + std::optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + std::optional grad_hy_value; + std::optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + std::optional grad_cy_value; + std::optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + std::optional dropout_state_value; + std::optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::mm::call(self, mat2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _int_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::_int_mm::call(self, mat2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convert_weight_to_int4pack_generated_plumbing(const at::Tensor & self, int64_t innerKTiles) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_convert_weight_to_int4pack::call(self, innerKTiles); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, innerKTiles); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _weight_int4pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(qScaleAndZeros, cur_level)) { + return at::_ops::_weight_int4pack_mm::call(self, mat2, qGroupSize, qScaleAndZeros); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto [qScaleAndZeros_value, qScaleAndZeros_bdim] = unwrapTensorAtLevel(qScaleAndZeros, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, qGroupSize, qScaleAndZeros_value, qScaleAndZeros_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _weight_int8pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scales, cur_level)) { + return at::_ops::_weight_int8pack_mm::call(self, mat2, scales); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scales_value, scales_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) { + return at::_ops::_sparse_mm::call(sparse, dense); + } + auto [sparse_value, sparse_bdim] = unwrapTensorAtLevel(sparse, cur_level); + auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level); + auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) { + return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce); + } + auto [sparse_value, sparse_bdim] = unwrapTensorAtLevel(sparse, cur_level); + auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level); + auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::_sparse_sparse_matmul::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mode::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mode_dimname::call(self, dim, keepdim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, keepdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::mul_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::mul__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mul_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mul__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::multiply_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::multiply__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::multiply_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::multiply__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
+    return at::_ops::mv::call(self, vec);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [vec_value, vec_bdim] = unwrapTensorAtLevel(vec, cur_level);
+  auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mvlgamma::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mvlgamma_::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, p);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::narrow_copy::call(self, dim, start, length);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, start, length);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::narrow::call(self, dim, start, length);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, start, length);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) {
+    return at::_ops::narrow_Tensor::call(self, dim, start, length);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [start_value, start_bdim] = unwrapTensorAtLevel(start, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
+    return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
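+  // Each ::std::optional<at::Tensor> argument is unwrapped only when it holds
+  // a value; absent optionals flow to the batch_rule as empty (value, bdim)
+  // pairs, mirroring the unwrap block for `weight` directly above.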
std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _native_batch_norm_legit_no_training_generated_plumbing(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::_native_batch_norm_legit_no_training::call(input, weight, bias, running_mean, running_var, momentum, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [running_mean_value, running_mean_bdim] = unwrapTensorAtLevel(running_mean, cur_level); + auto [running_var_value, running_var_bdim] = unwrapTensorAtLevel(running_var, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, bool training, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps); + } + auto [input_value, input_bdim] = 
unwrapTensorAtLevel(input, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level)) { + return at::_ops::batch_norm_stats::call(input, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto results = batch_rule(input_value, input_bdim, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) { + return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps, int64_t count) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && 
!isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level); + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps, const at::Tensor & counts) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) { + return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level); + auto [counts_value, counts_bdim] = unwrapTensorAtLevel(counts, cur_level); + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_invstd, bool train, 
double eps, ::std::array output_mask) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) { + return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + std::optional save_mean_value; + std::optional save_mean_bdim; + if (save_mean) { + std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level); + } + std::optional save_invstd_value; + std::optional save_invstd_bdim; + if (save_invstd) { + std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +::std::tuple batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional & weight, bool input_g, bool weight_g, bool bias_g) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + 
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level)); +} +template +at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(sum_dy, cur_level) && !isBatchedAtLevel(sum_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) { + return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level); + auto [invstd_value, invstd_bdim] = unwrapTensorAtLevel(invstd, cur_level); + auto [sum_dy_value, sum_dy_bdim] = unwrapTensorAtLevel(sum_dy, cur_level); + auto [sum_dy_xmu_value, sum_dy_xmu_bdim] = unwrapTensorAtLevel(sum_dy_xmu, cur_level); + auto [count_value, count_bdim] = unwrapTensorAtLevel(count, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, sum_dy_value, sum_dy_bdim, sum_dy_xmu_value, sum_dy_xmu_bdim, count_value, count_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + std::optional running_mean_value; + std::optional running_mean_bdim; + if (running_mean) { + std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level); + } + std::optional running_var_value; + 
std::optional running_var_bdim; + if (running_var) { + std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) { + return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ones_like_generated_plumbing(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim); + } + auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level); + auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional compute_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::cdist::call(x1, x2, p, compute_mode); + } + auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level); + auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::_euclidean_dist::call(x1, x2); + } + auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level); + auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, ::std::optional compute_mode) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode); + } + auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level); + auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _cdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) { + return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level); + auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level); + auto [cdist_value, cdist_bdim] = unwrapTensorAtLevel(cdist, cur_level); + auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::pdist::call(self, p); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_pdist_forward::call(self, p); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) { + return at::_ops::_pdist_backward::call(grad, self, p, pdist); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [pdist_value, pdist_bdim] = unwrapTensorAtLevel(pdist, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) { + return at::_ops::cosine_similarity::call(x1, x2, dim, eps); + } + auto [x1_value, x1_bdim] = unwrapTensorAtLevel(x1, cur_level); + auto [x2_value, x2_bdim] = unwrapTensorAtLevel(x2, cur_level); + auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::permute::call(self, dims); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::movedim_intlist::call(self, source, destination); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::movedim_int::call(self, source, destination); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::moveaxis_intlist::call(self, source, destination); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::moveaxis_int::call(self, source, destination); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, destination); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::numpy_T::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::matrix_H::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = 
batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mT_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mT::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mH_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mH::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor adjoint_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::adjoint::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pixel_shuffle::call(self, upscale_factor); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upscale_factor); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pixel_unshuffle::call(self, downscale_factor); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, downscale_factor); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::channel_shuffle::call(self, groups); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::native_channel_shuffle::call(self, groups); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, groups); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, ::std::optional device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pin_memory::call(self, device); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, device); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, ::std::optional device) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_pin_memory::call(self, device); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, device); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::pinverse::call(self, rcond); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, rcond); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [target_value, 
target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rad2deg::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rad2deg_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::deg2rad::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::deg2rad_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor rand_like_generated_plumbing(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor randint_like_generated_plumbing(const at::Tensor & self, c10::SymInt high, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, c10::SymInt low, c10::SymInt high, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor randn_like_generated_plumbing(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ravel_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ravel::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reciprocal::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), 
cur_level); +} +template +at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reciprocal_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor neg_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::neg::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & neg__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::neg_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor negative_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::negative::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & negative__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::negative_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::repeat::call(self, repeats); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, repeats); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, ::std::optional output_size) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(repeats, cur_level)) { + return at::_ops::repeat_interleave_Tensor::call(repeats, output_size); + } + auto [repeats_value, repeats_bdim] = unwrapTensorAtLevel(repeats, cur_level); + auto results = batch_rule(repeats_value, repeats_bdim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, ::std::optional dim, ::std::optional output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) { + return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [repeats_value, repeats_bdim] = unwrapTensorAtLevel(repeats, cur_level); + auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, ::std::optional dim, ::std::optional output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::reshape::call(self, shape); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, shape); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_reshape_copy::call(self, size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
_reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_reshape_alias::call(self, size, stride); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_mkldnn_reshape::call(self, shape); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, shape); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::reshape_as::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor round_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & round__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
cur_level)) { + return at::_ops::round_decimals::call(self, decimals); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, decimals); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::round__decimals::call(self, decimals); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, decimals); + return self; +} +template +at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rrelu::call(self, lower, upper, training, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rrelu_::call(self, lower, upper, training, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, lower, upper, training, generator); + return self; +} +template +at::Tensor relu_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & relu__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor relu6_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + 
auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu6::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & relu6__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::relu6_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::prelu::call(self, weight); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_prelu_kernel::call(self, weight); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), 
makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gelu_::call(self, approximate); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, approximate); + return self; +} +template +at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::gelu::call(self, approximate); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, approximate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::gelu_backward::call(grad_output, self, approximate); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self); + } + auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardshrink::call(self, lambd); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lambd); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardshrink_backward::call(grad_out, self, lambd); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rsqrt::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rsqrt_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::select_Dimname::call(self, dim, index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::select_int::call(self, dim, index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::select_backward::call(grad_output, input_sizes, dim, index); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_select_backward::call(grad_output, self, dim, index); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor selu_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::selu::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & selu__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::selu_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::celu::call(self, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::celu_::call(self, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, alpha); + return self; +} +template +at::Tensor silu_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::silu::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & silu__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::silu_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::silu_backward::call(grad_output, self); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mish_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mish::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & mish__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::mish_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t 
cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::mish_backward::call(grad_output, self); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sigmoid::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sigmoid_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor logit_generated_plumbing(const at::Tensor & self, ::std::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logit::call(self, eps); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, eps); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & logit__generated_plumbing(at::Tensor & self, ::std::optional eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::logit_::call(self, eps); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, eps); + return self; +} +template +at::Tensor sin_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sin::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sin__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sin_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor sinc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinc::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sinc__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinc_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor sinh_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinh::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & sinh__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sinh_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor detach_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::detach::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional start, ::std::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level)) { + return at::_ops::slice_Tensor::call(self, dim, start, end, step); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level)) { + return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_inverse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional start, ::std::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::slice_inverse::call(self, src, dim, start, end, step); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional start, ::std::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::slice_scatter::call(self, src, dim, start, end, step); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::select_scatter::call(self, src, dim, index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = 
unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::smm::call(self, mat2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::softmax_int::call(self, dim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::softmax_Dimname::call(self, dim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_softmax::call(self, dim, half_to_float); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, half_to_float); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsafe_split_Tensor::call(self, split_size, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_Tensor::call(self, split_size, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector 
split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_sizes::call(self, split_size, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_sizes, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_with_sizes::call(self, split_sizes, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_sizes, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hsplit_int::call(self, sections); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hsplit_array::call(self, indices); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::vsplit_int::call(self, sections); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::vsplit_array::call(self, indices); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dsplit_int::call(self, sections); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sections); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::dsplit_array::call(self, indices); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, indices); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_dim::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto 
results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_dimname::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_dims::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) { + return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level); + auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level); + auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _chunk_cat_generated_plumbing(at::TensorList tensors, int64_t dim, int64_t num_chunks) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::_chunk_cat::call(tensors, dim, num_chunks); + } + + auto results = batch_rule(tensors, dim, num_chunks); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::stack::call(tensors, dim); + } + + auto results = batch_rule(tensors, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
_stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::_stack::call(tensors, dim);
+  }
+
+  auto results = batch_rule(tensors, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor hstack_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::hstack::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor vstack_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::vstack::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor dstack_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::dstack::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
+    return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> window_value;
+  std::optional<int64_t> window_bdim;
+  if (window) {
+    std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, ::std::optional<bool> onesided, ::std::optional<bool> return_complex) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
+    return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> window_value;
+  std::optional<int64_t> window_bdim;
+  if (window) {
+    std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool center, bool normalized, ::std::optional<bool> onesided, ::std::optional<int64_t> length, bool return_complex) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
+    return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<at::Tensor> window_value;
+  std::optional<int64_t> window_bdim;
+  if (window) {
+    std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sum_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sum::call(self, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nansum::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sum_to_size::call(self, size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sqrt_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sqrt::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & sqrt__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sqrt_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor square_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::square::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & square__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::square_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std::call(self, unbiased);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, unbiased);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_correction::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_mean::call(self, unbiased);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, unbiased);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor prod_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::prod::call(self, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor t_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::t::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor tan_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tan::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & tan__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tan_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor tanh_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tanh::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & tanh__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tanh_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && 
!isBatchedAtLevel(other, cur_level)) { + return at::_ops::tensordot::call(self, other, dims_self, dims_other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::threshold::call(self, threshold, value); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, threshold, value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::threshold_::call(self, threshold, value); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, threshold, value); + return self; +} +template +at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::threshold_backward::call(grad_output, self, threshold); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor tile_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tile::call(self, dims); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + 
int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::transpose_int::call(self, dim0, dim1); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::transpose_Dimname::call(self, dim0, dim1); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_mkldnn_transpose::call(self, dim0, dim1); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim0, dim1); + return self; +} +template +at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::one_hot::call(self, num_classes); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, num_classes); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flip::call(self, dims); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); 
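+  // Editorial note, not part of the generated header: every *_generated_plumbing
+  // wrapper in this file follows the same five-step vmap pattern seen here in
+  // flip: (1) exclude the FuncTorchBatched key so calls made inside the batch
+  // rule redispatch normally; (2) fetch the current dynamic layer and its
+  // level; (3) if no argument is batched at that level, fall through to the
+  // plain at::_ops::<op>::call; (4) otherwise unwrap each tensor into a
+  // (value, batch-dim) pair; (5) invoke batch_rule and rewrap the outputs with
+  // makeBatched at the same level. As an illustration only (the name and exact
+  // signature are hypothetical; the real rule is supplied via the batch_rule
+  // template argument), a rule plugged in for flip would look roughly like:
+  //   std::tuple<at::Tensor, std::optional<int64_t>> flip_batch_rule(
+  //       const at::Tensor& self, std::optional<int64_t> self_bdim,
+  //       at::IntArrayRef dims);
+  // i.e. it receives the unwrapped tensor plus its batch dimension and returns
+  // the result tensor plus the batch dimension of the output.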
+} +template +at::Tensor fliplr_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fliplr::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flipud_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::flipud::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor roll_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::roll::call(self, shifts, dims); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, shifts, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::rot90::call(self, k, dims); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, k, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) { + return at::_ops::trapezoid_x::call(y, x, dim); + } + auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level); + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level)) { + return at::_ops::trapezoid_dx::call(y, dx, dim); + } + auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level); + auto results = batch_rule(y_value, y_bdim, dx, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) { + return at::_ops::trapz_x::call(y, x, dim); + } + auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level); + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(y, cur_level)) { + return at::_ops::trapz_dx::call(y, dx, dim); + } + auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level); + auto results = batch_rule(y_value, y_bdim, dx, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) { + return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads); + } + auto [qkv_value, qkv_bdim] = unwrapTensorAtLevel(qkv, cur_level); + auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level); + auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check); + } + auto [t_value, t_bdim] = unwrapTensorAtLevel(t, cur_level); + auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level); + auto results = batch_rule(t_value, t_bdim, mask_value, mask_bdim, mask_check); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_from_padded_generated_plumbing(const 
at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) { + return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213); + } + auto [padded_value, padded_bdim] = unwrapTensorAtLevel(padded, cur_level); + auto [cpu_nested_shape_example_value, cpu_nested_shape_example_bdim] = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level); + auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_tensor_size::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_tensor_strides::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_storage_offsets_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_tensor_storage_offsets::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) { + return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example); + } + auto [padded_value, padded_bdim] = unwrapTensorAtLevel(padded, cur_level); + auto [nt_example_value, nt_example_bdim] = unwrapTensorAtLevel(nt_example, cur_level); + auto 
results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
+    return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [nested_size_value, nested_size_bdim] = unwrapTensorAtLevel(nested_size, cur_level);
+  auto [nested_strides_value, nested_strides_bdim] = unwrapTensorAtLevel(nested_strides, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
+    return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [nested_size_value, nested_size_bdim] = unwrapTensorAtLevel(nested_size, cur_level);
+  auto [nested_strides_value, nested_strides_bdim] = unwrapTensorAtLevel(nested_strides, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _nested_view_from_jagged_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(min_seqlen, cur_level) && !isBatchedAtLevel(max_seqlen, cur_level)) {
+    return at::_ops::_nested_view_from_jagged::call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
+  std::optional<at::Tensor> lengths_value;
+  std::optional<int64_t> lengths_bdim;
+  if (lengths) {
+    std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
+  }
+  std::optional<at::Tensor> min_seqlen_value;
+  std::optional<int64_t> min_seqlen_bdim;
+  if (min_seqlen) {
+    std::tie(min_seqlen_value, min_seqlen_bdim) = unwrapTensorAtLevel(min_seqlen.value(), cur_level);
+  }
+  std::optional<at::Tensor> max_seqlen_value;
+  std::optional<int64_t> max_seqlen_bdim;
+  if (max_seqlen) {
+    std::tie(max_seqlen_value, max_seqlen_bdim) = unwrapTensorAtLevel(max_seqlen.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx, min_seqlen_value, min_seqlen_bdim, max_seqlen_value, max_seqlen_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _nested_view_from_jagged_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(min_seqlen, cur_level) && !isBatchedAtLevel(max_seqlen, cur_level)) {
+    return at::_ops::_nested_view_from_jagged_copy::call(self, offsets, dummy, lengths, ragged_idx, min_seqlen, max_seqlen);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
+  std::optional<at::Tensor> lengths_value;
+  std::optional<int64_t> lengths_bdim;
+  if (lengths) {
+    std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
+  }
+  std::optional<at::Tensor> min_seqlen_value;
+  std::optional<int64_t> min_seqlen_bdim;
+  if (min_seqlen) {
+    std::tie(min_seqlen_value, min_seqlen_bdim) = unwrapTensorAtLevel(min_seqlen.value(), cur_level);
+  }
+  std::optional<at::Tensor> max_seqlen_value;
+  std::optional<int64_t> max_seqlen_bdim;
+  if (max_seqlen) {
+    std::tie(max_seqlen_value, max_seqlen_bdim) = unwrapTensorAtLevel(max_seqlen.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx, min_seqlen_value, min_seqlen_bdim, max_seqlen_value, max_seqlen_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _nested_get_values_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_nested_get_values::call(self);
+  }
+  auto [self_value, self_bdim] = 
unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_values_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_values_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_offsets_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_offsets::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_lengths_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_lengths::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_min_seqlen_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_min_seqlen::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_max_seqlen_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_nested_get_max_seqlen::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_get_jagged_dummy_generated_plumbing(const at::Tensor & any) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + 
int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(any, cur_level)) { + return at::_ops::_nested_get_jagged_dummy::call(any); + } + auto [any_value, any_bdim] = unwrapTensorAtLevel(any, cur_level); + auto results = batch_rule(any_value, any_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _nested_compute_contiguous_strides_offsets_generated_plumbing(const at::Tensor & nested_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(nested_size, cur_level)) { + return at::_ops::_nested_compute_contiguous_strides_offsets::call(nested_size); + } + auto [nested_size_value, nested_size_bdim] = unwrapTensorAtLevel(nested_size, cur_level); + auto results = batch_rule(nested_size_value, nested_size_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) { + return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); + } + auto [i1_value, i1_bdim] = unwrapTensorAtLevel(i1, cur_level); + auto [i2_value, i2_bdim] = unwrapTensorAtLevel(i2, cur_level); + auto [i3_value, i3_bdim] = unwrapTensorAtLevel(i3, cur_level); + auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) { + return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction); + } + auto [anchor_value, anchor_bdim] = unwrapTensorAtLevel(anchor, cur_level); + auto [positive_value, positive_bdim] = unwrapTensorAtLevel(positive, cur_level); + auto [negative_value, negative_bdim] = unwrapTensorAtLevel(negative, cur_level); + auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor trunc_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::trunc::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & trunc__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::trunc_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor fix_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fix::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & fix__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fix_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::type_as::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_unique::call(self, sorted, return_inverse); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sorted, return_inverse); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), 
std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), 
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_unsafe_view::call(self, size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::unsqueeze::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor vander_generated_plumbing(const at::Tensor & x, ::std::optional<int64_t> N, bool increasing) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level)) {
+    return at::_ops::vander::call(x, N, increasing);
+  }
+  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(x_value, x_bdim, N, increasing);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var::call(self, unbiased);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, unbiased);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_correction::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_mean::call(self, unbiased);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, unbiased);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const ::std::optional<at::Scalar> & correction, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::view_as::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::where_self::call(condition, self, other);
+  }
+  auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::where_ScalarSelf::call(condition, self, other);
+  }
+  auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::where_ScalarOther::call(condition, self, other);
+  }
+  auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(condition, cur_level)) {
+    return at::_ops::where_Scalar::call(condition, self, other);
+  }
+  auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
+  auto results = batch_rule(condition_value, condition_bdim, self, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> where_generated_plumbing(const at::Tensor & condition) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(condition, cur_level)) {
+    return at::_ops::where::call(condition);
+  }
+  auto [condition_value, condition_bdim] = unwrapTensorAtLevel(condition, cur_level);
+  auto results = batch_rule(condition_value, condition_bdim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(v, cur_level)) {
+    return at::_ops::norm_except_dim::call(v, pow, dim);
+  }
+  auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
+  auto results = batch_rule(v_value, v_bdim, pow, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
+    return at::_ops::_weight_norm::call(v, g, dim);
+  }
+  auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
+  auto [g_value, g_bdim] = unwrapTensorAtLevel(g, cur_level);
+  auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
+    return at::_ops::_weight_norm_interface::call(v, g, dim);
+  }
+  auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
+  auto [g_value, g_bdim] = unwrapTensorAtLevel(g, cur_level);
+  auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
+    return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
+  }
+  auto [grad_w_value, grad_w_bdim] = unwrapTensorAtLevel(grad_w, cur_level);
+  auto [saved_v_value, saved_v_bdim] = unwrapTensorAtLevel(saved_v, cur_level);
+  auto [saved_g_value, saved_g_bdim] = unwrapTensorAtLevel(saved_g, cur_level);
+  auto [saved_norms_value, saved_norms_bdim] = unwrapTensorAtLevel(saved_norms, cur_level);
+  auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
+    return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
+  }
+  auto [grad_w_value, grad_w_bdim] = unwrapTensorAtLevel(grad_w, cur_level);
+  auto [saved_v_value, saved_v_bdim] = unwrapTensorAtLevel(saved_v, cur_level);
+  auto [saved_g_value, saved_g_bdim] = unwrapTensorAtLevel(saved_g, cur_level);
+  auto [saved_norms_value, saved_norms_bdim] = unwrapTensorAtLevel(saved_norms, cur_level);
+  auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, cur_level)) {
+    return at::_ops::_standard_gamma_grad::call(self, output);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_value, output_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_standard_gamma::call(self, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) {
+    return at::_ops::_dirichlet_grad::call(x, alpha, total);
+  }
+  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+  auto [alpha_value, alpha_bdim] = unwrapTensorAtLevel(alpha, cur_level);
+  auto [total_value, total_bdim] = unwrapTensorAtLevel(total, cur_level);
+  auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sample_dirichlet::call(self, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor poisson_generated_plumbing(const at::Tensor & self, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::poisson::call(self, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) {
+    return at::_ops::binomial::call(count, prob, generator);
+  }
+  auto [count_value, count_bdim] = unwrapTensorAtLevel(count, cur_level);
+  auto [prob_value, prob_bdim] = unwrapTensorAtLevel(prob, cur_level);
+  auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::native_norm::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _batch_norm_no_update_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, double momentum, double eps) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
+    return at::_ops::_batch_norm_no_update::call(input, weight, bias, running_mean, running_var, momentum, eps);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  std::optional<at::Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  std::optional<at::Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_mean_value;
+  std::optional<int64_t> running_mean_bdim;
+  if (running_mean) {
+    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_var_value;
+  std::optional<int64_t> running_var_bdim;
+  if (running_var) {
+    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
+    return at::_ops::batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
+  }
+  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
+  std::optional<at::Tensor> running_mean_value;
+  std::optional<int64_t> running_mean_bdim;
+  if (running_mean) {
+    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> running_var_value;
+  std::optional<int64_t> running_var_bdim;
+  if (running_var) {
+    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
+  }
+  std::optional<at::Tensor> save_mean_value;
+  std::optional<int64_t> save_mean_bdim;
+  if (save_mean) {
+    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
+  }
+  std::optional<at::Tensor> save_var_value;
+  std::optional<int64_t> save_var_bdim;
+  if (save_var) {
+    std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
+  }
+  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, update, eps, output_mask, reserve_value, reserve_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_sum::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_sum_dtype::call(self, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_sum_dim::call(self, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_sum_backward::call(grad, self, dim);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, ::std::optional<at::Layout> layout) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
+    return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
+  }
+  auto [diagonals_value, diagonals_bdim] = unwrapTensorAtLevel(diagonals, cur_level);
+  auto [offsets_value, offsets_bdim] = unwrapTensorAtLevel(offsets, cur_level);
+  auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::norm_Scalar::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> frexp_Tensor_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::frexp_Tensor::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nuclear_norm::call(self, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor clone_generated_plumbing(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::clone::call(self, memory_format);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor positive_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::positive::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
+    return at::_ops::resize_as_sparse_::call(self, the_template);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [the_template_value, the_template_bdim] = unwrapTensorAtLevel(the_template, cur_level);
+  batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & zero__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::zero_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::sub_Tensor::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::sub__Tensor::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sub_Scalar::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sub__Scalar::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::subtract_Tensor::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::subtract__Tensor::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::subtract_Scalar::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::subtract__Scalar::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::rsub_Tensor::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::heaviside::call(self, values);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(self_value, self_bdim, values_value, values_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::heaviside_::call(self, values);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(self_value, self_bdim, values_value, values_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::rsub_Scalar::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) {
+    return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto [arg_out_value, arg_out_bdim] = unwrapTensorAtLevel(arg_out, cur_level);
+  auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
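+// Operators with multiple Tensor returns, such as _sparse_mm_reduce_impl above,
+// expect the batch rule to yield a flat tuple of alternating (value, bdim)
+// pairs; the plumbing re-wraps them pairwise:
+//
+//   return std::make_tuple(
+//       makeBatched(std::get<0>(results), std::get<1>(results), cur_level),
+//       makeBatched(std::get<2>(results), std::get<3>(results), cur_level));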
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _scaled_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scale_a, const at::Tensor & scale_b, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & scale_result, ::std::optional<at::ScalarType> out_dtype, bool use_fast_accum) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scale_a, cur_level) && !isBatchedAtLevel(scale_b, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(scale_result, cur_level)) {
+    return at::_ops::_scaled_mm::call(self, mat2, scale_a, scale_b, bias, scale_result, out_dtype, use_fast_accum);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto [scale_a_value, scale_a_bdim] = unwrapTensorAtLevel(scale_a, cur_level);
+  auto [scale_b_value, scale_b_bdim] = unwrapTensorAtLevel(scale_b, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  std::optional<Tensor> scale_result_value;
+  std::optional<int64_t> scale_result_bdim;
+  if (scale_result) {
+    std::tie(scale_result_value, scale_result_bdim) = unwrapTensorAtLevel(scale_result.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scale_a_value, scale_a_bdim, scale_b_value, scale_b_bdim, bias_value, bias_bdim, scale_result_value, scale_result_bdim, out_dtype, use_fast_accum);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
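+// Optional tensor arguments (bias and scale_result in _scaled_mm above) cannot
+// be unwrapped with structured bindings, so the plumbing declares optional
+// locals and fills them via std::tie only when the argument is engaged; absent
+// optionals are forwarded to the batch rule as disengaged (value, bdim) pairs.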
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
+  auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
+  }
+  auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
+  auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
+  auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory, is_coalesced);
+  }
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
+  }
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
+  }
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<bool> is_coalesced) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size, is_coalesced);
+  }
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, is_coalesced);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout);
+  }
+  auto [compressed_indices_value, compressed_indices_bdim] = unwrapTensorAtLevel(compressed_indices, cur_level);
+  auto [plain_indices_value, plain_indices_bdim] = unwrapTensorAtLevel(plain_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
+  }
+  auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level);
+  auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
+  }
+  auto [ccol_indices_value, ccol_indices_bdim] = unwrapTensorAtLevel(ccol_indices, cur_level);
+  auto [row_indices_value, row_indices_bdim] = unwrapTensorAtLevel(row_indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
+}
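+// The _validate_sparse_*_args wrappers above are the "no returns" flavor of the
+// generated plumbing (note the "gen_vmap_plumbing_no_returns" escape check):
+// they unwrap and invoke the batch rule purely for its validation side effects,
+// so there is nothing to re-wrap with makeBatched.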
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<bool> is_coalesced) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
+  }
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::sparse_mask::call(self, mask);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _sparse_mask_projection_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::_sparse_mask_projection::call(self, mask, accumulate_matches);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, accumulate_matches);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _to_cpu_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::_to_cpu::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_dense_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::to_dense::call(self, dtype, masked_grad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype, masked_grad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_to_dense::call(self, dtype, masked_grad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype, masked_grad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, ::std::optional<bool> masked_grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::to_dense_backward::call(grad, input, masked_grad);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, masked_grad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor coalesce_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::coalesce::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_coalesce::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _indices_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_indices::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _values_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_values::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_coalesced_::call(self, coalesced);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, coalesced);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor indices_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::indices::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor values_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::values::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::crow_indices::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor col_indices_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::col_indices::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ccol_indices::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor row_indices_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::row_indices::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
+    return at::_ops::hspmm::call(mat1, mat2);
+  }
+  auto [mat1_value, mat1_bdim] = unwrapTensorAtLevel(mat1, cur_level);
+  auto [mat2_value, mat2_bdim] = unwrapTensorAtLevel(mat2, cur_level);
+  auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
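+// Operators returning tensor lists (_to_cpu above, the unbind_* wrappers below)
+// go through makeBatchedVector instead of makeBatched: the batch rule returns a
+// vector of result tensors together with their batch dimension, and each
+// element of the vector is re-wrapped at cur_level.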
+template +at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking); + return self; +} +template +::std::vector unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unbind_int::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unbind_Dimname::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sparse_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_sparse_dim::call(self, sparse_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, sparse_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, ::std::optional layout, at::OptionalIntArrayRef blocksize, ::std::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_generated_plumbing(const at::Tensor & self, ::std::optional layout, at::OptionalIntArrayRef blocksize, ::std::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse::call(self, layout, blocksize, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_csr_generated_plumbing(const at::Tensor & self, ::std::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_csr::call(self, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_csr_generated_plumbing(const at::Tensor & self, ::std::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_to_sparse_csr::call(self, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, ::std::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_sparse_csc::call(self, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _to_sparse_csc_generated_plumbing(const at::Tensor & self, ::std::optional dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _to_sparse_csc_generated_plumbing(const at::Tensor & self, ::std::optional<int64_t> dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_to_sparse_csc::call(self, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dense_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_to_sparse_bsr::call(self, blocksize, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_to_sparse_bsc::call(self, blocksize, dense_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured_generated_plumbing(const at::Tensor & dense) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(dense, cur_level)) {
+    return at::_ops::_to_sparse_semi_structured::call(dense);
+  }
+  auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level);
+  auto results = batch_rule(dense_value, dense_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::to_mkldnn::call(self, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups, input_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::to_mkldnn_backward::call(grad, input);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
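+// [Editor's note; not part of the generator's output] The unbatched fast
+// path in each function calls at::_ops::<op>::call(...), i.e. it redispatches
+// the original operator schema while the FuncTorchBatched key is excluded, so
+// calls with no tensor batched at the current level pay only the
+// isBatchedAtLevel checks before reaching the regular kernel.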
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype, reduce_range);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
+    return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
+  }
+  auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
+  auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level);
+  auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
+    return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level);
+  auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::dequantize_self::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> dequantize_tensors_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::dequantize_tensors::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::q_per_channel_scales::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::q_per_channel_zero_points::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
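+// [Editor's note; not part of the generator's output] TensorList arguments
+// (e.g. `tensors` in quantize_per_tensor_tensors and dequantize_tensors
+// above) are not unwrapped by the plumbing; the whole list is forwarded and
+// the batch rule is expected to unwrap each element itself, which is why
+// isBatchedAtLevel on the list is the only check the plumbing performs.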
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor int_repr_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::int_repr::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale, zero_point);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) {
+    return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto [fake_quant_enabled_value, fake_quant_enabled_bdim] = unwrapTensorAtLevel(fake_quant_enabled, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
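+// [Editor's note; not part of the generator's output] Multi-output batch
+// rules use a flat tuple convention: (value0, bdim0, value1, bdim1, ...).
+// The plumbing re-wraps them pairwise, so std::get<0>/std::get<1> become the
+// first output and std::get<2>/std::get<3> the second, as in the cachemask
+// and learnable-affine backward functions above.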
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
+    return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level);
+  auto [zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::_saturate_weight_to_fp16::call(weight);
+  }
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(weight_value, weight_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, bool copy, ::std::optional<at::MemoryFormat> memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
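+// [Editor's note; not part of the generator's output] These templates are
+// instantiated when a batch rule is registered for the corresponding op. In
+// the functorch sources this is wrapped in a registration macro (roughly the
+// VMAP_SUPPORT / VMAP_SUPPORT2 helpers in BatchRulesHelper.h); the expansion
+// is approximately:
+//
+//   m.impl("to.dtype_layout",
+//          to_dtype_layout_generated_plumbing<decltype(&batch_rule), &batch_rule>);
+//
+// Because the batch rule is baked in as a non-type template parameter, the
+// batch_rule(...) call sites above are direct calls the compiler can inline,
+// rather than calls through a runtime function pointer.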
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector meshgrid_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::meshgrid::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::meshgrid_indexing::call(tensors, indexing); + } + + auto results = batch_rule(tensors, indexing); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::cartesian_prod::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::combinations::call(self, r, with_replacement);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, r, with_replacement);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward_generated_plumbing(const ::std::optional<at::Tensor> & grad_y, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  }
+  auto [z_state_value, z_state_bdim] = unwrapTensorAtLevel(z_state, cur_level);
+  auto [cell_state_fwd_value, cell_state_fwd_bdim] = unwrapTensorAtLevel(cell_state_fwd, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [layersOutputs_value, layersOutputs_bdim] = unwrapTensorAtLevel(layersOutputs, cur_level);
+  std::optional<Tensor> grad_y_value;
+  std::optional<int64_t> grad_y_bdim;
+  if (grad_y) {
+    std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level);
+  }
+  std::optional<Tensor> grad_hy_value;
+  std::optional<int64_t> grad_hy_bdim;
+  if (grad_hy) {
+    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
+  }
+  std::optional<Tensor> grad_cy_value;
+  std::optional<int64_t> grad_cy_bdim;
+  if (grad_cy) {
+    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
+  }
+  auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
+    return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
+  }
+  auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
+  auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
+  auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
+  std::optional<Tensor> input_bias_value;
+  std::optional<int64_t> input_bias_bdim;
+  if (input_bias) {
+    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
+  }
+  std::optional<Tensor> hidden_bias_value;
+  std::optional<int64_t> hidden_bias_bdim;
+  if (hidden_bias) {
+    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
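+// [Editor's note; not part of the generator's output] Optional tensor
+// arguments get the two-step unwrap seen above: the plumbing declares
+// std::optional<Tensor> value / std::optional<int64_t> bdim locals, then
+// std::tie's them from unwrapTensorAtLevel only when the optional is engaged,
+// so a disengaged optional is forwarded to the batch rule unchanged.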
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
+    return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
+  }
+  auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
+  auto [cy_value, cy_bdim] = unwrapTensorAtLevel(cy, cur_level);
+  auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
+  std::optional<Tensor> grad_hy_value;
+  std::optional<int64_t> grad_hy_bdim;
+  if (grad_hy) {
+    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
+  }
+  std::optional<Tensor> grad_cy_value;
+  std::optional<int64_t> grad_cy_bdim;
+  if (grad_cy) {
+    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
+  }
+  auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_generated_plumbing(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
+    return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
+  }
+  auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
+  auto [cy_value, cy_bdim] = unwrapTensorAtLevel(cy, cur_level);
+  auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
+  std::optional<Tensor> grad_hy_value;
+  std::optional<int64_t> grad_hy_bdim;
+  if (grad_hy) {
+    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
+  }
+  std::optional<Tensor> grad_cy_value;
+  std::optional<int64_t> grad_cy_bdim;
+  if (grad_cy) {
+    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
+  }
+  auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward_generated_plumbing(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level)) {
+    return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
+  }
+  auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
+  auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
+  auto [cx_value, cx_bdim] = unwrapTensorAtLevel(cx, cur_level);
+  auto [cy_value, cy_bdim] = unwrapTensorAtLevel(cy, cur_level);
+  std::optional<Tensor> grad_hy_value;
+  std::optional<int64_t> grad_hy_bdim;
+  if (grad_hy) {
+    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
+  }
+  std::optional<Tensor> grad_cy_value;
+  std::optional<int64_t> grad_cy_bdim;
+  if (grad_cy) {
+    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
+  }
+  std::optional<Tensor> input_bias_value;
+  std::optional<int64_t> input_bias_bdim;
+  if (input_bias) {
+    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
+  }
+  std::optional<Tensor> hidden_bias_value;
+  std::optional<int64_t> hidden_bias_bdim;
+  if (hidden_bias) {
+    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
+  }
+  auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
+    return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
+  }
+  auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
+  auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  std::optional<Tensor> input_bias_value;
+  std::optional<int64_t> input_bias_bdim;
+  if (input_bias) {
+    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
+  }
+  std::optional<Tensor> hidden_bias_value;
+  std::optional<int64_t> hidden_bias_bdim;
+  if (hidden_bias) {
+    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
+  }
+  auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
+    return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
+  }
+  auto [grad_hy_value, grad_hy_bdim] = unwrapTensorAtLevel(grad_hy, cur_level);
+  auto [workspace_value, workspace_bdim] = unwrapTensorAtLevel(workspace, cur_level);
+  auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
+    return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
+  }
+  auto [grad_hy_value, grad_hy_bdim] = unwrapTensorAtLevel(grad_hy, cur_level);
+  auto [input_gates_value, input_gates_bdim] = unwrapTensorAtLevel(input_gates, cur_level);
+  auto [hidden_gates_value, hidden_gates_bdim] = unwrapTensorAtLevel(hidden_gates, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  std::optional<Tensor> input_bias_value;
+  std::optional<int64_t> input_bias_bdim;
+  if (input_bias) {
+    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
+  }
+  std::optional<Tensor> hidden_bias_value;
+  std::optional<int64_t> hidden_bias_bdim;
+  if (hidden_bias) {
+    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
+  }
+  auto results = batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+  }
+  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
+  auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
+  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
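+// [Editor's note; not part of the generator's output] The RNN plumbings come
+// in pairs matching the two overloads of each op: the ".input" variants take
+// a padded input tensor plus a batch_first flag, while the ".data" variants
+// take the flattened data and batch_sizes tensors of a packed sequence and
+// therefore have no batch_first parameter.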
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+  }
+  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
+  auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+  }
+  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
+  auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
+    return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+  }
+  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
+  auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
+    return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  std::optional<at::Tensor> b_ih_value;
+  std::optional<int64_t> b_ih_bdim;
+  if (b_ih) {
+    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
+  }
+  std::optional<at::Tensor> b_hh_value;
+  std::optional<int64_t> b_hh_bdim;
+  if (b_hh) {
+    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
+    return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  std::optional<at::Tensor> b_ih_value;
+  std::optional<int64_t> b_ih_bdim;
+  if (b_ih) {
+    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
+  }
+  std::optional<at::Tensor> b_hh_value;
+  std::optional<int64_t> b_hh_bdim;
+  if (b_hh) {
+    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
+    return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  std::optional<at::Tensor> b_ih_value;
+  std::optional<int64_t> b_ih_bdim;
+  if (b_ih) {
+    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
+  }
+  std::optional<at::Tensor> b_hh_value;
+  std::optional<int64_t> b_hh_bdim;
+  if (b_hh) {
+    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
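+// Note: the fused cell ops differ mainly in return arity: lstm_cell
+// produces (hy, cy) and is re-wrapped as a tuple, while gru_cell and the
+// rnn_*_cell variants produce a single hidden state. Optional biases are
+// unwrapped only when present; an absent bias flows through as an empty
+// std::optional on both the value and bdim sides.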
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih, const ::std::optional<at::Tensor> & b_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
+    return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  std::optional<at::Tensor> b_ih_value;
+  std::optional<int64_t> b_ih_bdim;
+  if (b_ih) {
+    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
+  }
+  std::optional<at::Tensor> b_hh_value;
+  std::optional<int64_t> b_hh_bdim;
+  if (b_hh) {
+    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
+    return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
+  auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
+  auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
+  auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
+  auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
+  auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
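+// Note: for the quantized_* cells the at::Scalar quantization parameters
+// (scale_ih, scale_hh, zero_point_ih, zero_point_hh) carry no batch
+// dimension, so they are passed through to the batch_rule unchanged.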
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
+    return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
+  auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
+  auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
+  auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
+  auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
+  auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
+    return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
+  auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
+  auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
+  auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
+  auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
+  auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
+    return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [w_ih_value, w_ih_bdim] = unwrapTensorAtLevel(w_ih, cur_level);
+  auto [w_hh_value, w_hh_bdim] = unwrapTensorAtLevel(w_hh, cur_level);
+  auto [b_ih_value, b_ih_bdim] = unwrapTensorAtLevel(b_ih, cur_level);
+  auto [b_hh_value, b_hh_bdim] = unwrapTensorAtLevel(b_hh, cur_level);
+  auto [packed_ih_value, packed_ih_bdim] = unwrapTensorAtLevel(packed_ih, cur_level);
+  auto [packed_hh_value, packed_hh_bdim] = unwrapTensorAtLevel(packed_hh, cur_level);
+  auto [col_offsets_ih_value, col_offsets_ih_bdim] = unwrapTensorAtLevel(col_offsets_ih, cur_level);
+  auto [col_offsets_hh_value, col_offsets_hh_bdim] = unwrapTensorAtLevel(col_offsets_hh, cur_level);
+  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
+    return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [lengths_value, lengths_bdim] = unwrapTensorAtLevel(lengths, cur_level);
+  auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
+    return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
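+// Note: non-Tensor shape arguments such as c10::SymIntArrayRef input_size
+// are forwarded verbatim; only Tensor (and optional Tensor) arguments
+// participate in unwrapping and re-wrapping.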
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
+    return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
+  }
+  auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level);
+  auto [batch_sizes_value, batch_sizes_bdim] = unwrapTensorAtLevel(batch_sizes, cur_level);
+  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lift_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lift::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lift_fresh::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lift_fresh_copy::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::masked_fill__Scalar::call(self, mask, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
+  return self;
+}
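+// Note [Generated inplace plumbing]
+// In-place variants (names ending in '_') use "gen_vmap_inplace_plumbing":
+// the batch_rule is expected to mutate self_value in place, so the wrapper
+// discards the rule's return value and simply returns self.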
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::masked_fill_Scalar::call(self, mask, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
+    return at::_ops::masked_fill__Tensor::call(self, mask, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+  batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
+    return at::_ops::masked_fill_Tensor::call(self, mask, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::masked_scatter_::call(self, mask, source);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::masked_scatter::call(self, mask, source);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor masked_scatter_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, sizes);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, ::std::optional<int64_t> dim, ::std::optional<int64_t> mask_type) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
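+// Note: optional scalar arguments (::std::optional<int64_t> dim, mask_type)
+// are not wrapped tensors, so unlike ::std::optional<at::Tensor> arguments
+// they need no conditional unwrapping and flow through unchanged.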
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::view::call(self, size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::view_dtype::call(self, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::put_::call(self, index, source, accumulate);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::put::call(self, index, source, accumulate);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
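+// Note: a view under vmap still goes through a batch rule, which has to
+// translate the requested size (expressed in per-example logical shape)
+// into a physical view that keeps the batch dimension intact.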
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::index_add_::call(self, dim, index, source, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::index_add::call(self, dim, index, source, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
+  return self;
+}
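+// Note: the overload suffixes in these names mirror the operator schema
+// overloads, e.g. index_add_dimname corresponds to index_add.dimname;
+// at::Dimname and c10::string_view arguments travel through untouched.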
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
+    return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_fill__int_Scalar::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
+    return at::_ops::index_fill__int_Tensor::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
+    return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
+    return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
+    return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_src::call(self, dim, index, src);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter__src::call(self, dim, index, src);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
+  return self;
+}
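+// Note: the scatter family enumerates one wrapper per schema overload
+// (src, value, reduce, value_reduce, dimname_src, dimname_value), each with
+// a functional form and, where the op has one, an in-place twin.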
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::scatter_value::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::scatter__value::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter__reduce::call(self, dim, index, src, reduce);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_dimname_src::call(self, dim, index, src);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::scatter_dimname_value::call(self, dim, index, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_add::call(self, dim, index, src);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_add_::call(self, dim, index, src);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_add_dimname::call(self, dim, index, src);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level);
+  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::eq__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::eq__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_and_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_and_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_and__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_and__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__and___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__and___Tensor::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self_value, self_bdim, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::__iand___Scalar::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return at::_ops::__iand___Tensor::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self_value, self_bdim, other_value, other_bdim); + return self; +} +template +at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_or_Scalar::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_or_Scalar_Tensor::call(self, other); + } + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) { + return 
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_or__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_or__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__or___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__or___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__ior___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__ior___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_xor_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_xor_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_xor__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_xor__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__xor___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__xor___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__ixor___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__ixor___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__lshift___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__lshift___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__ilshift___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__ilshift___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_left_shift_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_left_shift__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __rshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__rshift___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__rshift___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::__irshift___Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::__irshift___Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_right_shift_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::bitwise_right_shift__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, other); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, other); + return self; +} +template +at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(other, cur_level)) { + return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other); + } + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + auto results = batch_rule(self, other_value, other_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::tril_::call(self, diagonal); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, diagonal); + return self; +} +template +at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::triu_::call(self, diagonal); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, diagonal); + return self; +} +template +at::Tensor & digamma__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::digamma_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer 
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
+    return at::_ops::lerp__Scalar::call(self, end, weight);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
+  batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::lerp__Tensor::call(self, end, weight);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
+    return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
+  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
+  batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
+    return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [batch1_value, batch1_bdim] = unwrapTensorAtLevel(batch1, cur_level);
+  auto [batch2_value, batch2_bdim] = unwrapTensorAtLevel(batch2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::random__from::call(self, from, to, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, from, to, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::random__to::call(self, to, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, to, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & random__generated_plumbing(at::Tensor & self, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::random_::call(self, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::uniform_::call(self, from, to, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, from, to, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cauchy_::call(self, median, sigma, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, median, sigma, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::log_normal_::call(self, mean, std, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, mean, std, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::exponential_::call(self, lambd, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, lambd, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::geometric_::call(self, p, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, p, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::diag::call(self, diagonal);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, diagonal);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, ::std::optional<int64_t> dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::cross::call(self, other, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::triu::call(self, diagonal);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, diagonal);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::tril::call(self, diagonal);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, diagonal);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor trace_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::trace::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level)) {
+    return at::_ops::trace_backward::call(grad, sizes);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, sizes);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ne_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::ne_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ne__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::ne__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::not_equal_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::not_equal_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::not_equal__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::not_equal__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::eq_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::eq_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ge_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::ge_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::ge__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::ge__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::greater_equal_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::greater_equal_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::greater_equal__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::greater_equal__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::le_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::le_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::le__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::le__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::less_equal_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::less_equal_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::less_equal__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::less_equal__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gt_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::gt_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::gt__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::gt__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::greater_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::greater_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::greater__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::greater__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lt_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::lt_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::lt__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::lt__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::less_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::less_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::less__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::less__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::take::call(self, index);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, index_value, index_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, ::std::optional<int64_t> dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::take_along_dim::call(self, indices, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_select::call(self, dim, index);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_select_dimname::call(self, dim, index);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::masked_select::call(self, mask);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+    return at::_ops::masked_select_backward::call(grad, input, mask);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [mask_value, mask_bdim] = unwrapTensorAtLevel(mask, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nonzero_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nonzero::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nonzero_static_generated_plumbing(const at::Tensor & self, int64_t size, int64_t fill_value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nonzero_static::call(self, size, fill_value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size, fill_value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> nonzero_numpy_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nonzero_numpy::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor argwhere_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::argwhere::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::gather::call(self, dim, index, sparse_grad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
+  }
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
+    return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
+    return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [index_value, index_bdim] = unwrapTensorAtLevel(index, cur_level);
+  auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::addcmul::call(self, tensor1, tensor2, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
+  auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::addcmul_::call(self, tensor1, tensor2, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
+  auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
+  batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
+  auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::addcdiv_::call(self, tensor1, tensor2, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [tensor1_value, tensor1_bdim] = unwrapTensorAtLevel(tensor1, cur_level);
+  auto [tensor2_value, tensor2_bdim] = unwrapTensorAtLevel(tensor2, cur_level);
+  batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
+  return self;
+}
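+// Ops taking an optional tensor (e.g. `weight` in cross_entropy_loss below)
+// cannot use the structured-binding unwrap, so the generated code declares
+// empty std::optional value/bdim slots and fills them via std::tie only when
+// the argument is present; the batch rule then receives the optionals
+// directly. A sketch of the shape such a rule would need to have (the name
+// is hypothetical, for illustration only):
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>> my_loss_rule(
+//       const at::Tensor& self, std::optional<int64_t> self_bdim,
+//       const at::Tensor& target, std::optional<int64_t> target_bdim,
+//       const std::optional<at::Tensor>& weight,
+//       std::optional<int64_t> weight_bdim,
+//       int64_t reduction, c10::SymInt ignore_index, double label_smoothing);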
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(info, cur_level)) {
+    return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
+  }
+  auto [info_value, info_bdim] = unwrapTensorAtLevel(info, cur_level);
+  batch_rule(info_value, info_bdim, api_name, is_matrix);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, ::std::optional<c10::SymInt> N) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level)) {
+    return at::_ops::linalg_vander::call(x, N);
+  }
+  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+  auto results = batch_rule(x_value, x_bdim, N);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::svd::call(self, some, compute_uv);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, some, compute_uv);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::swapaxes::call(self, axis0, axis1);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, axis0, axis1);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::swapdims::call(self, dim0, dim1);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim0, dim1);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cholesky::call(self, upper);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, upper);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
+    return at::_ops::cholesky_solve::call(self, input2, upper);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::_cholesky_solve_helper::call(self, A, upper);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::cholesky_inverse::call(self, upper);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, upper);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> qr_generated_plumbing(const at::Tensor & self, bool some) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::qr::call(self, some);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, some);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> geqrf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::geqrf::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
+    return at::_ops::orgqr::call(self, input2);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) {
+    return at::_ops::ormqr::call(self, input2, input3, left, transpose);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [input2_value, input2_bdim] = unwrapTensorAtLevel(input2, cur_level);
+  auto [input3_value, input3_bdim] = unwrapTensorAtLevel(input3, cur_level);
+  auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_lu_with_info::call(self, pivot, check_errors);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, pivot, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
+    return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [LU_data_value, LU_data_bdim] = unwrapTensorAtLevel(LU_data, cur_level);
+  auto [LU_pivots_value, LU_pivots_bdim] = unwrapTensorAtLevel(LU_pivots, cur_level);
+  auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) { + return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots); + } + auto [LU_data_value, LU_data_bdim] = unwrapTensorAtLevel(LU_data, cur_level); + auto [LU_pivots_value, LU_pivots_bdim] = unwrapTensorAtLevel(LU_pivots, cur_level); + auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +at::Tensor multinomial_generated_plumbing(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::multinomial::call(self, num_samples, replacement, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & lgamma__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lgamma_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor lgamma_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::lgamma::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor digamma_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::digamma::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, 
+    return at::_ops::polygamma::call(n, self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(n, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::polygamma_::call(self, n);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, n);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::erfinv::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & erfinv__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::erfinv_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor i0_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::i0::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & i0__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::i0_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sign_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sign::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & sign__generated_plumbing(at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sign_::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor signbit_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::signbit::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::dist::call(self, other, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::atan2_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::atan2::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::arctan2::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::arctan2_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
+    return at::_ops::lerp_Scalar::call(self, end, weight);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
+  auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::lerp_Tensor::call(self, end, weight);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [end_value, end_bdim] = unwrapTensorAtLevel(end, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::histc::call(self, bins, min, max);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, bins, min, max);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & bins, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [bins_value, bins_bdim] = unwrapTensorAtLevel(bins, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::histogramdd::call(self, bins, range, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, ::std::optional<at::ArrayRef<double>> range, const ::std::optional<at::Tensor> & weight, bool density) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fmod_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fmod__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::fmod_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::fmod__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::hypot::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::hypot_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::igamma::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::igamma_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::igammac::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::igammac_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::nextafter::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::nextafter_::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::remainder_Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::remainder__Scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, other);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::remainder_Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::remainder__Tensor::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::remainder_Scalar_Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor min_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::min::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::fmin::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor max_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::max::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::fmax::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::maximum::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::max_other::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::minimum::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::min_other::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
+    return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [q_value, q_bdim] = unwrapTensorAtLevel(q, cur_level);
+  auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
+    return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [q_value, q_bdim] = unwrapTensorAtLevel(q, cur_level);
+  auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, ::std::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sort::call(self, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, descending);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> sort_stable_generated_plumbing(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sort_stable::call(self, stable, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sort_dimname::call(self, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, descending);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable_generated_plumbing(const at::Tensor & self, ::std::optional<bool> stable, at::Dimname dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor msort_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::msort::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::argsort::call(self, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, descending);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::argsort_stable::call(self, stable, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::argsort_dimname::call(self, dim, descending);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, descending);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> topk_generated_plumbing(const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::topk::call(self, k, dim, largest, sorted);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor all_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::all::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor any_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::any::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::renorm::call(self, p, dim, maxnorm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::renorm_::call(self, p, dim, maxnorm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, p, dim, maxnorm);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::unfold::call(self, dimension, size, step);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dimension, size, step);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_in, cur_level)) {
+    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
+  }
+  auto [grad_in_value, grad_in_bdim] = unwrapTensorAtLevel(grad_in, cur_level);
+  auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::pow_Tensor_Tensor::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
+  auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::pow_Scalar::call(self, exponent);
+  }
+  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
+  auto results = batch_rule(self, exponent_value, exponent_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::pow_Tensor_Scalar::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, exponent);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::pow__Scalar::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, exponent);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::pow__Tensor::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
+  batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
+  auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::float_power_Scalar::call(self, exponent);
+  }
+  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
+  auto results = batch_rule(self, exponent_value, exponent_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, exponent);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::float_power__Scalar::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, exponent);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::float_power__Tensor::call(self, exponent);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [exponent_value, exponent_bdim] = unwrapTensorAtLevel(exponent, cur_level);
+  batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::normal_::call(self, mean, std, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, mean, std, generator);
+  return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::normal_functional::call(self, mean, std, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, mean, std, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(mean, cur_level)) {
+    return at::_ops::normal_Tensor_float::call(mean, std, generator);
+  }
+  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
+  auto results = batch_rule(mean_value, mean_bdim, std, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(std, cur_level)) {
+    return at::_ops::normal_float_Tensor::call(mean, std, generator);
+  }
+  auto [std_value, std_bdim] = unwrapTensorAtLevel(std, cur_level);
+  auto results = batch_rule(mean, std_value, std_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) {
+    return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
+  }
+  auto [mean_value, mean_bdim] = unwrapTensorAtLevel(mean, cur_level);
+  auto [std_value, std_bdim] = unwrapTensorAtLevel(std, cur_level);
+  auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor alias_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::alias::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
+  }
+  auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level);
+  auto [inv_scale_value, inv_scale_bdim] = unwrapTensorAtLevel(inv_scale, cur_level);
+  batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_add_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_add__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_add_List::call(self, other, alpha);
+  }
+
+  auto results = batch_rule(self, other, alpha);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_add__List::call(self, other, alpha);
+  }
+
+  batch_rule(self, other, alpha);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_add_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_add__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_add_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_add_Tensor::call(self, other, alpha);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim, alpha);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_add__Tensor::call(self, other, alpha);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self, other_value, other_bdim, alpha);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_sub_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_sub__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_sub_List::call(self, other, alpha);
+  }
+
+  auto results = batch_rule(self, other, alpha);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_sub__List::call(self, other, alpha);
+  }
+
+  batch_rule(self, other, alpha);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
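+// NOTE [editorial sketch]: each plumbing function is a thin dispatcher. When
+// no argument is batched at the current vmap level it falls straight through
+// to the regular operator (at::_ops::...::call); otherwise it forwards to the
+// batch rule bound as a template argument. A hypothetical explicit
+// instantiation (rule name invented for illustration) would look like:
+//
+//   auto out = _foreach_sub_Scalar_generated_plumbing<
+//       decltype(&my_foreach_sub_scalar_rule),
+//       &my_foreach_sub_scalar_rule>(tensors, /*scalar=*/1.0);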
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_mul_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_mul__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_mul_List::call(self, other);
+  }
+
+  auto results = batch_rule(self, other);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_mul__List::call(self, other);
+  }
+
+  batch_rule(self, other);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_mul_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_mul_Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_mul__Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self, other_value, other_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_div_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_div__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_div_List::call(self, other);
+  }
+
+  auto results = batch_rule(self, other);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_div__List::call(self, other);
+  }
+
+  batch_rule(self, other);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_div_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
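+// NOTE [editorial sketch]: in the `.Tensor` overloads above only the Tensor
+// argument needs unwrapping. The structured binding
+//
+//   auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+//
+// splits a batched tensor into its physical tensor plus the index of its
+// batch dimension (an empty optional when `other` is not batched at this
+// level), and both pieces are forwarded to the batch rule.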
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_div__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_div_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_div_Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_div__Tensor::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self, other_value, other_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_clamp_max_List::call(self, other);
+  }
+
+  auto results = batch_rule(self, other);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_clamp_max__List::call(self, other);
+  }
+
+  batch_rule(self, other);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_clamp_min_List::call(self, other);
+  }
+
+  auto results = batch_rule(self, other);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_clamp_min__List::call(self, other);
+  }
+
+  batch_rule(self, other);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_maximum_List::call(self, other);
+  }
+
+  auto results = batch_rule(self, other);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_maximum__List::call(self, other);
+  }
+
+  batch_rule(self, other);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
+  }
+
+  auto results = batch_rule(self, scalar);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
+  }
+
+  batch_rule(self, scalar);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_minimum_List::call(self, other);
+  }
+
+  auto results = batch_rule(self, other);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_foreach_minimum__List::call(self, other);
+  }
+
+  batch_rule(self, other);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
+  }
+
+  auto results = batch_rule(self, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
+  }
+
+  batch_rule(self, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
+  }
+
+  auto results = batch_rule(self, tensor1, tensor2, value);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
+  }
+
+  auto results = batch_rule(self, tensor1, tensor2, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
+    return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
+  }
+  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
+  auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
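+// NOTE [editorial sketch]: the ternary _foreach_addcdiv/_foreach_addcmul
+// plumbing passes the three tensor lists to the batch rule unchanged; only
+// the `.Tensor` variant carries an extra `scalars` tensor that goes through
+// unwrapTensorAtLevel first, exactly as in the binary `.Tensor` overloads
+// earlier in this file. Recall what the op itself computes, elementwise per
+// list entry:
+//
+//   out[i] = self[i] + value * tensor1[i] / tensor2[i];  // _foreach_addcdiv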
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
+  }
+
+  batch_rule(self, tensor1, tensor2, value);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
+  }
+
+  batch_rule(self, tensor1, tensor2, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
+    return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
+  }
+  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
+  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
+  }
+
+  auto results = batch_rule(self, tensor1, tensor2, value);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
+  }
+
+  auto results = batch_rule(self, tensor1, tensor2, scalars);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
+    return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
+  }
+  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
+  auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
+  }
+
+  batch_rule(self, tensor1, tensor2, value);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
+    return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
+  }
+
+  batch_rule(self, tensor1, tensor2, scalars);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
+    return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
+  }
+  auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level);
+  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_abs_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_abs::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
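+// NOTE [editorial sketch]: from here on the file lists the unary foreach ops
+// (abs, acos, asin, ...). They all share one template and differ only in the
+// at::_ops fallback they call. For a list {t1, t2} the out-of-place form
+// behaves like:
+//
+//   std::vector<at::Tensor> out = at::_foreach_abs({t1, t2});
+//   // out[i] == at::abs(t_i); the in-place `_foreach_abs_` variant mutates
+//   // its inputs and returns void, which is why its plumbing re-wraps
+//   // nothing on the way out.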
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_abs__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_abs_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_acos_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_acos::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_acos__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_acos_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_asin_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_asin::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_asin__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_asin_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_atan_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_atan::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_atan__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_atan_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_ceil_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_ceil::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_ceil__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_ceil_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_cos_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_cos::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_cos__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_cos_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_cosh_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_cosh::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_cosh__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_cosh_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_erf_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_erf::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_erf__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_erf_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_erfc_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_erfc::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_erfc__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_erfc_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_exp_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_exp::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_exp__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_exp_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_expm1_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_expm1::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_expm1__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_expm1_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_floor_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_floor::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_floor__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_floor_::call(self);
+  }
+
+  batch_rule(self);
+}
"gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_floor_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_frac_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_frac::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_frac__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_frac_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) { + return at::_ops::_foreach_lerp_List::call(self, tensors1, weights); + } + + auto results = batch_rule(self, tensors1, weights); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) { + return at::_ops::_foreach_lerp__List::call(self, tensors1, weights); + } + + batch_rule(self, tensors1, weights); +} +template +::std::vector _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) { + return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight); + } + + auto results = batch_rule(self, tensors1, weight); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_lgamma_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_lgamma::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_lgamma__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_lgamma_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_log_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_log__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_log10_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log10::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_log10__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log10_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_log1p_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log1p::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_log1p__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log1p_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_log2_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log2::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_log2__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_log2_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_max_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_max::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_neg_generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_neg::call(self);
+  }
+
+  auto results = batch_rule(self);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_neg__generated_plumbing(at::TensorList self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_neg_::call(self);
+  }
+
+  batch_rule(self);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & ord, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_norm_Scalar::call(self, ord, dtype);
+  }
+
+  auto results = batch_rule(self, ord, dtype);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
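+// NOTE [editorial sketch]: _foreach_max above has no in-place sibling, and
+// _foreach_pow below adds a `ScalarAndTensor` overload where `self` is the
+// Scalar base and only `exponent` is a tensor list, so its batched-level
+// check inspects `exponent` alone. A hypothetical usage sketch of that
+// overload, raising 2 to each tensor in a list:
+//
+//   auto out = at::_foreach_pow(at::Scalar(2.0), exponents);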
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_pow_List_generated_plumbing(at::TensorList self, at::TensorList exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::_foreach_pow_List::call(self, exponent);
+  }
+
+  auto results = batch_rule(self, exponent);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_pow_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_pow_Scalar::call(self, exponent);
+  }
+
+  auto results = batch_rule(self, exponent);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_pow_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_pow_ScalarList::call(self, exponent);
+  }
+
+  auto results = batch_rule(self, exponent);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor_generated_plumbing(const at::Scalar & self, at::TensorList exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent);
+  }
+
+  auto results = batch_rule(self, exponent);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_pow__List_generated_plumbing(at::TensorList self, at::TensorList exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
+    return at::_ops::_foreach_pow__List::call(self, exponent);
+  }
+
+  batch_rule(self, exponent);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_pow__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_pow__Scalar::call(self, exponent);
+  }
+
+  batch_rule(self, exponent);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_pow__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_foreach_pow__ScalarList::call(self, exponent);
+  }
+
+  batch_rule(self, exponent);
+}
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_pow__ScalarList::call(self, exponent); + } + + batch_rule(self, exponent); +} +template +::std::vector _foreach_reciprocal_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_reciprocal::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_reciprocal__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_reciprocal_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_round_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_round::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_round__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_round_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sigmoid_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sigmoid::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sigmoid__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sigmoid_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sign_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return 
at::_ops::_foreach_sign::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sign__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sign_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sin_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sin::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sin__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sin_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sinh_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sinh::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sinh__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sinh_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_sqrt_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sqrt::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_sqrt__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_sqrt_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_tan_generated_plumbing(at::TensorList self) { + 
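+  // Editorial sketch (comment only, not emitted by the codegen): every
+  // wrapper in this foreach family follows the same plumbing shape. The
+  // batch_rule template parameter is assumed to have the usual functorch
+  // TensorList signature, e.g. (the name below is hypothetical):
+  //
+  //   std::tuple<std::vector<Tensor>, std::optional<int64_t>>
+  //   tan_foreach_batch_rule(at::TensorList self);
+  //
+  // If nothing in `self` is batched at the current vmap level, the wrapper
+  // falls through to the plain op below; otherwise it calls the batch rule
+  // and re-wraps every result tensor at `cur_level` via makeBatchedVector.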
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tan::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_tan__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tan_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_tanh_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tanh::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_tanh__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_tanh_::call(self); + } + + batch_rule(self); +} +template +::std::vector _foreach_trunc_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_trunc::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _foreach_trunc__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_trunc_::call(self); + } + + batch_rule(self); +} +template +void _foreach_zero__generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_zero_::call(self); + } + + batch_rule(self); +} +template +void _foreach_copy__generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = 
maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::_foreach_copy_::call(self, src, non_blocking);
+  }
+
+  batch_rule(self, src, non_blocking);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::vector<at::Tensor> _foreach_copy_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
+    return at::_ops::_foreach_copy::call(self, src, non_blocking);
+  }
+
+  auto results = batch_rule(self, src, non_blocking);
+  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) {
+    return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [boundaries_value, boundaries_bdim] = unwrapTensorAtLevel(boundaries, cur_level);
+  auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(boundaries, cur_level)) {
+    return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
+  }
+  auto [boundaries_value, boundaries_bdim] = unwrapTensorAtLevel(boundaries, cur_level);
+  auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
+    return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
+  }
+  auto [sorted_sequence_value, sorted_sequence_bdim] = unwrapTensorAtLevel(sorted_sequence, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> sorter_value;
+  std::optional<int64_t> sorter_bdim;
+  if (sorter) {
+    std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
+  }
+  auto results =
batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional side, const ::std::optional & sorter) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) { + return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter); + } + auto [sorted_sequence_value, sorted_sequence_bdim] = unwrapTensorAtLevel(sorted_sequence, cur_level); + std::optional sorter_value; + std::optional sorter_bdim; + if (sorter) { + std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level); + } + auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, out_int32); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) { + return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose); + } + auto [crow_indices_value, crow_indices_bdim] = unwrapTensorAtLevel(crow_indices, cur_level); + auto [col_indices_value, col_indices_bdim] = unwrapTensorAtLevel(col_indices, cur_level); + auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::mse_loss::call(self, target, reduction); + } + auto [self_value, 
self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
+    return at::_ops::l1_loss::call(self, target, reduction);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level);
+  std::optional<Tensor> weight_value;
+  std::optional<int64_t> weight_bdim;
+  if (weight) {
+    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const
::std::optional & weight, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::multilabel_margin_loss::call(self, target, reduction); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, 
cur_level) && !isBatchedAtLevel(is_target, cur_level)) { + return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto [is_target_value, is_target_bdim] = unwrapTensorAtLevel(is_target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, 
cur_level)) { + return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) { + return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto [total_weight_value, total_weight_bdim] = unwrapTensorAtLevel(total_weight, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple nll_loss2d_forward_generated_plumbing(const at::Tensor & 
self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) { + return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) { + return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto [total_weight_value, total_weight_bdim] = unwrapTensorAtLevel(total_weight, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::smooth_l1_loss::call(self, target, reduction, beta); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta); + 
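+  // `results` is a (value, bdim) pair: the raw output tensor plus the index
+  // of its batch dimension (nullopt if unbatched). makeBatched re-attaches
+  // that bdim so the caller sees a BatchedTensor at vmap level `cur_level`.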
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::huber_loss::call(self, target, reduction, delta); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return 
at::_ops::soft_margin_loss::call(self, target, reduction); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) { + return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [target_value, target_bdim] = unwrapTensorAtLevel(target, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::elu::call(self, alpha, scale, input_scale); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) { + return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_or_result_value, self_or_result_bdim] = unwrapTensorAtLevel(self_or_result, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
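+  // In-place plumbing ("gen_vmap_inplace_plumbing"): unlike the functional
+  // wrappers above, the batch rule mutates the unwrapped value in place and
+  // the wrapper returns `self` unchanged. No makeBatched call is needed
+  // because the BatchedTensor already aliases the mutated storage.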
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::elu_::call(self, alpha, scale, input_scale); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, alpha, scale, input_scale); + return self; +} +template +at::Tensor glu_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::glu::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::glu_backward::call(grad_output, self, dim); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) { + return at::_ops::glu_jvp::call(glu, x, dx, dim); + } + auto [glu_value, glu_bdim] = unwrapTensorAtLevel(glu, cur_level); + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto [dx_value, dx_bdim] = unwrapTensorAtLevel(dx, cur_level); + auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) { + return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim); + } + auto [grad_x_value, grad_x_bdim] = unwrapTensorAtLevel(grad_x, cur_level); + auto [grad_glu_value, 
grad_glu_bdim] = unwrapTensorAtLevel(grad_glu, cur_level); + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto [dgrad_glu_value, dgrad_glu_bdim] = unwrapTensorAtLevel(dgrad_glu, cur_level); + auto [dx_value, dx_bdim] = unwrapTensorAtLevel(dx, cur_level); + auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardsigmoid::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardsigmoid_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardsigmoid_backward::call(grad_output, self); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardtanh::call(self, min_val, max_val); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, min_val, max_val); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = 
maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardtanh_::call(self, min_val, max_val); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, min_val, max_val); + return self; +} +template +at::Tensor hardswish_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardswish::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & hardswish__generated_plumbing(at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardswish_::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim); + return self; +} +template +at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::hardswish_backward::call(grad_output, self); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::leaky_relu::call(self, 
negative_slope); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, negative_slope); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::leaky_relu_::call(self, negative_slope); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, negative_slope); + return self; +} +template +at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_sigmoid::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple log_sigmoid_forward_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_sigmoid_forward::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, 
cur_level) && !isBatchedAtLevel(buffer, cur_level)) { + return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [buffer_value, buffer_bdim] = unwrapTensorAtLevel(buffer, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rrelu_with_noise_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) { + return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [noise_value, noise_bdim] = unwrapTensorAtLevel(noise, cur_level); + auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) { + return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result); + } + auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level); + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [noise_value, noise_bdim] = unwrapTensorAtLevel(noise, cur_level); + auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor & rrelu_with_noise__generated_plumbing(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) { + return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [noise_value, noise_bdim] = unwrapTensorAtLevel(noise, cur_level); + batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::softplus::call(self, beta, threshold);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, beta, threshold);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::softshrink::call(self, lambd);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, lambd);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::softshrink_backward::call(grad_output, self, lambd);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::adaptive_avg_pool2d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::adaptive_avg_pool3d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::adaptive_max_pool2d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
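// [Editorial note; not part of the generated header.] Ops returning two
// tensors (e.g. adaptive_max_pool2d: output plus indices) use a four-element
// batch-rule result: (out, out_bdim, indices, indices_bdim). The plumbing
// re-wraps each pair separately, exactly as in the functions above:
//
//   return std::make_tuple(
//       makeBatched(std::get<0>(results), std::get<1>(results), cur_level),
//       makeBatched(std::get<2>(results), std::get<3>(results), cur_level));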
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::adaptive_max_pool3d::call(self, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
+    return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [random_samples_value, random_samples_bdim] = unwrapTensorAtLevel(random_samples, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
+    return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [random_samples_value, random_samples_bdim] = unwrapTensorAtLevel(random_samples, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::max_unpool2d::call(self, indices, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::reflection_pad1d::call(self, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::reflection_pad2d::call(self, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::reflection_pad3d::call(self, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::replication_pad1d::call(self, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::replication_pad2d::call(self, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::replication_pad3d::call(self, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_pad_circular::call(self, pad);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, pad);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_pad_enum::call(self, pad, mode, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, pad, mode, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, ::std::optional<double> value) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::pad::call(self, pad, mode, value);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, pad, mode, value);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
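// [Editorial note; not part of the generated header.] The *_vec upsample
// overloads below take at::OptionalSymIntArrayRef output_size together with an
// optional per-dimension scale-factor array. The element type shown below,
// ::std::optional<at::ArrayRef<double>>, is a reconstruction: the extracted
// text had its template arguments stripped, and the type is inferred from the
// non-vec overloads' ::std::optional<double> scales arguments.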
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level)) {
+    return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_nearest1d::call(self, output_size, scales);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, scales);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, scales);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
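// [Editorial note; not part of the generated header.] The upsample backward
// wrappers below take only grad_output as a tensor (output and input sizes are
// passed symbolically as c10::SymIntArrayRef), so their batched-at-level check
// inspects grad_output alone before falling through to at::_ops::<op>::call.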
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level)) {
+    return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
+    return at::_ops::sigmoid_backward::call(grad_output, output);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::logit_backward::call(grad_output, self, eps);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
+    return at::_ops::tanh_backward::call(grad_output, output);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
+    return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
+  }
+  auto [grad_output_value, grad_output_bdim] = unwrapTensorAtLevel(grad_output, cur_level);
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
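+// The convolution plumbings take the bias as ::std::optional<at::Tensor>, so
+// it cannot be unwrapped with structured bindings like the required tensors.
+// Instead, bias_value/bias_bdim are declared up front and filled in only when
+// a bias is actually present; the batch rule then receives an empty optional
+// pair when the bias is absent.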
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
+    return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [weight_value, weight_bdim] = unwrapTensorAtLevel(weight, cur_level);
+  std::optional<Tensor> bias_value;
+  std::optional<int64_t> bias_bdim;
+  if (bias) {
+      std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor column_stack_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::column_stack::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isfinite::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isinf::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::record_stream::call(self, s);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  batch_rule(self_value, self_bdim, s);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isposinf::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::isneginf::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_add_batch_dim::call(self, batch_dim, level);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, batch_dim, level);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
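+// For reference, a batch rule compatible with the unary plumbings above would
+// have the following shape. This is an illustrative sketch only;
+// my_isposinf_batch_rule is a made-up name, not something defined in this
+// header:
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>>
+//   my_isposinf_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim) {
+//     // The physical tensor already carries the vmap dim at self_bdim, so a
+//     // pointwise op can run as-is and report the same bdim back.
+//     return std::make_tuple(at::isposinf(self), self_bdim);
+//   }
+//
+// which would be instantiated as
+//   isposinf_generated_plumbing<decltype(&my_isposinf_batch_rule), &my_isposinf_batch_rule>.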
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_entr::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_ndtri::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_log_ndtr::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_expm1::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_exp2::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_psi_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_psi::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_digamma::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_gammaln::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_erf_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_erf::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_erfc::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_erfcx::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_erfinv::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_ndtr::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_xlog1py::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_xlog1py_self_scalar::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_xlog1py_other_scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_xlogy::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_xlogy_self_scalar::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_xlogy_other_scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_zeta::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_zeta_self_scalar::call(self, other);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_zeta_other_scalar::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_i0_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_i0::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_i0e::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_i1_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_i1::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_i1e::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_logit_generated_plumbing(const at::Tensor & self, ::std::optional<double> eps) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_logit::call(self, eps);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, eps);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_polygamma::call(n, self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(n, self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_logsumexp::call(self, dim, keepdim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, keepdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_expit_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_expit::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_sinc::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_round::call(self, decimals);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, decimals);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_log1p::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_log_softmax::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_gammainc::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::special_gammaincc::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_multigammaln::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::special_softmax::call(self, dim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, dim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
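+// The fft plumbings below come in three signature families: the 1-D
+// transforms (fft_fft, fft_ifft, ...) take an optional length n plus a single
+// dim, the 2-D transforms (fft_fft2, ...) take an optional size list s plus a
+// fixed dim list, and the N-D transforms (fft_fftn, ...) make the dim list
+// optional as well. All of them thread self through the same
+// unwrap/batch_rule/makeBatched sequence.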
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fft_fft::call(self, n, dim, norm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fft_ifft::call(self, n, dim, norm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fft_rfft::call(self, n, dim, norm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fft_irfft::call(self, n, dim, norm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fft_hfft::call(self, n, dim, norm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::fft_ihfft::call(self, n, dim, norm);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n, dim, norm);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fft2::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifft2::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_rfft2::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_irfft2::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_hfft2::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ihfft2::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fftn::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifftn::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_rfftn::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_irfftn::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_hfftn::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ihfftn::call(self, s, dim, norm); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, s, dim, norm); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_fftshift::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::fft_ifftshift::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, upper, check_errors); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); 
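+// Ops with multiple outputs (linalg_cholesky_ex and friends below) follow
+// the same pattern, except that batch_rule returns a flat tuple of
+// (value, bdim) pairs which is reassembled pairwise into the final
+// ::std::tuple: makeBatched(get<0>, get<1>), makeBatched(get<2>, get<3>),
+// and so on.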
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, upper, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_cholesky::call(self, upper);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, upper);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::linalg_cross::call(self, other, dim);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_lu_factor::call(A, pivot);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, pivot);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, pivot, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_lu::call(A, pivot);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, pivot);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
+  }
+  auto [LU_value, LU_bdim] = unwrapTensorAtLevel(LU, cur_level);
+  auto [pivots_value, pivots_bdim] = unwrapTensorAtLevel(pivots, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det_generated_plumbing(const at::Tensor & A) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::_linalg_det::call(A);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_det::call(A);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor det_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::det::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, hermitian, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_ldl_factor::call(self, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, hermitian);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
+  }
+  auto [LD_value, LD_bdim] = unwrapTensorAtLevel(LD, cur_level);
+  auto [pivots_value, pivots_bdim] = unwrapTensorAtLevel(pivots, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, ::std::optional<double> rcond, ::std::optional<c10::string_view> driver) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) {
+    return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [b_value, b_bdim] = unwrapTensorAtLevel(b, cur_level);
+  auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::linalg_matmul::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) {
+    return at::_ops::linalg_vecdot::call(x, y, dim);
+  }
+  auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+  auto [y_value, y_bdim] = unwrapTensorAtLevel(y, cur_level);
+  auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_matrix_exp::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet_generated_plumbing(const at::Tensor & A) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::_linalg_slogdet::call(A);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_slogdet_generated_plumbing(const at::Tensor & A) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_slogdet::call(A);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> slogdet_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::slogdet::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor logdet_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::logdet::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_eig_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_eig::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _linalg_eigvals_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_linalg_eigvals::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_eigvals::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, UPLO, compute_v);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_eigh::call(self, UPLO);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, UPLO);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_eigvalsh::call(self, UPLO);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, UPLO);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor & input, const at::Tensor & tau) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) {
+    return at::_ops::linalg_householder_product::call(input, tau);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [tau_value, tau_bdim] = unwrapTensorAtLevel(tau, cur_level);
+  auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_inv_ex::call(A, check_errors);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_inv::call(A);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor inverse_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::inverse::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::inner::call(self, other);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
+    return at::_ops::outer::call(self, vec2);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
+    return at::_ops::ger::call(self, vec2);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [vec2_value, vec2_bdim] = unwrapTensorAtLevel(vec2, cur_level);
+  auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_svd::call(A, full_matrices, driver);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, full_matrices, driver);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, ::std::optional<c10::string_view> driver) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_svdvals::call(A, driver);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, driver);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Scalar> & p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_cond::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_cond_p_str::call(self, p);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
+    return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  std::optional<Tensor> atol_value;
+  std::optional<int64_t> atol_bdim;
+  if (atol) {
+    std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
+  }
+  std::optional<Tensor> rtol_value;
+  std::optional<int64_t> rtol_bdim;
+  if (rtol) {
+    std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
+  }
+  auto results = batch_rule(self_value, self_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_pinv::call(self, rcond, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, rcond, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) {
+    return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [rcond_value, rcond_bdim] = unwrapTensorAtLevel(rcond, cur_level);
+  auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::linalg_solve::call(A, B, left);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _spsolve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
+    return at::_ops::_spsolve::call(A, B, left);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto [B_value, B_bdim] = unwrapTensorAtLevel(B, cur_level);
+  auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_tensorinv::call(self, ind);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, ind);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::linalg_tensorsolve::call(self, other, dims);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor> linalg_qr_generated_plumbing(const at::Tensor & A, c10::string_view mode) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(A, cur_level)) {
+    return at::_ops::linalg_qr::call(A, mode);
+  }
+  auto [A_value, A_bdim] = unwrapTensorAtLevel(A, cur_level);
+  auto results = batch_rule(A_value, A_bdim, mode);
+  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_matrix_power::call(self, n);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, n);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const ::std::optional<at::Tensor> & atol, const ::std::optional<at::Tensor> & rtol, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  std::optional<Tensor> atol_value;
+  std::optional<int64_t> atol_bdim;
+  if (atol) {
+    std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
+  }
+  std::optional<Tensor> rtol_value;
+  std::optional<int64_t> rtol_bdim;
+  if (rtol) {
+    std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
+  }
+  auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, ::std::optional<double> atol, ::std::optional<double> rtol, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, tol, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) {
+    return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [tol_value, tol_bdim] = unwrapTensorAtLevel(tol, cur_level);
+  auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(tensors, cur_level)) {
+    return at::_ops::linalg_multi_dot::call(tensors);
+  }
+
+  auto results = batch_rule(tensors);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, padding, output_size);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
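+// The _test_* wrappers below plumb ATen's internal testing operators; they
+// appear to exist to exercise corner cases of the generated plumbing
+// (optional int lists, string defaults, ambiguous overloads) rather than
+// user-facing math.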
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
+    return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_parallel_materialize_generated_plumbing(const at::Tensor & self, int64_t num_parallel, bool skip_first) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_test_parallel_materialize::call(self, num_parallel, skip_first);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, num_parallel, skip_first);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_test_optional_intlist::call(values, addends);
+  }
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(values_value, values_bdim, addends);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_test_optional_filled_intlist::call(values, addends);
+  }
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(values_value, values_bdim, addends);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_test_optional_floatlist::call(values, addends);
+  }
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(values_value, values_bdim, addends);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(dummy, cur_level)) {
+    return at::_ops::_test_string_default::call(dummy, a, b);
+  }
+  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
+  auto results = batch_rule(dummy_value, dummy_bdim, a, b);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(dummy, cur_level)) {
+    return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
+  }
+  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
+  auto results = batch_rule(dummy_value, dummy_bdim, a, b);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(dummy, cur_level)) {
+    return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
+  }
+  auto [dummy_value, dummy_bdim] = unwrapTensorAtLevel(dummy, cur_level);
+  auto results = batch_rule(dummy_value, dummy_bdim, a, b);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_test_warn_in_autograd::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, b);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer
= maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_autograd_multiple_dispatch_view::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths, const ::std::optional & indices, const ::std::optional & offsets, int64_t axis, bool unsafe, const ::std::optional & initial) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial); + } + auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level); + std::optional lengths_value; + std::optional lengths_bdim; + if (lengths) { + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level); + } + std::optional indices_value; + std::optional indices_bdim; + if (indices) { + std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level); + } + std::optional offsets_value; + std::optional offsets_bdim; + if (offsets) { + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level); + } + auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths, const ::std::optional & offsets, int64_t axis, const ::std::optional & initial) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial); + } 
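+  // NOTE: every *_generated_plumbing wrapper in this header follows the same
+  // five-step shape: (1) guard out FuncTorchBatched so nested dispatches do not
+  // re-enter this layer, (2) fall through to at::_ops::<op>::call when nothing
+  // is batched at the current vmap level, (3) unwrap each batched tensor into a
+  // (value, bdim) pair, (4) invoke the statically supplied batch_rule, and
+  // (5) rewrap the results with makeBatched at cur_level. The batch_rule
+  // signature this instantiation expects can be read off the call below;
+  // sketched here with an illustrative name that is not part of this header:
+  //
+  //   std::tuple<at::Tensor, std::optional<int64_t>>
+  //   segment_reduce_backward_batch_rule(
+  //       const at::Tensor& grad, std::optional<int64_t> grad_bdim,
+  //       const at::Tensor& output, std::optional<int64_t> output_bdim,
+  //       const at::Tensor& data, std::optional<int64_t> data_bdim,
+  //       c10::string_view reduce,
+  //       const std::optional<at::Tensor>& lengths, std::optional<int64_t> lengths_bdim,
+  //       const std::optional<at::Tensor>& offsets, std::optional<int64_t> offsets_bdim,
+  //       int64_t axis, const std::optional<at::Scalar>& initial);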
+ auto [grad_value, grad_bdim] = unwrapTensorAtLevel(grad, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [data_value, data_bdim] = unwrapTensorAtLevel(data, cur_level); + std::optional lengths_value; + std::optional lengths_bdim; + if (lengths) { + std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level); + } + std::optional offsets_value; + std::optional offsets_bdim; + if (offsets) { + std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level); + } + auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value, c10::string_view padding_side) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(sequences, cur_level)) { + return at::_ops::pad_sequence::call(sequences, batch_first, padding_value, padding_side); + } + + auto results = batch_rule(sequences, batch_first, padding_value, padding_side); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::flatten_dense_tensors::call(tensors); + } + + auto results = batch_rule(tensors); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) { + return at::_ops::unflatten_dense_tensors::call(flat, tensors); + } + auto [flat_value, flat_bdim] = unwrapTensorAtLevel(flat, cur_level); + auto results = batch_rule(flat_value, flat_bdim, tensors); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(list, cur_level)) { + return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory); + } + + auto results = batch_rule(list, dtype, layout, device, pin_memory); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor 
_fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_fw_primal_copy::call(self, level); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) { + return at::_ops::_make_dual_copy::call(primal, tangent, level); + } + auto [primal_value, primal_bdim] = unwrapTensorAtLevel(primal, cur_level); + auto [tangent_value, tangent_bdim] = unwrapTensorAtLevel(tangent, cur_level); + auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_as_real_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_as_complex_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_conj_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + 
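+  // Most ops in this family return a single Tensor and rewrap with makeBatched.
+  // The split_copy variants further down instead return ::std::vector<at::Tensor>
+  // and rewrap with makeBatchedVector; conceptually that helper is makeBatched
+  // mapped over the vector (sketch only, not the shipped implementation):
+  //
+  //   std::vector<at::Tensor> makeBatchedVector(
+  //       std::vector<at::Tensor> ts, std::optional<int64_t> bdim, int64_t level) {
+  //     std::vector<at::Tensor> out;
+  //     out.reserve(ts.size());
+  //     for (auto& t : ts) {
+  //       out.push_back(makeBatched(std::move(t), bdim, level));
+  //     }
+  //     return out;
+  //   }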
int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_neg_view_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::as_strided_copy::call(self, size, stride, storage_offset); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_sparse_broadcast_to_copy::call(self, size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::diagonal_copy::call(self, offset, dim1, dim2); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::expand_copy::call(self, size, implicit); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, implicit); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::permute_copy::call(self, dims); + } + 
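+  // A batch rule for a *_copy view op is typically the view op itself applied
+  // around the batch dimension. Illustrative sketch for permute_copy
+  // (hypothetical rule name; assumes functorch's moveBatchDimToFront helper
+  // and non-negative dims):
+  //
+  //   std::tuple<at::Tensor, std::optional<int64_t>>
+  //   permute_copy_batch_rule(const at::Tensor& self,
+  //                           std::optional<int64_t> self_bdim,
+  //                           at::IntArrayRef dims) {
+  //     auto self_ = moveBatchDimToFront(self, self_bdim);  // bdim -> dim 0
+  //     VmapDimVector new_dims{0};                          // bdim stays first
+  //     for (auto d : dims) new_dims.push_back(d + 1);      // shift user dims
+  //     return std::make_tuple(self_.permute(new_dims), 0);
+  //   }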
auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dims); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_reshape_alias_copy::call(self, size, stride); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::select_copy_int::call(self, dim, index); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, index); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::detach_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional start, ::std::optional end, c10::SymInt step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, start, end, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_copy_Tensor::call(self, split_size, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_size, dim); + return 
makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, split_sizes, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_copy_dim::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::squeeze_copy_dims::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor t_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::t_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::transpose_copy_int::call(self, dim0, dim1); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim0, dim1); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unsqueeze_copy::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_indices_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_values_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::indices_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor values_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::values_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::crow_indices_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::col_indices_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::ccol_indices_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::row_indices_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::vector unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unbind_copy_int::call(self, dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::unbind_copy_int_out::call(self, dim, out); + } + auto [self_value, self_bdim] = 
unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, dim, out); +} +template +void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_size, dim, out); +} +template +void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_sizes, dim, out); +} +template +at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_copy::call(self, size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::view_copy_dtype::call(self, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::unfold_copy::call(self, dimension, size, step); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dimension, size, step); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::alias_copy::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::to_padded_tensor::call(self, padding, output_size); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, padding, output_size); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _jagged_to_padded_dense_forward_generated_plumbing(const at::Tensor & values, at::TensorList offsets, c10::SymIntArrayRef max_lengths, double padding_value) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_jagged_to_padded_dense_forward::call(values, offsets, max_lengths, padding_value); + } + auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level); + auto results = batch_rule(values_value, values_bdim, offsets, max_lengths, padding_value); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _padded_dense_to_jagged_forward_generated_plumbing(const at::Tensor & dense, at::TensorList offsets, ::std::optional total_L) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(dense, cur_level) && !isBatchedAtLevel(offsets, cur_level)) { + return at::_ops::_padded_dense_to_jagged_forward::call(dense, offsets, total_L); + } + auto [dense_value, dense_bdim] = unwrapTensorAtLevel(dense, cur_level); + auto results = batch_rule(dense_value, dense_bdim, offsets, total_L); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) { + return at::_ops::_nested_tensor_softmax_with_shape::call(self, query); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto results = batch_rule(self_value, self_bdim, query_value, 
query_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _safe_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, ::std::optional dtype) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_safe_softmax::call(self, dim, dtype); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, dim, dtype); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional & mask, ::std::optional mask_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type); + } + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + auto [qkv_weight_value, qkv_weight_bdim] = unwrapTensorAtLevel(qkv_weight, cur_level); + auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level); + auto [proj_weight_value, proj_weight_bdim] = unwrapTensorAtLevel(proj_weight, cur_level); + auto [proj_bias_value, proj_bias_bdim] = unwrapTensorAtLevel(proj_bias, cur_level); + auto [norm_weight_1_value, norm_weight_1_bdim] = unwrapTensorAtLevel(norm_weight_1, cur_level); + auto [norm_bias_1_value, norm_bias_1_bdim] = unwrapTensorAtLevel(norm_bias_1, cur_level); + auto [norm_weight_2_value, norm_weight_2_bdim] = unwrapTensorAtLevel(norm_weight_2, cur_level); + auto [norm_bias_2_value, norm_bias_2_bdim] = unwrapTensorAtLevel(norm_bias_2, cur_level); + auto [ffn_weight_1_value, ffn_weight_1_bdim] = unwrapTensorAtLevel(ffn_weight_1, cur_level); + auto [ffn_bias_1_value, ffn_bias_1_bdim] = unwrapTensorAtLevel(ffn_bias_1, cur_level); + auto [ffn_weight_2_value, ffn_weight_2_bdim] = 
unwrapTensorAtLevel(ffn_weight_2, cur_level); + auto [ffn_bias_2_value, ffn_bias_2_bdim] = unwrapTensorAtLevel(ffn_bias_2, cur_level); + std::optional mask_value; + std::optional mask_bdim; + if (mask) { + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level); + } + auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional & mask, bool need_weights, bool average_attn_weights, ::std::optional mask_type) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) { + return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type); + } + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + auto [qkv_weight_value, qkv_weight_bdim] = unwrapTensorAtLevel(qkv_weight, cur_level); + auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level); + auto [proj_weight_value, proj_weight_bdim] = unwrapTensorAtLevel(proj_weight, cur_level); + auto [proj_bias_value, proj_bias_bdim] = unwrapTensorAtLevel(proj_bias, cur_level); + std::optional mask_value; + std::optional mask_bdim; + if (mask) { + std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_mask, double dropout_p, bool is_causal, 
::std::optional scale, bool enable_gqa) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) { + return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa); + } + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + std::optional attn_mask_value; + std::optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, scale, enable_gqa); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +::std::tuple _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_mask, double dropout_p, bool is_causal, const ::std::optional & dropout_mask, ::std::optional scale, bool enable_gqa) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) { + return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale, enable_gqa); + } + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + std::optional attn_mask_value; + std::optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + std::optional dropout_mask_value; + std::optional dropout_mask_bdim; + if (dropout_mask) { + std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim, scale, enable_gqa); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_attention_math_for_mps_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_mask, double dropout_p, bool is_causal, const ::std::optional & dropout_mask, ::std::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) { + return at::_ops::_scaled_dot_product_attention_math_for_mps::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale); + } + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + std::optional attn_mask_value; + std::optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + std::optional dropout_mask_value; + std::optional dropout_mask_bdim; + if (dropout_mask) { + std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_flash_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) { + return at::_ops::_scaled_dot_product_flash_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask, scale); + } + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, return_debug_mask, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), std::get<8>(results), std::get<9>(results), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level), makeBatched(std::get<14>(results), std::get<15>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_flash_attention_for_cpu_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const ::std::optional & attn_mask, ::std::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(query, 
cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) { + return at::_ops::_scaled_dot_product_flash_attention_for_cpu::call(query, key, value, dropout_p, is_causal, attn_mask, scale); + } + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + std::optional attn_mask_value; + std::optional attn_mask_bdim; + if (attn_mask) { + std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level); + } + auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) { + return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale); + } + auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level); + auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level); + auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level); + auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level); + auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level); + auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level); + auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level); + auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level); + auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level); + auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level); + auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale); + return 
std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & attn_mask, ::std::optional<double> scale) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
+ return at::_ops::_scaled_dot_product_flash_attention_for_cpu_backward::call(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
+ }
+ auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
+ auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
+ std::optional<Tensor> attn_mask_value;
+ std::optional<int64_t> attn_mask_bdim;
+ if (attn_mask) {
+ std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
+ }
+ auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_fused_attention_overrideable_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, ::std::array<bool,4> grad_input_mask, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
+ return at::_ops::_scaled_dot_product_fused_attention_overrideable_backward::call(grad_out, query, key, value, attn_bias, grad_input_mask, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
+ }
+ auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [attn_bias_value, attn_bias_bdim] = unwrapTensorAtLevel(attn_bias, cur_level);
+ auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
+ auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
+ auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
+ auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
+ auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
+ auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
+ auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, grad_input_mask, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, ::std::optional<double> scale) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level)) {
+ return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale);
+ }
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ std::optional<Tensor> attn_bias_value;
+ std::optional<int64_t> attn_bias_bdim;
+ if (attn_bias) {
+ std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias.value(), cur_level);
+ }
+ auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, compute_log_sumexp, dropout_p, is_causal, scale);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, ::std::optional<double> scale) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
+ return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale);
+ }
+ auto [grad_out__value, grad_out__bdim] = unwrapTensorAtLevel(grad_out_, cur_level);
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [attn_bias_value, attn_bias_bdim] = unwrapTensorAtLevel(attn_bias, cur_level);
+ auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
+ auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
+ auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
+ auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
+ auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, dropout_p, grad_input_mask, is_causal, scale);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level)) {
+ return at::_ops::_scaled_dot_product_cudnn_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
+ }
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ std::optional<Tensor> attn_bias_value;
+ std::optional<int64_t> attn_bias_bdim;
+ if (attn_bias) {
+ std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias.value(), cur_level);
+ }
+ auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, compute_log_sumexp, dropout_p, is_causal, return_debug_mask, scale);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), std::get<8>(results), std::get<9>(results), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level), makeBatched(std::get<14>(results), std::get<15>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
+ return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
+ }
+ auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
+ auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
+ auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
+ auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
+ auto [attn_bias_value, attn_bias_bdim] = unwrapTensorAtLevel(attn_bias, cur_level);
+ auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
+ auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
+ auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, attn_bias_value, attn_bias_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, scale);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & cum_seq_q, const ::std::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale, ::std::optional<int64_t> window_size_left, ::std::optional<int64_t> window_size_right, const ::std::optional<at::Tensor> & seqused_k, const ::std::optional<at::Tensor> & alibi_slopes) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(seqused_k, cur_level) && !isBatchedAtLevel(alibi_slopes, cur_level)) {
+ return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k, alibi_slopes);
+ }
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ std::optional<Tensor> cum_seq_q_value;
+ std::optional<int64_t> cum_seq_q_bdim;
+ if (cum_seq_q) {
+ std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q.value(), cur_level);
+ }
+ std::optional<Tensor> cum_seq_k_value;
+ std::optional<int64_t> cum_seq_k_bdim;
+ if (cum_seq_k) {
+ std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k.value(), cur_level);
+ }
+ std::optional<Tensor> seqused_k_value;
+ std::optional<int64_t> seqused_k_bdim;
+ if (seqused_k) {
+ std::tie(seqused_k_value, seqused_k_bdim) = unwrapTensorAtLevel(seqused_k.value(), cur_level);
+ }
+ std::optional<Tensor> alibi_slopes_value;
+ std::optional<int64_t> alibi_slopes_bdim;
+ if (alibi_slopes) {
+ std::tie(alibi_slopes_value, alibi_slopes_bdim) = unwrapTensorAtLevel(alibi_slopes.value(), cur_level);
+ }
+ auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale, window_size_left, window_size_right, seqused_k_value, seqused_k_bdim, alibi_slopes_value, alibi_slopes_bdim);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
+}
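+// Every *_generated_plumbing function in this file follows one shape:
+// exclude the FuncTorchBatched dispatch key, fall straight through to
+// at::_ops::<op>::call when no argument is batched at the current vmap
+// level, otherwise unwrap each Tensor into a (value, optional batch dim)
+// pair, invoke the supplied batch_rule, and re-wrap every returned pair
+// with makeBatched. A minimal sketch of a batch rule that such plumbing
+// could instantiate (my_unary_batch_rule is a hypothetical illustration,
+// not part of ATen):
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>>
+//   my_unary_batch_rule(const at::Tensor & self, std::optional<int64_t> self_bdim) {
+//     // Compute on the physical tensor; the batch dim threads through unchanged.
+//     return std::make_tuple(at::cos(self), self_bdim);
+//   }
+//
+//   // Instantiation binds the rule through the template parameters:
+//   // some_plumbing<decltype(&my_unary_batch_rule), &my_unary_batch_rule>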
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale, ::std::optional<int64_t> window_size_left, ::std::optional<int64_t> window_size_right) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
+ return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale, window_size_left, window_size_right);
+ }
+ auto [grad_out_value, grad_out_bdim] = unwrapTensorAtLevel(grad_out, cur_level);
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
+ auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
+ auto [cum_seq_q_value, cum_seq_q_bdim] = unwrapTensorAtLevel(cum_seq_q, cur_level);
+ auto [cum_seq_k_value, cum_seq_k_bdim] = unwrapTensorAtLevel(cum_seq_k, cur_level);
+ auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
+ auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
+ auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale, window_size_left, window_size_right);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const at::Tensor & out, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale, ::std::optional<int64_t> num_splits_key, ::std::optional<int64_t> window_size, bool shared_storage_dqdkdv) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
+ return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv);
+ }
+ auto [grad_out__value, grad_out__bdim] = unwrapTensorAtLevel(grad_out_, cur_level);
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [out_value, out_bdim] = unwrapTensorAtLevel(out, cur_level);
+ auto [logsumexp_value, logsumexp_bdim] = unwrapTensorAtLevel(logsumexp, cur_level);
+ auto [philox_seed_value, philox_seed_bdim] = unwrapTensorAtLevel(philox_seed, cur_level);
+ auto [philox_offset_value, philox_offset_bdim] = unwrapTensorAtLevel(philox_offset, cur_level);
+ std::optional<Tensor> bias_value;
+ std::optional<int64_t> bias_bdim;
+ if (bias) {
+ std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
+ }
+ std::optional<Tensor> cu_seqlens_q_value;
+ std::optional<int64_t> cu_seqlens_q_bdim;
+ if (cu_seqlens_q) {
+ std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level);
+ }
+ std::optional<Tensor> cu_seqlens_k_value;
+ std::optional<int64_t> cu_seqlens_k_bdim;
+ if (cu_seqlens_k) {
+ std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level);
+ }
+ auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, bias_value, bias_bdim, out_value, out_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, max_seqlen_k, logsumexp_value, logsumexp_bdim, dropout_p, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, custom_mask_type, bias_requires_grad, scale, num_splits_key, window_size, shared_storage_dqdkdv);
+ return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) {
+ return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
+ }
+ auto [q_value, q_bdim] = unwrapTensorAtLevel(q, cur_level);
+ auto [k_value, k_bdim] = unwrapTensorAtLevel(k, cur_level);
+ auto [v_value, v_bdim] = unwrapTensorAtLevel(v, cur_level);
+ auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor & _fill_mem_eff_dropout_mask__generated_plumbing(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::_fill_mem_eff_dropout_mask_::call(self, dropout_p, seed, offset);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ batch_rule(self_value, self_bdim, dropout_p, seed, offset);
+ return self;
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional<at::Tensor> & mask) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
+ return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
+ }
+ auto [query_value, query_bdim] = unwrapTensorAtLevel(query, cur_level);
+ auto [key_value, key_bdim] = unwrapTensorAtLevel(key, cur_level);
+ auto [value_value, value_bdim] = unwrapTensorAtLevel(value, cur_level);
+ auto [qkv_weight_value, qkv_weight_bdim] = unwrapTensorAtLevel(qkv_weight, cur_level);
+ auto [qkv_bias_value, qkv_bias_bdim] = unwrapTensorAtLevel(qkv_bias, cur_level);
+ auto [proj_weight_value, proj_weight_bdim] = unwrapTensorAtLevel(proj_weight, cur_level);
+ auto [proj_bias_value, proj_bias_bdim] = unwrapTensorAtLevel(proj_bias, cur_level);
+ std::optional<Tensor> mask_value;
+ std::optional<int64_t> mask_bdim;
+ if (mask) {
+ std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
+ }
+ auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
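+// _fill_mem_eff_dropout_mask_ above is the in-place flavor of the pattern
+// ("gen_vmap_inplace_plumbing"): the batch rule is invoked purely for its
+// side effect on self_value, and `self` itself is returned, so nothing is
+// re-wrapped. A plausible shape for such a rule (hypothetical, for
+// illustration only):
+//
+//   at::Tensor & my_inplace_batch_rule(at::Tensor & self, std::optional<int64_t> self_bdim,
+//                                      double dropout_p, int64_t seed, int64_t offset);
+//
+// The special-function plumbings that follow are the simplest unary case:
+// one tensor in, one (value, bdim) pair out.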
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_airy_ai::call(x);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_bessel_j0::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_bessel_j1::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_bessel_y0::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_bessel_y1::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_t::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_u::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_v::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_w::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
+ }
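+// The _x_scalar / _n_scalar overloads in these polynomial families exist
+// because an at::Scalar can never carry a vmap batch dim: only the Tensor
+// argument is tested with isBatchedAtLevel and unwrapped, while the Scalar
+// is forwarded to the batch rule unchanged. A hypothetical rule shape for
+// the Tensor/Scalar case (illustration only, not an ATen declaration):
+//
+//   std::tuple<at::Tensor, std::optional<int64_t>>
+//   my_poly_n_scalar_rule(const at::Tensor & x, std::optional<int64_t> x_bdim,
+//                         const at::Scalar & n);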
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_hermite_polynomial_h::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_hermite_polynomial_he::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_laguerre_polynomial_l::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_legendre_polynomial_p::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(n, cur_level)) {
+ return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
+ }
+ auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level);
+ auto results = batch_rule(x, n_value, n_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(x, cur_level)) {
+ return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
+ }
+ auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level);
+ auto results = batch_rule(x_value, x_bdim, n);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_modified_bessel_i0::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_modified_bessel_i1::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
+ vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+ int64_t cur_level = maybe_layer->layerId();
+ if (!isBatchedAtLevel(self, cur_level)) {
+ return at::_ops::special_modified_bessel_k0::call(self);
+ }
+ auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+ auto results = batch_rule(self_value, self_bdim);
+ return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
+ c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+ auto maybe_layer = maybeCurrentDynamicLayer();
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::special_modified_bessel_k1::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_scaled_modified_bessel_k0::call(x); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_scaled_modified_bessel_k1::call(x); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n); + } + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n); + } + 
auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n); + } + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return 
at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n); + } + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x_value, x_bdim, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(n, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n); + } + auto [n_value, n_bdim] = unwrapTensorAtLevel(n, cur_level); + auto results = batch_rule(x, n_value, n_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + auto results = batch_rule(x_value, x_bdim, n); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(x, cur_level)) { + return at::_ops::special_spherical_bessel_j0::call(x); + } + auto [x_value, x_bdim] = unwrapTensorAtLevel(x, cur_level); + 
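+// After the _foobar debug op, the _fused_* optimizer plumbings below are
+// generated as "gen_vmap_plumbing_no_returns": they take at::TensorList
+// arguments, return void, and invoke the batch rule purely for its in-place
+// effect on the parameter and state lists. Only the optional grad_scale /
+// found_inf tensors (plus lr in the *_tensor_lr variants) are unwrapped
+// individually; the TensorLists are passed through whole. A hypothetical
+// rule shape (illustration only):
+//
+//   void my_fused_sgd_batch_rule(at::TensorList self, at::TensorList grads,
+//                                at::TensorList momentum_buffer_list,
+//                                double weight_decay, double momentum, double lr,
+//                                double dampening, bool nesterov, bool maximize,
+//                                bool is_first_step,
+//                                std::optional<at::Tensor> grad_scale_value,
+//                                std::optional<int64_t> grad_scale_bdim,
+//                                std::optional<at::Tensor> found_inf_value,
+//                                std::optional<int64_t> found_inf_bdim);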
auto results = batch_rule(x_value, x_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foobar::call(self, arg1, arg2, arg3); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_adam__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _fused_adam__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
+    return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+  }
+  auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level);
+  std::optional<at::Tensor> grad_scale_value;
+  std::optional<int64_t> grad_scale_bdim;
+  if (grad_scale) {
+    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
+  }
+  std::optional<at::Tensor> found_inf_value;
+  std::optional<int64_t> found_inf_bdim;
+  if (found_inf) {
+    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
+  }
+  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
+    return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+  }
+  std::optional<at::Tensor> grad_scale_value;
+  std::optional<int64_t> grad_scale_bdim;
+  if (grad_scale) {
+    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
+  }
+  std::optional<at::Tensor> found_inf_value;
+  std::optional<int64_t> found_inf_bdim;
+  if (found_inf) {
+    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
+  }
+  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _fused_adamw__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && 
!isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_sgd__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd_::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_sgd__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd__tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, 
cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _fused_adagrad__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adagrad_::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); +} +template +void _propagate_xla_data_generated_plumbing(const at::Tensor & input, const at::Tensor & output) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) { + return at::_ops::_propagate_xla_data::call(input, output); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + batch_rule(input_value, input_bdim, output_value, output_bdim); +} +template +void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) { + return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level); + auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level); + auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level); + auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level); + auto [out0_value, out0_bdim] = unwrapTensorAtLevel(out0, cur_level); + auto [out1_value, out1_bdim] = unwrapTensorAtLevel(out1, cur_level); + auto [out2_value, out2_bdim] = unwrapTensorAtLevel(out2, cur_level); + std::optional cx_value; + std::optional cx_bdim; + if (cx) { + std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level); + } + std::optional grad_output_value; + std::optional grad_output_bdim; + if (grad_output) { + std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level); + } + std::optional grad_hy_value; + std::optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + std::optional grad_cy_value; + std::optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + std::optional dropout_state_value; + std::optional dropout_state_bdim; + if (dropout_state) { + std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level); + } + batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3); +} +template +at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, 
cur_level)) {
+    return at::_ops::bernoulli_Tensor::call(self, p, generator);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [p_value, p_bdim] = unwrapTensorAtLevel(p, cur_level);
+  auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
+    return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [indices_value, indices_bdim] = unwrapTensorAtLevel(indices, cur_level);
+  auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor resize_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::resize::call(self, size, memory_format);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size, memory_format);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level)) {
+    return at::_ops::_resize_output::call(self, size, device);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto results = batch_rule(self_value, self_bdim, size, device);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
+    return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
+  }
+  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
+  auto [values_value, values_bdim] = unwrapTensorAtLevel(values, cur_level);
+  auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
+  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
+}
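+// Note that list-typed arguments are never unwrapped by this plumbing: the
+// c10::List<::std::optional<at::Tensor>> of indices above is forwarded to the
+// batch rule as-is, and it is the rule's job to handle any batched members.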
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
+    return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+  }
+  auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level);
+  auto [weight_buf_value, weight_buf_bdim] = unwrapTensorAtLevel(weight_buf, cur_level);
+  auto [hx_value, hx_bdim] = unwrapTensorAtLevel(hx, cur_level);
+  auto [output_value, output_bdim] = unwrapTensorAtLevel(output, cur_level);
+  auto [reserve_value, reserve_bdim] = unwrapTensorAtLevel(reserve, cur_level);
+  auto [out0_value, out0_bdim] = unwrapTensorAtLevel(out0, cur_level);
+  auto [out1_value, out1_bdim] = unwrapTensorAtLevel(out1, cur_level);
+  auto [out2_value, out2_bdim] = unwrapTensorAtLevel(out2, cur_level);
+  std::optional<at::Tensor> cx_value;
+  std::optional<int64_t> cx_bdim;
+  if (cx) {
+    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
+  }
+  std::optional<at::Tensor> grad_output_value;
+  std::optional<int64_t> grad_output_bdim;
+  if (grad_output) {
+    std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
+  }
+  std::optional<at::Tensor> grad_hy_value;
+  std::optional<int64_t> grad_hy_bdim;
+  if (grad_hy) {
+    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
+  }
+  std::optional<at::Tensor> grad_cy_value;
+  std::optional<int64_t> grad_cy_bdim;
+  if (grad_cy) {
+    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
+  }
+  std::optional<at::Tensor> dropout_state_value;
+  std::optional<int64_t> dropout_state_bdim;
+  if (dropout_state) {
+    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
+  }
+  batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value,
grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3); +} +template +::std::tuple _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [running_mean_value, running_mean_bdim] = unwrapTensorAtLevel(running_mean, cur_level); + auto [running_var_value, running_var_bdim] = unwrapTensorAtLevel(running_var, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_size, dim, out); +} +template +void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out); + } + auto 
[self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + batch_rule(self_value, self_bdim, split_sizes, dim, out); +} +template +::std::tuple _batch_norm_with_update_functional_generated_plumbing(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) { + return at::_ops::_batch_norm_with_update_functional::call(input, weight, bias, running_mean, running_var, momentum, eps); + } + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [running_mean_value, running_mean_bdim] = unwrapTensorAtLevel(running_mean, cur_level); + auto [running_var_value, running_var_bdim] = unwrapTensorAtLevel(running_var, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + std::optional bias_value; + std::optional bias_bdim; + if (bias) { + std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level); + } + auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level)); +} +template +at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, ::std::optional memory_format) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) { + return at::_ops::resize_as::call(self, the_template, memory_format); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [the_template_value, the_template_bdim] = unwrapTensorAtLevel(the_template, cur_level); + auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) { + return at::_ops::resize_as_sparse::call(self, 
the_template); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [the_template_value, the_template_bdim] = unwrapTensorAtLevel(the_template, cur_level); + auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor zero_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::zero::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_coalesced::call(self, coalesced); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, coalesced); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) { + return at::_ops::copy_sparse_to_sparse::call(self, src, 
non_blocking); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [src_value, src_bdim] = unwrapTensorAtLevel(src, cur_level); + auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out); + } + auto [scales_value, scales_bdim] = unwrapTensorAtLevel(scales, cur_level); + auto [zero_points_value, zero_points_bdim] = unwrapTensorAtLevel(zero_points, cur_level); + batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out); +} +template +void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::dequantize_tensors_out::call(tensors, out); + } + + batch_rule(tensors, out); +} +template +::std::tuple _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) { + return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [observer_on_value, observer_on_bdim] = unwrapTensorAtLevel(observer_on, cur_level); + auto [fake_quant_on_value, fake_quant_on_bdim] = unwrapTensorAtLevel(fake_quant_on, cur_level); + auto [running_min_value, running_min_bdim] = unwrapTensorAtLevel(running_min, cur_level); + auto [running_max_value, running_max_bdim] = unwrapTensorAtLevel(running_max, cur_level); + auto [scale_value, scale_bdim] = unwrapTensorAtLevel(scale, cur_level); + auto 
[zero_point_value, zero_point_bdim] = unwrapTensorAtLevel(zero_point, cur_level); + auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level)); +} +template +void lstm_mps_backward_out_generated_plumbing(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) { + return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2); + } + auto [z_state_value, z_state_bdim] = unwrapTensorAtLevel(z_state, cur_level); + auto [cell_state_fwd_value, cell_state_fwd_bdim] = unwrapTensorAtLevel(cell_state_fwd, cur_level); + auto [input_value, input_bdim] = unwrapTensorAtLevel(input, cur_level); + auto [layersOutputs_value, layersOutputs_bdim] = unwrapTensorAtLevel(layersOutputs, cur_level); + auto [out0_value, out0_bdim] = unwrapTensorAtLevel(out0, cur_level); + std::optional grad_y_value; + std::optional grad_y_bdim; + if (grad_y) { + std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level); + } + std::optional grad_hy_value; + std::optional grad_hy_bdim; + if (grad_hy) { + std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level); + } + std::optional grad_cy_value; + std::optional grad_cy_bdim; + if (grad_cy) { + std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level); + } + batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, 
batch_first, out0_value, out0_bdim, out1, out2); +} +template +at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::set_source_Storage::call(self, source); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) { + return at::_ops::set_source_Tensor::call(self, source); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [source_value, source_bdim] = unwrapTensorAtLevel(source, cur_level); + auto results = batch_rule(self_value, self_bdim, source_value, source_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor set_generated_plumbing(const at::Tensor & self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::set::call(self); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, ::std::optional to, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random_from::call(self, from, to, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, from, to, generator); + return 
makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random_to::call(self, to, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, to, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor random_generated_plumbing(const at::Tensor & self, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::random::call(self, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::uniform::call(self, from, to, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, from, to, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::cauchy::call(self, median, sigma, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, median, sigma, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::log_normal::call(self, mean, std, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, mean, std, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, 
::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::exponential::call(self, lambd, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, lambd, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, ::std::optional generator) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::geometric::call(self, p, generator); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto results = batch_rule(self_value, self_bdim, p, generator); + return makeBatched(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + std::optional weight_value; + std::optional weight_bdim; + if (weight) { + std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level); + } + batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out); +} +template +void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out); + } + auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level); + auto [inv_scale_value, inv_scale_bdim] = unwrapTensorAtLevel(inv_scale, cur_level); + batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out); +} +template +::std::tuple<::std::vector,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, 
"gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale); + } + auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level); + auto [inv_scale_value, inv_scale_bdim] = unwrapTensorAtLevel(inv_scale, cur_level); + auto results = batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +::std::tuple _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); + } + auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level); + auto [growth_tracker_value, growth_tracker_bdim] = unwrapTensorAtLevel(growth_tracker, cur_level); + auto [found_inf_value, found_inf_bdim] = unwrapTensorAtLevel(found_inf, cur_level); + auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval); + return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level)); +} +template +void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_add_List_out::call(self, other, alpha, out); + } + + batch_rule(self, other, alpha, out); +} +template +void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
+  }
+
+  batch_rule(self, scalar, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
+  }
+
+  batch_rule(self, other, alpha, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
+  }
+
+  batch_rule(self, scalars, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_add_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self, other_value, other_bdim, alpha, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
+  }
+
+  batch_rule(self, scalar, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
+  }
+
+  batch_rule(self, other, alpha, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
+  }
+
+  batch_rule(self, scalars, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
+  }
+
+  batch_rule(self, scalar, out);
+}
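+// Within this family, only the *_Tensor_out overloads (e.g.
+// _foreach_add_Tensor_out above) unwrap anything: `other` is a single Tensor,
+// so it is split into (other_value, other_bdim) before the rule is invoked,
+// while the TensorLists pass through untouched.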
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_mul_List_out::call(self, other, out);
+  }
+
+  batch_rule(self, other, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
+  }
+
+  batch_rule(self, scalars, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_mul_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
+  }
+  auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level);
+  batch_rule(self, other_value, other_bdim, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
+  }
+
+  batch_rule(self, scalar, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_div_List_out::call(self, other, out);
+  }
+
+  batch_rule(self, other, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
+  }
+
+  batch_rule(self, scalars, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+void _foreach_div_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_div_Tensor_out::call(self, other, out); + } + auto [other_value, other_bdim] = unwrapTensorAtLevel(other, cur_level); + batch_rule(self, other_value, other_bdim, out); +} +template +void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_max_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_min_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_maximum_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out); + } + + batch_rule(self, scalar, out); +} +template +void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_minimum_List_out::call(self, other, out); + } + + batch_rule(self, other, out); +} +template +void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + 
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out); + } + + batch_rule(self, scalars, out); +} +template +void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out); + } + + batch_rule(self, tensor1, tensor2, value, out); +} +template +void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out); + } + + batch_rule(self, tensor1, tensor2, scalars, out); +} +template +void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out); + } + auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level); + batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out); +} +template +void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out); + } + + batch_rule(self, tensor1, tensor2, value, out); +} +template +void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef scalars, at::TensorList 
out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out); + } + + batch_rule(self, tensor1, tensor2, scalars, out); +} +template +void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out); + } + auto [scalars_value, scalars_bdim] = unwrapTensorAtLevel(scalars, cur_level); + batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out); +} +template +void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_abs_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_acos_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_asin_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_atan_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) { + 
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_ceil_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_cos_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_cosh_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_erf_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_erfc_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_exp_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_expm1_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_floor_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_frac_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out); + } + + batch_rule(self, tensors1, weights, out); +} +template +void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out); + } + + batch_rule(self, tensors1, weight, out); +} +template +void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_lgamma_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if 
(!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log10_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log1p_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_log2_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_max_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_max_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_neg_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, ::std::optional dtype, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_norm_Scalar_out::call(self, ord, dtype, out); + } + + batch_rule(self, ord, dtype, out); +} +template +void _foreach_pow_List_out_generated_plumbing(at::TensorList self, at::TensorList exponent, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_pow_List_out::call(self, exponent, out); + } + + batch_rule(self, exponent, out); +} +template +void _foreach_pow_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & exponent, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out); + } + + batch_rule(self, exponent, out); +} +template +void _foreach_pow_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef exponent, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out); + } + + batch_rule(self, exponent, out); +} +template +void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_reciprocal_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_round_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sigmoid_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sign_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sign_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sin_out::call(self, out); + } + + batch_rule(self, out); +} +template +void 
_foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sinh_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_sqrt_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_tan_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_tanh_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_trunc_out::call(self, out); + } + + batch_rule(self, out); +} +template +void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_zero_out::call(self, out); + } + + batch_rule(self, out); +} +template +::std::vector _foreach_zero_generated_plumbing(at::TensorList self) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level)) { + return at::_ops::_foreach_zero::call(self); + } + + auto results = batch_rule(self); + return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level); +} +template +void 
_foreach_copy_out_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_foreach_copy_out::call(self, src, non_blocking, out); + } + + batch_rule(self, src, non_blocking, out); +} +template +void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, 
beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_adam_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard 
guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, 
weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_adamw_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_adamw_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, 
weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_adamw_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level)); +} +template +void _fused_sgd_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = 
maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_sgd_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +void _fused_sgd_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns"); + int64_t cur_level 
= maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) { + return at::_ops::_fused_sgd_tensor_lr_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out); +} +template +::std::tuple<::std::vector,::std::vector,::std::vector> _fused_sgd_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const ::std::optional & grad_scale, const ::std::optional & found_inf) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + vmap_check_escaped(maybe_layer, "gen_vmap_plumbing"); + int64_t cur_level = maybe_layer->layerId(); + if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) { + return at::_ops::_fused_sgd_tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf); + } + auto [lr_value, lr_bdim] = unwrapTensorAtLevel(lr, cur_level); + std::optional grad_scale_value; + std::optional grad_scale_bdim; + if (grad_scale) { + std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level); + } + std::optional found_inf_value; + std::optional found_inf_bdim; + if (found_inf) { + std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level); + } + auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim); + return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level)); +} +template +void _fused_adagrad_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out) { + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto 
+  maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
+    return at::_ops::_fused_adagrad_out::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf, out);
+  }
+  std::optional<Tensor> grad_scale_value;
+  std::optional<int64_t> grad_scale_bdim;
+  if (grad_scale) {
+    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
+  }
+  std::optional<Tensor> found_inf_value;
+  std::optional<int64_t> found_inf_bdim;
+  if (found_inf) {
+    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
+  }
+  batch_rule(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
+}
+template <typename batch_rule_t, batch_rule_t batch_rule>
+::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adagrad_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList state_sums, at::TensorList state_steps, double lr, double lr_decay, double weight_decay, double eps, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf) {
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
+  int64_t cur_level = maybe_layer->layerId();
+  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(state_sums, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
+    return at::_ops::_fused_adagrad::call(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale, found_inf);
+  }
+  std::optional<Tensor> grad_scale_value;
+  std::optional<int64_t> grad_scale_bdim;
+  if (grad_scale) {
+    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
+  }
+  std::optional<Tensor> found_inf_value;
+  std::optional<int64_t> found_inf_bdim;
+  if (found_inf) {
+    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
+  }
+  auto results = batch_rule(self, grads, state_sums, state_steps, lr, lr_decay, weight_decay, eps, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
+  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
+}
+
+}} // namespace at::functorch
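Every generated wrapper in the file above follows one shape: exclude FuncTorchBatched so the wrapper is not re-entered, look up the current vmap dynamic layer, and, if no argument is batched at that level, fall straight through to the un-batched operator via at::_ops::...::call. Otherwise each batched Tensor is split into a (value, bdim) pair with unwrapTensorAtLevel and handed to the batch rule; ops that return tensors re-wrap the results with makeBatched/makeBatchedVector, while the _out variants simply run the rule for its side effects. A hand-written analogue for a unary op might look roughly like this (a minimal sketch, not generated code: my_unary_plumbing is a hypothetical name, at::abs merely stands in for the un-batched fallback, and the body assumes it lives inside namespace at::functorch like the header above):

// Schematic analogue of the generated plumbing for one Tensor -> Tensor op.
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor my_unary_plumbing(const at::Tensor & self) {
  // Keep FuncTorchBatched from intercepting anything we call from here.
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "my_unary_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    // Nothing is batched at this vmap level: run the plain operator.
    return at::abs(self);
  }
  // value is the underlying tensor; bdim says which dim carries the batch.
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // A batch rule returns the result plus the result's batch dimension.
  auto [result, result_bdim] = batch_rule(self_value, self_bdim);
  // Re-wrap so enclosing vmap levels still see a BatchedTensor.
  return makeBatched(result, result_bdim, cur_level);
}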
+struct IValue;
+using Stack = std::vector<IValue>;
+
+class OperatorHandle;
+class KernelFunction;
+
+// This kernel implements the behavior of falling through to the next available
+// registered dispatch key. The implementation of this function is FAST; there is
+// no overhead to fall through to the next key. See the cpp file for some more
+// implementation notes; notably, this does NOT actually go through the
+// boxing/unboxing codepath.
+TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+
+// Note [Ambiguity in AutogradOther kernel]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// This error-reporting kernel is registered to the AutogradOther entry in the
+// dispatch table when there is both a CompositeImplicitAutograd kernel and a
+// backend kernel for ANY backend that maps to AutogradOther. To see why
+// this is necessary in the AutogradOther case, it's helpful to first see
+// why everything works out fine for a backend that has a reserved Autograd
+// entry (see rule 2.2 in [Note] DispatchTable computation):
+//
+//    CPU   AutogradCPU
+//    reg?  registers with...
+//    -------------------------------------------------
+//    y     Autograd registration takes precedence
+//          over CompositeImplicitAutograd.
+//          This is good, because the CPU specific backend
+//          implementation is more specialized and typically better;
+//          if we used the composite, we would bypass it.
+//          (NB: the Autograd key is guaranteed to exist because
+//          the autograd codegen requires it!)
+//
+//    n     CompositeImplicitAutograd takes precedence.
+//          This is also good, because the Autograd
+//          registration (if it exists) would try to redispatch
+//          to the (non-existent) CPU implementation; by
+//          using the composite, we ensure the operator
+//          actually works.
+//
+// As you can see, when we have a specific Autograd key (AutogradCPU), we can
+// decide whether or not to use the CompositeImplicitAutograd kernel or the
+// Autograd kernel based on whether or not the backend kernel exists.
+//
+// However, for AutogradOther (which is the catchall autograd kernel for
+// everything that doesn't have a specific Autograd key), we can't use this
+// trick because there isn't any unique backend to peek at to disambiguate:
+// backends that have implementations would prefer the Autograd kernel, while
+// unimplemented backends would prefer CompositeImplicitAutograd. Rather than
+// arbitrarily picking one or the other, we just register a kernel that raises
+// an error and let the user decide how to proceed.
+TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+
+// Note [named_not_supported_kernel]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// This kernel implements reporting an error message saying that named tensors
+// are not supported. This kernel doesn't rely on the Stack, and so it is
+// special-cased in the dispatcher to be triggered before we attempt boxing
+// (so we can give a good error message in cases when boxing is not supported).
+// When boxing is universally supported this can be removed.
+[[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+
+/**
+ * BoxedKernel is similar to a std::function storing a boxed kernel.
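+ *
+ * A minimal usage sketch (the kernel and the call site are hypothetical,
+ * not part of this header):
+ *
+ * > void my_kernel(const OperatorHandle& op, Stack* stack) {...}
+ * > BoxedKernel k = BoxedKernel::makeFromFunction<&my_kernel>();
+ * > // given an OperatorHandle `op`, a DispatchKeySet `ks`, and a Stack `stack`:
+ * > k.callBoxed(op, ks, &stack);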
+ */
+class TORCH_API BoxedKernel final {
+public:
+  // This is how boxed kernels are actually stored
+  //
+  // Note [Plumbing Keys Through The Dispatcher]
+  // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
+  // upon every dispatch call in order to compute which kernel to dispatch to.
+  //
+  // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
+  // to have a first argument of type DispatchKeySet.
+  //
+  // What are the invariants of the DispatchKeySet when it gets passed to a kernel?
+  // - All keys to the left of the current dispatch key have been masked out.
+  //   (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
+  // - All other keys that the dispatcher normally would have computed through TLS + global state + op arguments
+  //   are still in the set.
+  //
+  // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
+  // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
+  // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
+  // upon redispatching.
+  //
+  // This is an opt-in mechanism: kernels opt in simply by setting the first argument in their signature
+  // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
+  //
+  // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
+  // See Note [Plumbing Keys Through The Dispatcher 2] for details.
+  using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+  // This is the public API for how boxed kernels are defined
+  using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
+  using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
+
+  BoxedKernel();
+
+  // Fast path for dispatch to allow not touching the boxed kernel in
+  // the common case where unboxed is available.
+  bool isValid() const;
+  bool isFallthrough() const;
+
+  /**
+   * Call the function with boxed arguments.
+   */
+  void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
+
+  /**
+   * Create a BoxedKernel from a boxed function.
+   *
+   * Example:
+   *
+   * > void boxed_func(const OperatorHandle&, Stack* stack) {...}
+   * > BoxedKernel func = BoxedKernel::makeFromFunction<&boxed_func>();
+   */
+  template<BoxedKernelFunction* func>
+  static BoxedKernel makeFromFunction();
+
+  /**
+   * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none).
+   * See Note [Plumbing Keys Through The Dispatcher] for details.
+   */
+  template<BoxedKernelFunction_withDispatchKeys* func>
+  static BoxedKernel makeFromFunction();
+
+  /**
+   * Create a BoxedKernel from a boxed functor.
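+   * The functor's operator() must use the boxed signature
+   * (const OperatorHandle&, DispatchKeySet, Stack*), as in the example below.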
+ * + * Example: + * + * > class MyFunctor final : public c10::OperatorKernel { + * > public: + * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...} + * > }; + * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique()); + */ + template + static BoxedKernel makeFromFunctor(std::unique_ptr kernelFunctor); + + + static BoxedKernel makeFallthrough(); + static BoxedKernel makeAmbiguousAutogradOther(); + static BoxedKernel makeNamedNotSupported(); + +private: + + friend class KernelFunction; + + template + static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack); + + template + static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack); + + explicit BoxedKernel(std::unique_ptr functor, InternalBoxedKernelFunction* boxed_kernel_func); + + OperatorKernel* getFunctor() const; + InternalBoxedKernelFunction* getFnPtr() const; + + c10::intrusive_ptr functor_; + InternalBoxedKernelFunction* boxed_kernel_func_; +}; + +} // namespace c10 + +#include diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h new file mode 100644 index 0000000000000000000000000000000000000000..7b55c2323a2ff0a564d7f8618bb3a10fceb47d7a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction.h @@ -0,0 +1,260 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace. + +class OperatorHandle; +struct OperatorKernel; +class KernelFunction; + +template +using has_symint = + std::disjunction< + std::is_same, + std::is_same, + std::is_same, + std::is_same, T> + >; + +template +struct remove_symint { + using type = T; +}; + +template <> +struct remove_symint { + using type = int64_t; +}; + +template <> +struct remove_symint { + using type = OptionalIntArrayRef; +}; + +template <> +struct remove_symint { + using type = c10::IntArrayRef; +}; + +template <> +struct remove_symint> { + using type = std::optional; +}; + + +template +struct maybe_keep_symint final {}; + +template +struct maybe_keep_symint { using type = T; }; + +template +struct maybe_keep_symint { using type = typename remove_symint::type; }; + +template +using fn_has_symint = typename guts::typelist::true_for_any_type< + has_symint, + typename guts::infer_function_traits::type::parameter_types +>; + +template +struct fn_remove_symint; + +template +struct fn_remove_symint { + using type = Ret(typename remove_symint::type...); +}; + +/** + * KernelFunction is similar to std::function but stores a kernel function. + * You can create a KernelFunction from a boxed or unboxed function/functor/lambda + * and call it in a boxed or unboxed way. If the way it was created doesn't + * match the way it was called, it will do boxing or unboxing as necessary. + */ +class TORCH_API KernelFunction final { +public: + using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction; + using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction; + using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys; + + KernelFunction(); + + // Fast path for dispatch to allow not touching the boxed kernel in + // the common case where unboxed is available. 
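+  // (A KernelFunction keeps two unboxed entry points: one for the plain
+  // int64_t-based signature and one for the SymInt signature. At most one of
+  // the two is populated; see makeFromUnboxedFunctor in KernelFunction_impl.h.)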
+ bool isValidUnboxed() const; + bool isValidSymUnboxed() const; + bool isValid() const; + bool isFallthrough() const; + + /** + * Call the function in a boxed way. + * If the kernel function was created with an unboxed function, + * this will call an unboxing wrapper which then calls into that + * unboxed function. + * + * Example: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func); + * > Tensor result = func.callBoxed(stack); + * + * Or, with an unboxed implementation: + * + * > KernelFunction func = KernelFunction::makeFromUnboxedLambda( + * > [] (Tensor a, bool b) -> Tensor {...}); + * > Tensor result = func.callBoxed(stack); + */ + void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const; + + /** + * Call the function in an unboxed way. + * If the kernel function was created with a boxed function, + * this will box all inputs and then call into that boxed function. + * + * Note that this doesn't work for all types yet. + * + * Example: + * + * > KernelFunction func = KernelFunction::makeFromUnboxedLambda( + * > [] (Tensor a, bool b) -> Tensor {...}); + * > Tensor result = func.call(tensor1, true); + * + * Or, with a boxed implementation: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func); + * > Tensor result = func.call(tensor1, true); + */ + template + Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const; + + /** + * Create a KernelFunction from a BoxedKernel. + */ + static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn); + + /** + * Create a KernelFunction from a boxed function. + * + * Example: + * + * > void boxed_func(OperatorKernel*, Stack* stack) {...} + * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>(); + */ + template + static KernelFunction makeFromBoxedFunction(); + + /** + * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) + * See Note [Plumbing Keys Through The Dispatcher] for details. + */ + template + static KernelFunction makeFromBoxedFunction(); + + /** + * Create a KernelFunction from an unboxed functor. + * + * Example: + * + * > class MyFunctor final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor(std::make_unique()); + */ + template + static KernelFunction makeFromUnboxedFunctor(std::unique_ptr kernelFunctor); + + /** + * Create a KernelFunction from a boxed functor. + * + * Example: + * + * > class MyFunctor final : public c10::OperatorKernel { + * > public: + * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...} + * > }; + * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique()); + */ + template + static KernelFunction makeFromBoxedFunctor(std::unique_ptr kernelFunctor); + + /** + * Create a KernelFunction from an unboxed function. + * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction + * because knowing the function pointer as a template argument (i.e. at + * compile time) allows the compiler to inline the function into its + * unboxing wrapper and yields better performance when calling the function. 
+ * + * Example: + * + * > Tensor unboxed_func(Tensor a, Tensor b) {...} + * > KernelFunction func = KernelFunction::makeFromUnboxedFunction(); + */ + template + static KernelFunction makeFromUnboxedFunction(FuncPtr); + + /** + * Create a KernelFunction from an unboxed function. + * KernelFunction::makeFromUnboxedFunction is usually a better choice than + * this if you know the function pointer at compile time, see doc comment + * there for an explanation. + * + * Example: + * + * > Tensor unboxed_func(Tensor a, Tensor b) {...} + * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func); + */ + template + static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func); + + static KernelFunction makeFallthrough(); + static KernelFunction makeAmbiguousAutogradOther(); + static KernelFunction makeNamedNotSupported(); + + /** + * Create a KernelFunction from an unboxed lambda. + * + * Example: + * + * > KernelFunction func = KernelFunction::makeFromUnboxedLambda( + * > [] (Tensor a, bool b) -> Tensor {...}); + */ + template + static std::enable_if_t>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda); + template + static std::enable_if_t>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda); + + std::string dumpState() const; + // For testing internal invariants only + bool _equalsBoxedAndUnboxed(const KernelFunction&) const; + +private: + + explicit KernelFunction( + std::unique_ptr functor, + InternalBoxedKernelFunction* boxed_kernel_func, + void* unboxed_kernel_func, + void* sym_unboxed_kernel_func); + explicit KernelFunction( + BoxedKernel boxed_fn, + void* unboxed_kernel_func, + void* sym_unboxed_kernel_func); + + BoxedKernel boxed_kernel_func_; + void* unboxed_kernel_func_; + void* sym_unboxed_kernel_func_; +}; + +} + +#include diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..8ba3049157d21ab2b0d9ed6143883e902d3acb1a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/KernelFunction_impl.h @@ -0,0 +1,229 @@ +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +inline KernelFunction::KernelFunction() + : boxed_kernel_func_() + , unboxed_kernel_func_(nullptr) + , sym_unboxed_kernel_func_(nullptr) +{} + +inline KernelFunction::KernelFunction(std::unique_ptr functor, InternalBoxedKernelFunction* boxed_kernel_func, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr) + : boxed_kernel_func_(std::move(functor), boxed_kernel_func) + , unboxed_kernel_func_(unboxed_kernel_func) + , sym_unboxed_kernel_func_(sym_unboxed_kernel_func) +{} + +inline KernelFunction::KernelFunction(BoxedKernel boxed_fn, void* unboxed_kernel_func, void* sym_unboxed_kernel_func = nullptr) + : boxed_kernel_func_(std::move(boxed_fn)) + , unboxed_kernel_func_(unboxed_kernel_func) + , sym_unboxed_kernel_func_(sym_unboxed_kernel_func) +{} + +inline bool KernelFunction::isValidUnboxed() const { + return unboxed_kernel_func_ != nullptr; +} + +inline bool KernelFunction::isValidSymUnboxed() const { + return sym_unboxed_kernel_func_ != nullptr; +} + +inline bool KernelFunction::isValid() const { + return boxed_kernel_func_.isValid(); +} + +inline bool KernelFunction::isFallthrough() const { + return boxed_kernel_func_.isFallthrough(); +} + +inline void 
KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const { + boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack); +} + +template +inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) { + using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...); + ActualSignature* func = reinterpret_cast(unboxed_kernel_func); + return (*func)(functor, dispatchKeySet, std::forward(args)...); +} + +// This template requires you to explicitly specify the argument you want to +// forward; it doesn't work if you try to deduce it +// NB: keep this in sync with cloneWithRealTypes in function_schema.cpp + +template +inline typename remove_symint::type unpackSymInt(T x) { return x; } + +template <> +inline typename remove_symint::type unpackSymInt(c10::SymInt x) { + return x.guard_int(__FILE__, __LINE__); +} + +template <> +inline typename remove_symint::type unpackSymInt(c10::SymIntArrayRef x) { + return C10_AS_INTARRAYREF_SLOW(x); +} + +template <> +inline typename remove_symint>::type unpackSymInt(std::optional x) { + return x.has_value() ? std::make_optional(x->guard_int(__FILE__, __LINE__)) : std::nullopt; +} + +template <> +inline typename remove_symint::type unpackSymInt(at::OptionalSymIntArrayRef x) { + return x.has_value() ? std::make_optional(C10_AS_INTARRAYREF_SLOW(*x)) : std::nullopt; +} + +template +C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const { + // note: Args above is intentionally not Args&&. We don't want perfect + // forwarding, which would require Args to be deduced, but instead we + // want callers to explicitly specify the Args. + + if constexpr (std::disjunction_v...>) { + if (sym_unboxed_kernel_func_ != nullptr) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction( + sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward(args)...); + } + + if (unboxed_kernel_func_ != nullptr) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction::type...>( + unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt(args)...); + } + } else { + if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction( + unboxed_kernel_func_, functor, dispatchKeySet, std::forward(args)...); + } + } + + return impl::BoxedKernelWrapper::call( + boxed_kernel_func_, + opHandle, + dispatchKeySet, + std::forward(args)... 
+ ); +} + +inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) { + return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunction() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunction()); +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunction() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunction()); +} + +inline KernelFunction KernelFunction::makeFallthrough() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFallthrough()); +} + +inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeAmbiguousAutogradOther()); +} + +inline KernelFunction KernelFunction::makeNamedNotSupported() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeNamedNotSupported()); +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr kernelFunctor) { +#ifndef NDEBUG + // This assertion is costly for build time so it's debug-gated. + static_assert(guts::is_functor::value, "Tried to call KernelFunction::makeFromUnboxedFunctor but the argument is not a functor."); +#endif + static_assert(std::is_base_of::value, "Tried to call KernelFunction::makeFromUnboxedFunctor, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + + auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed::call; + void* void_unboxed_fn = reinterpret_cast(unboxed_fn); + bool is_symint = fn_has_symint::value; + return KernelFunction( + std::move(kernelFunctor), + &impl::make_boxed_from_unboxed_functor::call, + is_symint ? nullptr : void_unboxed_fn, + is_symint ? void_unboxed_fn : nullptr + ); +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr kernelFunctor) { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunctor(std::move(kernelFunctor))); +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) { + static_assert(is_compile_time_function_pointer::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN."); + static_assert(!std::is_same::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead."); + static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr"); + +#if !defined(C10_MOBILE) + (void)func_ptr; // Suppress unused variable warning + return makeFromUnboxedFunctor::type>( + guts::make_unique_base::type>() + ); +#else + // On mobile, we rather want to optimize for binary size than for performance, + // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction + // instead. + return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr()); +#endif +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) { + static_assert(guts::is_function_type::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type."); + static_assert(!std::is_same::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. 
Please use KernelFunction::makeFromBoxedFunction instead."); + TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr"); + + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(func) + ); +} + +template +inline std::enable_if_t>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) { + static_assert(guts::is_functor>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type."); + +#if !defined(C10_MOBILE) + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(std::forward(lambda)) + ); +#else + // On mobile, we rather want to optimize for binary size than for performance, + // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction + // instead. + using FuncType = typename guts::infer_function_traits_t>::func_type; + return makeFromUnboxedRuntimeFunction(lambda); +#endif +} + +template +inline std::enable_if_t>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) { + static_assert(guts::is_functor>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type."); + + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(std::forward(lambda)) + ); +} + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..c8d7687cde3f74348ad1f73deacc2af1d03c8da9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace c10 { +namespace impl { + namespace detail { + template class WrapFunctionIntoFunctor_ {}; + template + class WrapFunctionIntoFunctor_> final : public c10::OperatorKernel { + public: + C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) { + return (*FuncPtr::func_ptr())(std::forward(args)...); + } + }; + } + + // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor. + // Since it is a compile time function pointer, many compilers can inline it + // into the wrapper and you don't get any performance overhead for wrapping. 
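+  // A usage sketch (my_kernel is a hypothetical function; TORCH_FN is the
+  // upstream macro that produces a compile-time function pointer):
+  //
+  //   at::Tensor my_kernel(const at::Tensor& a) { return a; }
+  //   using Functor = WrapFunctionIntoFunctor<decltype(TORCH_FN(my_kernel))>::type;
+  //   Functor f;                  // stateless; inherits from OperatorKernel
+  //   at::Tensor out = f(self);   // forwards straight to my_kernel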
+ template + struct WrapFunctionIntoFunctor final { + static_assert(c10::is_compile_time_function_pointer::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN."); + using type = detail::WrapFunctionIntoFunctor_< + FuncPtr, + typename guts::function_traits::return_type, + typename guts::function_traits::parameter_types + >; + }; +} + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..9cd647597d42d461431164ec76a16ccccc75063e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +namespace c10 { + +namespace impl { + namespace detail { + template class WrapFunctionIntoRuntimeFunctor_ {}; + template + class WrapFunctionIntoRuntimeFunctor_> final : public c10::OperatorKernel { + public: + template + explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func) + : kernel_func_(std::forward(kernel_func)) {} + + decltype(auto) operator()(Parameters... args) { + return kernel_func_(std::forward(args)...); + } + + private: + FuncType kernel_func_; + }; + } + + // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that + // inherits from c10::OperatorKernel, so it can be used as a c10 kernel. + // This can, for example, be used for lambdas, functors or even function pointers. + // In the case of function pointers, since it is a runtime function pointer, + // there is an overhead for calling it whenever the kernel is invoked. + template + using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_< + FuncType, + typename guts::infer_function_traits_t::return_type, + typename guts::infer_function_traits_t::parameter_types + >; +} + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h new file mode 100644 index 0000000000000000000000000000000000000000..e109b808ff0c265fa52e15b74a909fc3e510e5bd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h @@ -0,0 +1,395 @@ +#pragma once + +// This file contains boxing (not unboxing) logic, +// i.e. how to make a vector from a set of concrete arguments. + +#include +#include +#include + +#include + +#include +#include + +namespace c10 { +namespace impl { + +// +// utils +// + +// is_mutable_tensor_ref +template struct is_mutable_tensor_ref : std::false_type {}; +template <> struct is_mutable_tensor_ref : std::true_type {}; + +// is_tuple_of_mutable_tensor_refs +// +template +struct is_tuple_of_mutable_tensor_refs : std::false_type {}; + +template +struct is_tuple_of_mutable_tensor_refs::value, void>> +: guts::typelist::all> +{}; + +// has_ivalue_to tests the presence/absence of instance method IValue::to() +// +template +struct has_ivalue_to : std::false_type {}; + +template +struct ivalue_to_helper +{ + using type = decltype(std::declval().template to()); +}; +template +using ivalue_to_helper_t = typename ivalue_to_helper::type; + +template +struct has_ivalue_to>> +: std::true_type +{}; + +// +// boxing predicates +// + +// A boxable arg type is one that IValue has a constructor for. 
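+// For intuition (hypothetical checks; the traits are defined just below):
+//
+//   static_assert(can_box<at::Tensor>::value, "IValue has a Tensor constructor");
+//   static_assert(can_box<c10::TensorOptions>::value, "special-cased for torch::jit::push");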
+template +using can_box = + std::disjunction< + std::is_constructible>, + // TensorOptions are not directly constructible into IValue, + // but torch::jit::push knows how to handle them + std::is_same> + >; + +template +using can_box_all = std::conjunction...>; + +// an unboxable result is one that can be extracted from an IValue +template +using can_unbox = + std::conjunction< + std::disjunction< + has_ivalue_to, + // void returns are ok + std::is_same + >, + std::negation> + >; + +// +// boxArgs - utility for pushing unboxed args onto IValue stack +// +template +torch::jit::Stack boxArgs(Args... args) { + // TODO Reuse stack vector instead of allocating? + torch::jit::Stack stack; + stack.reserve(sizeof...(Args)); + torch::jit::push(stack, std::forward(args)...); + return stack; +} + +template +inline constexpr size_t boxed_size_one() { + static_assert(!std::is_same, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference"); + return 1; +} + +// torch::jit::push pushes 4 values for a TensorOptions; this needs to +// be kept in sync. +template <> +inline constexpr size_t boxed_size_one() { + return 4; +} + +// NOTE: this could probably be simplified with C++17 fold expressions. +template +struct BoxedSize : std::integral_constant {}; +template +struct BoxedSize : std::integral_constant() + BoxedSize::value> {}; + +template +static inline constexpr size_t boxed_size() { + return BoxedSize::value; +} + +using IValueAlignedStorage = std::aligned_storage_t; + +template +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) { + new (&dest[lastIdx]) IValue(arg); + lastIdx++; +} + +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) { + new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype())); + new (&dest[lastIdx++]) IValue(options.layout()); + new (&dest[lastIdx++]) IValue(options.device()); + new (&dest[lastIdx++]) IValue(options.pinned_memory()); +} + +inline void boxArgsToStack(IValueAlignedStorage*, int&) {} + +template +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) { + boxToStack(dest, arg, lastIdx); + boxArgsToStack(dest, lastIdx, args...); +} + +// +// PopResult is a helper class whose specializations handle popping single and +// multiple return values, respectively. +// +template +struct PopResult final { + static Result call(Stack& stack) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return one value on the stack, ", + "but instead pushed ", stack.size(), " values." + ); + return std::move(stack[0]).to(); + } +}; + +template +struct PopResult> final { + using Result = std::tuple; + + static Result call(Stack& stack) { + // for tuple return types, boxed kernel has pushed multiple values onto the stack + constexpr int RetCount = sizeof...(Types); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == RetCount, + "Boxed kernel was expected to return ", RetCount, " values on the stack, ", + "but instead pushed ", stack.size(), " values." + ); + return pop_to_tuple_impl(stack, std::make_index_sequence()); + } +private: + // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise. 
+ // I'm sure there's an incantation that slips it past the parser but eh + template + static Result pop_to_tuple_impl(Stack& stack, std::index_sequence) { + return std::make_tuple((std::move(stack[indices]).to())...); + } +}; + +// +// BoxedKernelWrapper +// +// For a given function type FT, BoxedKernelWrapper implements +// a `call` method that +// - takes a boxed kernel and unboxed arguments as specified by FT, +// - calls `boxArgs` to box the arguments +// - calls the boxed kernel +// - unboxes and returns the result +// +// The partial specializations below handle various cases: in +// particular, not all types appearing in op signatures are supported, +// and ops returning references have nonstandard wrapper implementations. +// + +// 1. The base specialization of BoxedKernelWrapper should never be instantiated. +// A "no call method defined on BoxedKernelWrapper" compile error means that +// an op signature has failed to trigger any of the partial specializations +// that follow this one. +// +template +struct BoxedKernelWrapper { + // The reason we're not just doing straight up static_assert(false, ...) here: + // Basically, the way to make sure a static_assert only fires if a template + // is actually instantiated (rather than every time the file is parsed) is to use + // template parameters in the expression, e.g. FuncType here. However, since + // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same + // effect. + static_assert(sizeof(FuncType) != sizeof(FuncType), + "Function signature contains one or more unsupported parameter and/or return types. " + "Look for a nearby error like " + "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" " + "- (your function type) is the unsupported signature."); +}; + +// +// 2. Supported signatures, other than those involving non-const Tensor refs - +// i.e., "functional" ops. +// + +template +struct BoxedKernelWrapper< + Result(Args...), + std::enable_if_t< + can_box_all::value && can_unbox::value && !is_tuple_of_mutable_tensor_refs::value, + void + > +> { + static Result call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + Args... args + ) { + torch::jit::Stack stack = boxArgs(std::forward(args)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + + if constexpr (!std::is_same_v) { + // op has pushed one or more values onto the stack. + return PopResult::call(stack); + } else { + // op returns void, boxed kernel has pushed nothing onto stack. + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.empty(), + "Boxed kernel was expected to return no values on the stack, ", + "but instead returned ", stack.size(), " values." + ); + } + } +}; + +// +// 3. in-place ops take a single non-const Tensor reference +// as their first argument, and return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// Because of this, the generated BoxedKernelWrapper specializations simply +// return the in-place argument. +// + +template +struct BoxedKernelWrapper< + at::Tensor&(at::Tensor&, OtherArgs...), + std::enable_if_t::value, void> +> { + static at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + at::Tensor& outArg, OtherArgs... 
otherArgs + ) { + torch::jit::Stack stack = boxArgs(outArg, std::forward(otherArgs)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + return outArg; + } +}; + +// +// 3.5. In-process migration to make in-place ops take and return +// const references instead. +template +struct BoxedKernelWrapper< + const at::Tensor&(const at::Tensor&, OtherArgs...), + std::enable_if_t::value, void> +> { + static const at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + const at::Tensor& outArg, OtherArgs... otherArgs + ) { + torch::jit::Stack stack = boxArgs(outArg, otherArgs...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + return outArg; + } +}; + +// +// 4. out of place ops that take a single non-const Tensor reference as their +// final argument, and also return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return out arguments. +// +template +struct BoxedKernelWrapper< + at::Tensor&(FirstArg, RestArgs...), + std::enable_if_t< + can_box_all::value + // this skips over in-place kernels with a non-const Tensor + // arg at the front, so those can unambiguously trigger the preceding specialization. + && !is_mutable_tensor_ref::value, + void + > +> { + static at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + FirstArg firstArg, RestArgs... restArgs + ) { + torch::jit::Stack stack = boxArgs(std::forward(firstArg), std::forward(restArgs)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + // reusing restArgs after it has been forwarded here is ok because we know + // that the last element is of type `Tensor&`. + return std::get(std::tuple{restArgs...}); + } +}; + +// +// 5. out of place ops that take multiple non-const Tensor references as their +// final arguments, and return them in a std::tuple. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return the out arguments. +// +template +struct BoxedKernelWrapper< + Result(Args...), + std::enable_if_t< + can_box_all::value && is_tuple_of_mutable_tensor_refs::value, + void + > +> { + static Result call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + Args... args + ) { + using ArgTuple = std::tuple; + constexpr int RetCount = std::tuple_size(); + + torch::jit::Stack stack = boxArgs(std::forward(args)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == RetCount, + "Boxed kernel was expected to return ", RetCount, " values on the stack, ", + "but instead returned ", stack.size(), " values." 
+ ); + + // reusing args after it has been forwarded here is ok because we know + // that the last RetCount elements are of type `Tensor&`. + auto result = guts::tuple_take(ArgTuple{std::forward(args)...}); + static_assert( + std::is_same::value, + "The parameter list of an op returning a tuple of Tensor references " + "must end with an equal number of Tensor reference parameters." + ); + return result; + } +}; + +} // impl +} // c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h new file mode 100644 index 0000000000000000000000000000000000000000..729691c1cd825a7b8f51ccda6079dc80cc00cfc3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h @@ -0,0 +1,600 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace c10 { + +using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace. +class OperatorHandle; + +/* + * [Note: Argument forwarding in the dispatcher] + * + * The dispatcher uses a somewhat unusual way to forward arguments through several layers of + * wrapper functions. This can be confusing because an experienced C++ programmer would look at this + * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.". + * It is not a bug. The common way in C++ to forward arguments is to use universal references: + * + * > template void func(T&& arg) { func2(std::forward(arg)); } + * + * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument. + * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a + * different reference type than was used in the kernel function. The correct reference type + * is dictated by the kernel signature and must be identical since we cast function pointers + * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines + * the reference type to use by looking at the explicitly supplied operator signature, not by looking at + * the argument we're calling it with. + * + * What does std::forward do, exactly? + * ------------------------------------ + * std::forward(t) is a way to cast t to the reference type supplied in T. + * Let's assume decay_t == U and T is either U or some reference of U. + * - std::forward(t) will return U&, no matter what kind of reference t is. + * - std::forward(t) will return U&&, no matter what kind of reference t is. + * - std::forward(t) will return U&& (not U!), no matter what kind of reference t is. + * + * For universal references, that means that in the following function + * > template void func(T&& arg) { func2(std::forward(arg)); } + * + * - when called with arg being a rvalue reference or non-reference value, T gets inferred to be + * a non-reference U, and std::forward(t) will return U&&, correctly moving the argument. + * - when called with arg behind a lvalue reference, T gets inferred to be U& because that's the only + * way to match the signature (in C++, a type that is (T&)&& will collapse to T&). + * That means std::forward(t) will return U& and the value will not be moved but passed on as + * a lvalue reference. + * + * How do we use that? 
+ * ------------------------------------
+ * But std::forward can also be used outside of the common "universal forwarding" pattern to change
+ * reference types. So instead of following the common C++ pattern, we notice what
+ * std::forward<T>() actually does: it takes a value and casts it to the reference type supplied in T.
+ * If we don't infer T but explicitly specify it, we can use this
+ * to forward based on an explicitly specified reference type instead of the inferred argument type.
+ *
+ * This is why many of the dispatcher functions look like
+ * > template<class T> void func(T t) { func2<T>(std::forward<T>(t)); }
+ * instead of the common
+ * > template<class T> void func(T&& t) { func2(std::forward<T>(t)); }
+ *
+ * and are expected to be called by explicitly specifying the template parameters in a way that matches
+ * the expected operator signature at each call site.
+ */
+
+namespace impl {
+  // supported_primitive_arg_types defines which primitive types we allow in
+  // kernel functions as arguments or returns.
+  // Additionally, we support lists, dicts and optionals containing these types.
+  using supported_primitive_arg_types = guts::typelist::typelist<
+    int64_t,
+    double,
+    bool,
+    c10::string_view,
+    at::Tensor,
+    at::Scalar,
+    c10::QScheme,
+    c10::ScalarType,
+    c10::Device,
+    c10::DeviceIndex,
+    c10::Layout,
+    c10::MemoryFormat,
+    at::Dimname
+  >;
+
+  // We have an unboxed functor in hand that takes C++ arguments, and
+  // we're building a boxed functor wrapper for it that takes IValues.
+  // So "outside" is boxed and "inside" is unboxed.
+  //
+  // So a valid input type is one that our boxed functor wrapper can
+  // unbox from an IValue into a C++ value.
+  //
+  // Whereas a valid output type is one that our wrapper can receive
+  // as a C++ value from the unboxed functor, and box into an IValue.
+
+  //
+  // assert_is_valid_input_type
+  // checks that T can be unboxed from an IValue into a C++ value.
+  //
+
+  template<class T, bool AllowDeprecatedTypes, class Enable = void>
+  struct assert_is_valid_input_type {
+    assert_is_valid_input_type() {
+      if constexpr (guts::typelist::contains<supported_primitive_arg_types, T>::value) {
+        /* everything is ok, this is a primitive type */
+      } else {
+        /* otherwise this must be an instance of a valid custom class, since it can only
+           have been created via IValue(x), which ensures this. */
+      }
+    }
+  };
+
+  template<class T, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<std::optional<T>, AllowDeprecatedTypes>
+  : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
+
+  template <bool AllowDeprecatedTypes, class... Contained>
+  struct TypeCheckHelper;
+
+  template <bool AllowDeprecatedTypes>
+  struct TypeCheckHelper<AllowDeprecatedTypes> {};
+
+  template <bool AllowDeprecatedTypes, class Head, class... Rest>
+  struct TypeCheckHelper<AllowDeprecatedTypes, Head, Rest...>
+  : TypeCheckHelper<AllowDeprecatedTypes, Rest...> {
+    assert_is_valid_input_type<Head, AllowDeprecatedTypes> check;
+  };
+
+  template<class... Contained, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<std::tuple<Contained...>, AllowDeprecatedTypes>
+  : TypeCheckHelper<AllowDeprecatedTypes, Contained...> {};
+
+  template<class Key, class Value, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<Dict<Key, Value>, AllowDeprecatedTypes>
+  : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
+    static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
+      "You tried to register a kernel with an unsupported input type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
+  };
+
+  template<class Key, class Value, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
+  : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
+    static_assert(AllowDeprecatedTypes,
+      "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
+    static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
+      "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value> where Key is invalid.
We only support int64_t, double, bool, and string."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: List. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: ArrayRef. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: OptionalArrayRef. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: std::array. Please use std::array instead."); + }; + + template + struct assert_is_valid_input_type::value>> { + // There is no reason to support float when we have double. Keep the API lean. + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string."); + }; + template + struct assert_is_valid_input_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead."); + }; + template + struct assert_is_valid_input_type, T>::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: vector. Please use List instead."); + }; + template + struct assert_is_valid_input_type::value && !guts::typelist::contains::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string."); + }; + template + struct assert_is_valid_input_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead."); + }; + + // TODO: it probably would be good to tighten this up quite a bit more with + // an explicit list for everything + + // + // assert_is_valid_output_type + // + + template + struct assert_is_valid_output_type { + assert_is_valid_output_type() { + if constexpr(guts::typelist::contains::value) { + /* everything is ok, this is a primitive type */ + } else { + /* otherwise T is verified to be a registered custom class in the IValue + constructor, so no benefit in double-checking here */ + } + } + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type {}; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type {}; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported output type: Dict where Key is invalid. 
We only support int64_t, double, bool, and string."); + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: Dict. Please use Dict or Dict."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(AllowDeprecatedTypes, + "You tried to register a kernel with an unsupported output type: std::unordered_map. Please use Dict instead."); + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported output type: std::unordered_map where Key is invalid. We only support int64_t, double, bool, and string."); + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::unordered_map. Please use Dict or Dict."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: List. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::vector. Please use List, List or Tensor instead."); + // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector. Please use List instead."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::array. Please use std::array instead."); + }; + + // The following specialisations of assert_is_valid_output_type are technically not + // necessary since we would hit the base case and show an error message + // there if they didn't exist, but we can show a better error message + // in some common error scenarios. + template + struct assert_is_valid_output_type::value>> { + // There is no reason to support float when we have double. Keep the API lean. + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string."); + }; + template + struct assert_is_valid_output_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead."); + }; + template + struct assert_is_valid_output_type, T>::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: vector. Please use List instead."); + }; + template + struct assert_is_valid_output_type::value && !guts::typelist::contains::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported integral output type. 
Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string."); + }; + + // ivalue_to_arg + + template + struct decay_if_not_tensor final { + using type = std::decay_t; + }; + + template<> + struct decay_if_not_tensor final { + using type = at::Tensor&; + }; + + template<> + struct decay_if_not_tensor final { + using type = const at::Tensor&; + }; + + template + struct ivalue_to_arg final { + static decltype(auto) call(IValue& v) { + assert_is_valid_input_type(); + return std::move(v).to(); + } + }; + + // The following two specializations take advantage of specialized + // `toTensor()` overloads on IValue to avoid copying. + template + struct ivalue_to_arg final { + // We cannot use the default implementation if they asked for a + // `at::Tensor&` because it moves from the IValue, so it can't get + // an lvalue reference. + static at::Tensor& call(IValue& v) { + // Tensor& is valid, don't bother asserting + return v.toTensor(); + } + }; + + template + struct ivalue_to_arg final { + // We should not use the default implementation if they asked for + // a `const at::Tensor&` because it moves from the IValue and they + // didn't ask for that. + static const at::Tensor& call(IValue& v) { + // const Tensor& is valid, don't bother asserting + return v.toTensor(); + } + }; + + template + struct ivalue_to_arg final { + static List call(IValue& v) { + return v.toTensorList(); + } + }; + + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + // If an argument is ArrayRef, convert the IValue to a std::vector and pass that + // to the operator. std::vector is implicitly convertible to ArrayRef. + static std::vector call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + template + struct ivalue_to_arg final { + static std::vector call(IValue& v) { + if (v.isIntList()) { + std::vector r; + auto src = v.toIntList(); + std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); }); + return r; + } else { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + } + }; + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + static OptionalArray call(IValue& v) { + if (v.isIntList()) { + std::vector r; + auto src = v.toIntList(); + std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); }); + return OptionalArray(std::move(r)); + } else { + return std::move(v).to>(); + } + } + }; + template + struct ivalue_to_arg>, AllowDeprecatedTypes> final { + // If an argument is std::optional>, convert the IValue to an std::optional> and pass that + // to the operator. OptionalArray is basically a std::optional> but implicitly convertible + // to std::optional>. + static OptionalArray call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + // If an argument is OptionalArrayRef, convert the IValue to an + // std::optional> and pass that to the operator. 
OptionalArray + // is basically a std::optional> but implicitly convertible to + // OptionalArrayRef + static OptionalArray call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + + // return_to_ivalue + template + struct return_to_ivalue final {}; + + template + struct return_to_ivalue::value>> final { + static IValue call(T&& v) { + assert_is_valid_output_type(); + return c10::ivalue::from(std::move(v)); + } + static IValue copy(const T& v) { + assert_is_valid_output_type(); + return IValue(v); + } + }; + + // Special case to allow kernels to return `Tensor&`. + // TODO Delete this once kernels don't do that anymore + template + struct return_to_ivalue final { + static IValue call(at::Tensor& v) { + return c10::ivalue::from(v); + } + static IValue copy(at::Tensor& v) { + return IValue(v); + } + }; + + // wrap_kernel_functor_unboxed_ + + template + struct wrap_kernel_functor_unboxed_ final {}; + + // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet + // This includes kernels with 0 arguments. + template + struct wrap_kernel_functor_unboxed_ final { + static_assert(std::is_same::return_type>::value, + "Return type mismatch"); + static_assert(std::is_same, typename guts::infer_function_traits_t::parameter_types>::value, + "Parameter types mismatch"); + + // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use && + static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) { + KernelFunctor* functor_ = static_cast(functor); + // Note [Plumbing Keys Through The Dispatcher 2] + // See Note [Plumbing Keys Through The Dispatcher] for the background. + // This functor explicitly takes in a dispatchKeySet and drops it on the floor- it does not forward it to the registered kernel. + // + // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type + // DispatchKeySet. + // This is not the case for pretty much all manually written kernels, however- this functor serves to separate the calling convention + // of the dispatcher from the calling convention of manually written kernels. + return (*functor_)(std::forward(args)...); + } + }; + + // This specialization is for kernels with a first argument of type DispatchKeySet + template + struct wrap_kernel_functor_unboxed_ final { + static_assert(std::is_same::return_type>::value, + "Return type mismatch"); + static_assert(std::is_same, typename guts::infer_function_traits_t::parameter_types>::value, + "Parameter types mismatch"); + + // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use && + static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) { + KernelFunctor* functor_ = static_cast(functor); + // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel. + // See Note [Plumbing Keys Through The Dispatcher 2] for details. 
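+    // An opted-in kernel might look like this (a hypothetical sketch, not
+    // part of this header):
+    //   at::Tensor my_kernel(DispatchKeySet ks, const at::Tensor& a) {
+    //     // use `ks` to compute the redispatch set directly instead of
+    //     // making the dispatcher re-read TLS on the way back in
+    //     return at::redispatch::clone(ks & c10::after_autograd_keyset, a);
+    //   }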
+ return (*functor_)(dispatchKeySet, std::forward(args)...); + } + }; + + template + using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_::func_type>; + + // call_functor_with_args_from_stack + + template + std::decay_t::return_type> + call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence, guts::typelist::typelist*) { + (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning. + + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + return wrap_kernel_functor_unboxed::call(functor, dispatchKeySet, + ivalue_to_arg::type, AllowDeprecatedTypes>::call( + torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices)) + )...); + } + + template + std::decay_t::return_type> + call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) { + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func::parameter_types; + constexpr size_t num_ivalue_args = guts::typelist::size::value; + return call_functor_with_args_from_stack_(functor, dispatchKeySet, stack, std::make_index_sequence(), static_cast(nullptr)); + } + + // push_outputs + + template + struct push_outputs final { + // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here + // to avoid one extra call to the move constructor in this case. This is still not a + // universal reference though because OutputType is an explicitly specified class + // template parameter. 
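+  // (For example, a kernel returning a single at::Tensor uses this primary
+  // template and pushes one IValue, while tuple returns go through the
+  // std::tuple specialization below, which pushes one IValue per element.)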
+ static void call(OutputType&& output, Stack* stack) { + torch::jit::push(*stack, return_to_ivalue::call(std::forward(output))); + } + static void copy(const OutputType& output, Stack* stack) { + torch::jit::push(*stack, return_to_ivalue::copy(output)); + } + }; + template + struct push_outputs, AllowDeprecatedTypes> final { + static void call(std::tuple&& output, Stack* stack) { + call_(std::move(output), stack, std::make_index_sequence()); + } + static void copy(const std::tuple& output, Stack* stack) { + copy_(output, stack, std::make_index_sequence()); + } + + private: + template + static void call_(std::tuple&& output, Stack* stack, std::index_sequence) { + torch::jit::push(*stack, return_to_ivalue::call(std::forward(std::get(output)))...); + } + template + static void copy_(const std::tuple& output, Stack* stack, std::index_sequence) { + torch::jit::push(*stack, return_to_ivalue::copy(std::get(output))...); + } + }; + template + struct push_outputs final { + static void call(int /*dummy*/, Stack* /*stack*/) { + } + static void copy(int /*dummy*/, Stack* /*stack*/) { + } + }; + + // make_boxed_from_unboxed_functor + + template + struct make_boxed_from_unboxed_functor final { + static_assert(std::is_base_of::value, + "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + + static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) { + using ReturnType = typename guts::infer_function_traits_t::return_type; + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func::parameter_types; + constexpr bool has_outputs = !std::is_same::value; + constexpr size_t num_inputs = guts::typelist::size::value; + if constexpr (has_outputs) { + // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value + // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`. + // [Note: VC++ and 'std': ambiguous symbol] + using ReturnType_ = ::std::decay_t; + ReturnType_ output = call_functor_with_args_from_stack(functor, dispatchKeySet, stack); + torch::jit::drop(*stack, num_inputs); + // See note [ VC++ and 'std': ambiguous symbol] + push_outputs::call(::std::move(output), stack); + } else { + call_functor_with_args_from_stack(functor, dispatchKeySet, stack); + torch::jit::drop(*stack, num_inputs); + } + } + }; +} // namespace impl + +} // namespace c10 + +namespace torch { + using OperatorKernel = c10::OperatorKernel; +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..93b11dc853f00f2ac06ebfd361b6ee02986cfd1f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +template +inline std::vector makeStack(Inputs&&... 
inputs) { + return {std::forward(inputs)...}; +} + +inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) { + auto* allocator = c10::GetCPUAllocator(); + int64_t nelements = 1; + auto dtype = caffe2::TypeMeta::Make(); + int64_t size_bytes = nelements * dtype.itemsize(); + auto storage_impl = c10::make_intrusive( + c10::StorageImpl::use_byte_size_t(), + size_bytes, + allocator->allocate(size_bytes), + allocator, + /*resizable=*/true); + at::Tensor t = at::detail::make_tensor(storage_impl, ks, dtype); + // TODO: We add this to simulate the ideal case where we only have Autograd backend keys + // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl + // constructor by default. + if (!requires_grad) { + t.unsafeGetTensorImpl()->remove_autograd_key(); + } + return t; +} + +inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) { + return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad); +} + +template +inline std::vector callOp(const c10::OperatorHandle& op, Args... args) { + auto stack = makeStack(std::forward(args)...); + op.callBoxed(&stack); + return stack; +} + +template +inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) { + return op.typed().call(std::forward(args)...); +} + +template +inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) { + return op.typed().callWithDispatchKey(dispatchKey, std::forward(args)...); +} + +template +inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) { + return op.typed().redispatch(ks, std::forward(args)...); +} + +inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_ANY_THROW( + callOp(*op, dummyTensor(dispatch_key), 5); + ); +} + +inline void expectDoesntFindOperator(const char* op_name) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_FALSE(op.has_value()); +} + +template +inline void expectThrows(Functor&& functor, const char* expectMessageContains) { + try { + std::forward(functor)(); + } catch (const Exception& e) { + EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains)); + return; + } + ADD_FAILURE() << "Expected to throw exception containing \"" + << expectMessageContains << "\" but didn't throw"; +} + +template +void expectListEquals(c10::ArrayRef expected, std::array actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::ArrayRef actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::List actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual.get(i)); + } +} + +template +void expectListEquals(c10::ArrayRef expected, std::vector actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +// NB: This is not really sound, but all of the type sets constructed here +// are singletons so it's fine +static inline c10::DispatchKey 
extractDispatchKey(const at::Tensor& t) { + return legacyExtractDispatchKey(t.key_set()); +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h new file mode 100644 index 0000000000000000000000000000000000000000..9aef3a0f62cf52a0455908cfb59503e330c5e0c8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct BuiltinOpFunction : public Function { + BuiltinOpFunction( + c10::QualifiedName qualname, + c10::FunctionSchema schema, + std::function callable, + std::string doc_string = "") + : name_(std::move(qualname)), + callable_(std::move(callable)), + schema_(std::move(schema)), + doc_string_(std::move(doc_string)) { + TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1); + } + + c10::string_view doc_string() const override { + return doc_string_; + } + + void run(Stack& stack) override { + callable_(stack); + } + + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher /* not used */) override { + run(stack); + auto res = c10::make_intrusive(stack.front().type()); + res->markCompleted(std::move(stack.front())); + return res; + } + + const c10::QualifiedName& qualname() const override { + return name_; + } + + // if this isn't yet defined, run its method_creator function + void ensure_defined() override { + // nop + } + + const c10::FunctionSchema& getSchema() const override { + return schema_; + } + + size_t num_inputs() const override { + return schema_.arguments().size(); + } + + Function& setSchema(c10::FunctionSchema schema) override { + schema_ = std::move(schema); + return *this; + } + + bool call( + Stack& stack, + std::optional, + c10::function_ref) override { + run(stack); + return false; + } + + bool call(Stack& stack, c10::function_ref) + override { + run(stack); + return false; + } + + ~BuiltinOpFunction() override = default; + + private: + c10::QualifiedName name_; + + std::function callable_; + + c10::FunctionSchema schema_; + + std::string doc_string_; +}; + +} // namespace torch::jit diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..ff9bda981b2906e55449e93a582266888c2eb258 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace c10 { + +struct ClassType; +using ClassTypePtr = std::shared_ptr; + +TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex); + +template +const c10::ClassTypePtr& getCustomClassType() { + // Classes are never unregistered from getCustomClassTypeMap and the + // hash lookup can be a hot path, so just cache. + // For the same reason, it's fine If this ends up getting duplicated across + // DSO boundaries for whatever reason. 
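+  //
+  // Illustrative use (sketch; `MyStackObj` is a hypothetical class registered
+  // elsewhere via torch::class_<MyStackObj>(...)):
+  //
+  //   const c10::ClassTypePtr& t =
+  //       c10::getCustomClassType<c10::intrusive_ptr<MyStackObj>>();
+  //
+  // The function-local static below pays for the hash lookup once per
+  // template instantiation and serves cached results afterwards: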
+ static c10::ClassTypePtr cache = getCustomClassTypeImpl( + std::type_index(typeid(T))); + return cache; +} + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h new file mode 100644 index 0000000000000000000000000000000000000000..0a152a60d923d1753ba6a2f373afad34d28aba02 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +namespace impl { + +// A CppSignature object holds RTTI information about a C++ function signature at runtime +// and can compare them or get a debug-printable name. +class TORCH_API CppSignature final { +public: + CppSignature(const CppSignature&) = default; + CppSignature(CppSignature&&) noexcept = default; + CppSignature& operator=(const CppSignature&) = default; + CppSignature& operator=(CppSignature&&) noexcept = default; + + template + static CppSignature make() { + // Normalize functors, lambdas, function pointers, etc. into the plain function type + // The first argument of the schema might be of type DispatchKeySet, in which case we remove it. + // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered + // with different calling conventions. + // See Note [Plumbing Keys Through The Dispatcher] + using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func>::func_type; + + return CppSignature(std::type_index(typeid(decayed_function_type))); + } + + std::string name() const { + return c10::demangle(signature_.name()); + } + + friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) { + if (lhs.signature_ == rhs.signature_) { + return true; + } + // Without RTLD_GLOBAL, the type_index comparison could yield false because + // they point to different instances of the RTTI data, but the types would + // still be the same. Let's check for that case too. + // Note that there still is a case where this might not work, i.e. when + // linking libraries of different compilers together, they might have + // different ways to serialize a type name. That, together with a missing + // RTLD_GLOBAL, would still fail this. + if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) { + return true; + } + + return false; + } + +private: + explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {} + std::type_index signature_; +}; + +inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) { + return !(lhs == rhs ); +} + +} +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h new file mode 100644 index 0000000000000000000000000000000000000000..b1746ec20a8a8c8ec5045ac1cdc25f995e7bc2f6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h @@ -0,0 +1,242 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +namespace impl { + +// Take a DispatchKeySet for a Tensor and determine what the actual dispatch +// DispatchKey should be, taking into account TLS, and skipping backends which +// fall through. 
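+// For example (sketch): enabling autocast puts an Autocast key into the TLS
+// include set, so a call on a plain CPU tensor can still route to an autocast
+// kernel first, even though the tensor's own key set never contains that key.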
+// +// Unlike Tensor::key_set(), the value of this on a tensor can change depending +// on TLS. +// +// NB: If there is no valid dispatch key, this will return Undefined +inline DispatchKeySet computeDispatchKeySet( + DispatchKeySet ks, + // The key mask lets us eliminate (by zero entries) keys which should not + // be considered for dispatch. There are two cases when we use this: + // + // - If an operator's dispatch table contains a fallthrough entry, we + // should bypass it entirely when finding the key + // - If a user invokes with redispatch, the mask lets us + // zero out the key the user asked us to stop. + // + // These excluded backends are NOT tracked in the TLS, but must be applied + // AFTER TLS (since the backend may have been introduced for consideration + // by the included TLS), which is why you have to pass them in to this + // function (as opposed to just applying it to the input 'ks'). + DispatchKeySet key_mask +) { + c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set(); + // TODO: It's a bit irritating that we have to do logical ORs here, it would + // be nice to only do one. Can always_included be folded into the TLS? Well, + // it's a bit troublesome, because fastpath TLS access requires the type of + // the TLS in question to be zero-initialized, so you don't actually win + // anything in that case. + return (((ks | local.included_) - local.excluded_) & key_mask); +} + +} + +namespace detail { + // A small gadget to extract the DispatchKeySet from types which are known + // to have it. Used to extract dispatch keys from unboxed calls. + struct MultiDispatchKeySet : at::IterArgs { + DispatchKeySet ts; + void operator()(const at::Tensor& x) { + ts = ts | x.key_set(); + } + void operator()(const std::optional& x) { + if (x.has_value()) { + ts = ts | x->key_set(); + } + } + void operator()(at::ArrayRef xs) { + for (const auto& x : xs) { + ts = ts | x.key_set(); + } + } + // Tensor?[] translates to this case. + void operator()(const c10::List>& xs) { + for (std::optional x : xs) { + if (x.has_value()) { + ts = ts | x.value().key_set(); + } + } + } + // Structured Tensor[] translates to this case + void operator()(const at::ITensorListRef& xs) { + for (const auto& x : xs) { + ts = ts | x.key_set(); + } + } + [[noreturn]] void operator()(at::ArrayRef>) { + // Just checking that the handling of Tensor?[] didn't change. + TORCH_INTERNAL_ASSERT(false); + } + void operator()(const at::Generator& gen) { + if (gen.defined()) { + ts = ts | gen.key_set(); + } + } + void operator()(const std::optional& gen) { + if (gen.has_value() && gen->defined()) { + ts = ts | gen->key_set(); + } + } + template + void operator()(const T&) { + // do nothing + } + }; + + // NB: take by const reference (Don't do universal forwarding here! You + // don't want to move into this function!) + template + DispatchKeySet multi_dispatch_key_set(const Args&... args) { + return MultiDispatchKeySet().apply(args...).ts; + } +} + +/** + * An instance of DispatchKeyExtractor knows how to get a dispatch key given + * a list of arguments for an operator call. + * + * The instance is specific for a certain operator as: + * - In boxed dispatch, different operators have different ways to extract + * the dispatch key (e.g. different numbers of arguments), and we precompute + * the stack locations we should look at; and + * - In all dispatch, some backends should be excluded from dispatch because + * they have been registered as fallthrough. 
The set of excluded backends
+ * varies from operator to operator, as some operators may have overridden the
+ * fallthrough with custom behavior.
+ *
+ * Note: this should maintain an identical impl to the Python dispatcher key
+ * extraction logic at pytorch/torch/dispatcher.py
+ */
+struct TORCH_API DispatchKeyExtractor final {
+public:
+  static DispatchKeyExtractor make(const FunctionSchema& schema) {
+    return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
+  }
+
+  static DispatchKeyExtractor makeUninitialized() {
+    return DispatchKeyExtractor(c10::utils::bitset());
+  }
+
+  void registerSchema(const FunctionSchema& schema) {
+    TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
+    dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
+  }
+  void deregisterSchema() {
+    dispatch_arg_indices_reverse_ = c10::utils::bitset();
+  }
+
+  DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
+    DispatchKeySet ks;
+    dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
+      const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
+      if (C10_LIKELY(ivalue.isTensor())) {
+        // NB: Take care not to introduce a refcount bump (there's
+        // no safe toTensorRef method, alas)
+        ks = ks | ivalue.unsafeToTensorImpl()->key_set();
+      } else if (C10_UNLIKELY(ivalue.isTensorList())) {
+        for (const at::Tensor& tensor : ivalue.toTensorList()) {
+          ks = ks | tensor.key_set();
+        }
+      }
+      // Tensor?[] translates to a c10::List<std::optional<at::Tensor>> so we need to peek inside
+      else if (C10_UNLIKELY(ivalue.isList())) {
+        for (const auto& elt : ivalue.toListRef()) {
+          if (elt.isTensor()) {
+            ks = ks | elt.toTensor().key_set();
+          }
+        }
+      }
+    });
+    // Keys that are fallthrough should be skipped
+    if (requiresBitsetPerBackend_) {
+      auto backend_idx = ks.getBackendIndex();
+      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
+    } else {
+      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
+    }
+  }
+
+  template<class... Args>
+  DispatchKeySet getDispatchKeySetUnboxed(const Args&...
args) const {
+    auto ks = detail::multi_dispatch_key_set(args...);
+    // Keys that are fallthrough should be skipped
+    if (requiresBitsetPerBackend_) {
+      auto backend_idx = ks.getBackendIndex();
+      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
+    } else {
+      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
+    }
+  }
+
+  void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);
+
+  std::string dumpState() const;
+  void checkInvariants(const FunctionSchema& schema) const;
+
+private:
+  static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
+    TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
+        "The function schema has ", schema.arguments().size(),
+        " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
+    c10::utils::bitset dispatch_arg_indices_reverse;
+    for (const auto index : c10::irange(schema.arguments().size())) {
+      if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
+          schema.arguments()[index].type()->isSubtypeOf(
+              *ListType::ofTensors()) ||
+          schema.arguments()[index].type()->isSubtypeOf(
+              *ListType::ofOptionalTensors()) ||
+          schema.arguments()[index].type()->isSubtypeOf(
+              *OptionalType::ofTensor())) {
+        dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
+      }
+    }
+    return dispatch_arg_indices_reverse;
+  }
+
+  explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
+  : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
+  , nonFallthroughKeys_(DispatchKeySet::FULL)
+  , requiresBitsetPerBackend_(false) {
+    for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
+      nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
+    }
+  }
+
+  // This is a bitset that has a one for each argument index which has to be
+  // considered for dispatch. This avoids having to iterate over the stack
+  // to find all the tensors. The bits are stored in reverse order, i.e.
+  // if dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
+  // the top of the stack (i.e. the i-th last argument of the function)
+  // is relevant for dispatch.
+  // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
+  // fallthrough
+  c10::utils::bitset dispatch_arg_indices_reverse_;
+
+  // Set of functionality keys for which the operator does NOT have a fallthrough kernel.
+  DispatchKeySet nonFallthroughKeys_;
+  // Set of functionality keys for which the operator does NOT have a fallthrough kernel, defined PER BACKEND.
+  // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
+ std::array nonFallthroughKeysPerBackend_; + // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path), + // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_ + bool requiresBitsetPerBackend_; +}; + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h new file mode 100644 index 0000000000000000000000000000000000000000..d863039b56f5868579e498d348d06bca8997a819 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h @@ -0,0 +1,793 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef NDEBUG +#include +#endif + +namespace c10 { + +TORCH_API bool show_dispatch_trace(); +TORCH_API void dispatch_trace_nesting_incr(); +TORCH_API void dispatch_trace_nesting_decr(); +TORCH_API int64_t dispatch_trace_nesting_value(); + +struct DispatchTraceNestingGuard { + DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); } + ~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); } +}; + +class TORCH_API OperatorHandle; +template class TypedOperatorHandle; + +/** + * Implement this interface and register your instance with the dispatcher + * to get notified when operators are registered or deregistered with + * the dispatcher. + * + * NB: registration events only occur when a 'def' occurs; we don't trigger + * on 'impl' or 'fallback' calls. + */ +class TORCH_API OpRegistrationListener { +public: + virtual ~OpRegistrationListener(); + + virtual void onOperatorRegistered(const OperatorHandle& op) = 0; + virtual void onOperatorDeregistered(const OperatorHandle& op) = 0; +}; + +namespace detail { +class RegistrationListenerList; +} +class SchemaRegistrationHandleRAII; + +/** + * Top-level dispatch interface for dispatching via the dynamic dispatcher. + * Most end users shouldn't use this directly; if you're trying to register + * ops look in op_registration + */ +class TORCH_API Dispatcher final { +private: + // For direct access to backend fallback information + friend class impl::OperatorEntry; + + struct OperatorDef final { + explicit OperatorDef(OperatorName&& op_name) + : op(std::move(op_name)) {} + + impl::OperatorEntry op; + + // These refer to the number of outstanding RegistrationHandleRAII + // for this operator. def_count reflects only def() registrations + // (in the new world, this should only ever be 1, but old style + // registrations may register the schema multiple times, which + // will increase this count). def_and_impl_count reflects the number + // of combined def() and impl() registrations. When the last def() gets + // unregistered, we must immediately call the Deregistered listeners, but we + // must not actually delete the handle as there are other outstanding RAII + // destructors which will try to destruct and they had better still have a + // working operator handle in this case + size_t def_count = 0; + size_t def_and_impl_count = 0; + }; + friend class OperatorHandle; + template friend class TypedOperatorHandle; + + struct Guard final { + Guard() : alive(true), mutex() {} + std::atomic alive; + std::mutex mutex; + }; + +public: + ~Dispatcher(); + + // Implementation note: this class abstracts over the fact that we have per-operator + // dispatch tables. 
This could be easily adjusted to have a single global hash
+  // table.
+  static Dispatcher& realSingleton();
+
+  C10_ALWAYS_INLINE static Dispatcher& singleton() {
+#if !defined C10_MOBILE
+    // Implemented inline so that steady-state code needn't incur
+    // function-call overhead. We can't just inline `realSingleton`
+    // because the function-local static would get duplicated across
+    // all DSOs that include & use this header, leading to multiple
+    // singleton instances.
+    static Dispatcher& s = realSingleton();
+    return s;
+#else
+    // For C10_MOBILE, we should never inline a static function that
+    // has a static member, since the generated code calls
+    // __cxa_guard_acquire and __cxa_guard_release which help
+    // implement exactly once semantics for the initialization of the
+    // static Dispatcher& s above (for the non-mobile case). That
+    // additional code when duplicated across all operator stubs
+    // for every backend results in a lot of additional code
+    // being generated by the compiler.
+    return realSingleton();
+#endif
+  }
+
+  // ------------------------------------------------------------------------
+  //
+  // Accessing operators by schema
+  //
+  // ------------------------------------------------------------------------
+
+  /**
+   * Looks for an operator schema with the given name and overload name
+   * and returns it if it is registered WITH A SCHEMA.
+   * Returns nullopt otherwise.
+   */
+  std::optional<OperatorHandle> findSchema(const OperatorName& operator_name);
+
+  /**
+   * Variant of findSchema that results in less code generated at the call site.
+   * It (1) takes a const char* pointer rather than OperatorName (so we skip
+   * generating std::string constructor calls at the call site), and (2)
+   * it raises an exception if the operator is not found (so we skip
+   * generating exception raising code at the call site).
+   *
+   * Irritatingly, we still have to generate the handful of instructions
+   * for dealing with an exception being thrown during static initialization
+   * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
+   * could avoid this code too, but as the name of the function suggests,
+   * it does throw exceptions.
+   */
+  OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);
+
+  // Like findSchema, but also returns an OperatorHandle even if there is no schema
+  std::optional<OperatorHandle> findOp(const OperatorName& operator_name);
+
+  // Returns a list of all operator names present in the operatorLookupTable_
+  const std::vector<OperatorName> getAllOpNames();
+
+  // ------------------------------------------------------------------------
+  //
+  // Invoking operators
+  //
+  // ------------------------------------------------------------------------
+
+  template<class Return, class... Args>
+  Return call(const TypedOperatorHandle<Return(Args...)>& op, Args... args) const;
+
+  template<class Return, class... Args>
+  static Return callWithDispatchKeySlowPath(const TypedOperatorHandle<Return(Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);
+
+  // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculation.
+  // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest-priority key in the set.
+  // Note that this version of redispatch treats the given DispatchKeySet *as is*, and does NOT mask out the highest-priority key.
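+  // Sketch of typical use (hedged; this mirrors what generated autograd
+  // kernels do): a kernel that has already computed the updated key set
+  // re-enters the dispatcher along the lines of
+  //
+  //   op.redispatch(ks & c10::after_autograd_keyset, std::forward<Args>(args)...);
+  //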
+ // See Note [Plumbing Keys Through The Dispatcher] + template + Return redispatch(const TypedOperatorHandle& op, DispatchKeySet currentDispatchKeySet, Args... args) const; + + // Invoke an operator via the boxed calling convention using an IValue stack + void callBoxed(const OperatorHandle& op, Stack* stack) const; + void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const; + + // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) + // See Note [Plumbing Keys Through The Dispatcher] + void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const; + + bool hasBackendFallbackForDispatchKey(DispatchKey dk) { + auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk); + if (dispatch_ix < 0) return false; + return backendFallbackKernels_[dispatch_ix].kernel.isValid(); + } + + // Used by torchdeploy/multipy for multiple interpreters racing. + void waitForDef(const FunctionSchema& schema); + void waitForImpl(const OperatorName& op_name, std::optional dispatch_key); + + // ------------------------------------------------------------------------ + // + // Performing registrations (NON user public; use op_registration) + // + // ------------------------------------------------------------------------ + + /** + * Register a new operator schema. + * + * If a schema with the same operator name and overload name already exists, + * this function will check that both schemas are exactly identical. + */ + RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector tags = {}); + + /** + * Register a kernel to the dispatch table for an operator. + * If dispatch_key is nullopt, then this registers a fallback kernel. + * + * @return A RAII object that manages the lifetime of the registration. + * Once that object is destructed, the kernel will be deregistered. + */ + // NB: steals the inferred function schema, as we may need to hold on to + // it for a bit until the real schema turns up + RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional dispatch_key, KernelFunction kernel, std::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug); + + /** + * Given an operator, tells the Dispatcher that we have implemented a fake impl + * for this op in the given Python module. Call this a "pystub". + */ + RegistrationHandleRAII registerPythonModule(const OperatorName& op_name, const char* pymodule, const char* context); + + /** + * Given an operator, throws if we have a pystub. + */ + void throwIfHasPythonModule(OperatorName op_name); + + std::optional> getPyStub(OperatorName op_name); + + /** + * Register a new operator by name. + */ + RegistrationHandleRAII registerName(OperatorName op_name); + + /** + * Register a fallback kernel for a backend. + * If an operator is called but there is no concrete kernel for the dispatch + * key of the given operator arguments, it will check if there is such a + * fallback kernel for the given dispatch key and, if yes, call that one. + */ + RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug); + + /** + * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend + * API. These invocations are only permitted once per program, so we raise + * an error if this is called again for the same namespace. 
+ */ + RegistrationHandleRAII registerLibrary(std::string ns, std::string debug); + + // ------------------------------------------------------------------------ + // + // Listeners on registrations + // + // ------------------------------------------------------------------------ + + /** + * Add a listener that gets called whenever a new op is registered or an existing + * op is deregistered. Immediately after registering, this listener gets called + * for all previously registered ops, so it can be used to keep track of ops + * registered with this dispatcher. + */ + RegistrationHandleRAII addRegistrationListener(std::unique_ptr listener); + + void checkInvariants() const; + + // + // ------------------------------------------------------------------------ + // + // Assertions + // + // ------------------------------------------------------------------------ + + /** + * For testing purposes. + * Returns a list of all operators that were created through calls to registerImpl(), + * without any corresponding calls to registerDef(). After static initialization + * is done this is almost certainly a bug, as the created OperatorHandle won't have + * any schema associated with it and users calling the op through the dispatcher + * won't be able to access it + * + * Note that we cannot enforce this invariant "as we go" during static initialization, + * due to undefined static initialization order- we have no guarantees over the order + * in which .def() and .impl() calls are registered in the dispatcher at static + * initialization time. So this function should only be called after static initialization. + */ + std::vector findDanglingImpls() const; + + /** + * Useful for inspecting global Dispatcher registration state. + * Returns the names of all operators with a kernel registered for the specified DispatchKey. + * If no DispatchKey is specified, it returns all registered operators. 
+ */ + std::vector getRegistrationsForDispatchKey(std::optional k) const; + +private: + Dispatcher(); + + static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey, DispatchKeySet dispatchKeySet); + static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet); + static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet, c10::ArrayRef args); + + #ifdef FBCODE_CAFFE2 + static bool profilingOperatorEvents(); + static void fireOpStartUSDT(at::RecordFunction::schema_ref_t schema_ref); + static void fireOpEndUSDT(at::RecordFunction::schema_ref_t schema_ref); + #endif // FBCODE_CAFFE2 + + OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema); + OperatorHandle findOrRegisterName_(const OperatorName& op_name); + + void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name); + void deregisterImpl_( + const OperatorHandle& op, + const OperatorName& op_name, + std::optional dispatch_key, + impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle); + void deregisterName_(const OperatorHandle& op, const OperatorName& op_name); + void deregisterFallback_(DispatchKey dispatchKey); + void deregisterLibrary_(const std::string& ns); + void cleanup(const OperatorHandle& op, const OperatorName& op_name); + void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug); + + std::list operators_; +#if !defined(C10_MOBILE) + LeftRight> operatorLookupTable_; +#else + RWSafeLeftRightWrapper> operatorLookupTable_; +#endif + // Map from namespace to debug string (saying, e.g., where the library was defined) + ska::flat_hash_map libraries_; + + std::array backendFallbackKernels_; + + std::unique_ptr listeners_; + + // This condition variable gets notified whenever we add a new def/impl to the + // dispatch table. This is primarily used by multipy/torchdeploy, when + // we have multiple interpreters trying to register to the dispatch table. + // In this situation, whenever the non-primary interpreter would have tried + // to register to the dispatch table, instead it will check to see if the + // expected registration has already been made, and if it hasn't, wait on + // this condition variable to see if it was just racing with the primary + // interpreter. + // + // We expect it to be rare for there to be any waiters on this condition + // variable. This is mostly just to help give better diagnostics if + // something goes horribly wrong + std::condition_variable cond_var_; + + // Protect concurrent access to the dispatcher. We store this in a + // `shared_ptr` as we return callbacks that call back into dispatcher methods, + // and we need to be able to handle and guard against the event when the + // `Dispatcher` has been destroyed before the callbacks fire. + std::shared_ptr guard_; +}; + +/** + * This is a handle to an operator schema registered with the dispatcher. + * This handle can be used to register kernels with the dispatcher or + * to lookup a kernel for a certain set of arguments. 
+ */ +class TORCH_API OperatorHandle { + template friend struct std::hash; + +public: + OperatorHandle(OperatorHandle&&) noexcept = default; + OperatorHandle& operator=(OperatorHandle&&) noexcept = default; + OperatorHandle(const OperatorHandle&) = default; + OperatorHandle& operator=(const OperatorHandle&) = default; + // NOLINTNEXTLINE(performance-trivially-destructible) + ~OperatorHandle(); + + const OperatorName& operator_name() const { + return operatorDef_->op.operator_name(); + } + + bool hasSchema() const { + return operatorDef_->op.hasSchema(); + } + + const FunctionSchema& schema() const { + return operatorDef_->op.schema(); + } + + const std::string& debug() const { + return operatorDef_->op.debug(); + } + + std::string dumpState() const { + return operatorDef_->op.dumpState(); + } + + bool hasKernelForDispatchKey(DispatchKey k) const { + return operatorDef_->op.hasKernelForDispatchKey(k); + } + + bool isKernelFallthroughKernel(DispatchKey k) const { + return operatorDef_->op.kernelForDispatchKey(k).isFallthrough(); + } + + bool hasKernelForAnyDispatchKey(DispatchKeySet k) const { + return operatorDef_->op.hasKernelForAnyDispatchKey(k); + } + + bool hasComputedKernelForDispatchKey(DispatchKey k) const { + return operatorDef_->op.hasComputedKernelForDispatchKey(k); + } + + std::string dumpComputedTable() const { + return operatorDef_->op.dumpComputedTable(); + } + + void checkInvariants() const { + return operatorDef_->op.checkInvariants(); + } + + c10::ArrayRef getTags() const { + return operatorDef_->op.getTags(); + } + + void setReportErrorCallback_(std::unique_ptr callback) { + operatorDef_->op.setReportErrorCallback_(std::move(callback)); + } + + bool hasTag(const at::Tag& tag) const { + for(const auto& tag_: getTags()) { + if (tag == tag_) { + return true; + } + } + return false; + } + + template + TypedOperatorHandle typed() const { + // NB: This assert is not 100% sound: you can retrieve a typed() operator + // handle prior to ANY C++ signature being registered on the operator + // and the check will say everything is OK (at which point you can then + // smuggle in a kernel that is typed incorrectly). For everything + // in core library this won't happen, because all the static registrations + // will be done by the time a typed() handle is acquired. 
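+    // Typical use (sketch, using the real `aten::add.Tensor` schema):
+    //
+    //   static auto op = c10::Dispatcher::singleton()
+    //       .findSchemaOrThrow("aten::add", "Tensor")
+    //       .typed<at::Tensor(const at::Tensor&, const at::Tensor&, const at::Scalar&)>();
+    //   at::Tensor out = op.call(a, b, /*alpha=*/1);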
+#if !defined C10_MOBILE
+    operatorDef_->op.assertSignatureIsCorrect<FuncType>();
+    if (fn_has_symint<FuncType>::value) {
+      operatorDef_->op.assertSignatureIsCorrect<typename fn_remove_symint<FuncType>::type>();
+    }
+#endif
+    return TypedOperatorHandle<FuncType>(operatorIterator_);
+  }
+
+  void callBoxed(Stack* stack) const {
+    c10::Dispatcher::singleton().callBoxed(*this, stack);
+  }
+
+  void callBoxed(Stack& stack) const {
+    callBoxed(&stack);
+  }
+
+  void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const {
+    c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack);
+  }
+
+  void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
+    c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
+  }
+
+  template <typename F>
+  PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const {
+    return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor);
+  }
+
+  bool operator==(const OperatorHandle& other) const {
+    return operatorDef_ == other.operatorDef_;
+  }
+
+  bool operator!=(const OperatorHandle& other) const {
+    return operatorDef_ != other.operatorDef_;
+  }
+
+private:
+  explicit OperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
+  : operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {}
+  friend class Dispatcher;
+  template<class> friend class TypedOperatorHandle;
+
+  // Storing a direct pointer to the OperatorDef even though we
+  // already have the iterator saves an instruction in the critical
+  // dispatch path. The iterator is effectively a
+  // pointer-to-std::list-node, and (at least in libstdc++'s
+  // implementation) the element is at an offset 16 bytes from that,
+  // because the prev/next pointers come first in the list node
+  // struct. So, an add instruction would be necessary to convert from the
+  // iterator to an OperatorDef*.
+  Dispatcher::OperatorDef* operatorDef_;
+
+  // We need to store this iterator in order to make
+  // Dispatcher::cleanup() fast -- it runs a lot on program
+  // termination (and presumably library unloading).
+  std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
+};
+
+/**
+ * This is a handle to an operator schema registered with the dispatcher.
+ * It holds the same information as an OperatorHandle, but it is templated
+ * on the operator arguments and allows calling the operator in an
+ * unboxed way.
+ */
+template<class FuncType>
+class TypedOperatorHandle final {
+  static_assert(guts::false_t<FuncType>(), "FuncType in OperatorHandle::typed<FuncType> was not a valid function type");
+};
+template<class Return, class... Args>
+class TypedOperatorHandle<Return (Args...)> final : public OperatorHandle {
+public:
+  TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
+  TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
+  TypedOperatorHandle(const TypedOperatorHandle&) = default;
+  TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
+
+  // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+  C10_ALWAYS_INLINE Return call(Args... args) const {
+    return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
+  }
+
+  // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+  C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
+    return c10::Dispatcher::singleton().redispatch<Return, Args...>(*this, currentDispatchKeySet, std::forward<Args>(args)...);
+  }
+
+private:
+  explicit TypedOperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
+  : OperatorHandle(operatorIterator) {}
+  friend class OperatorHandle;
+};
+
+namespace detail {
+template<class... Args> inline void unused_arg_(const Args&...)
{} + +// CaptureKernelCall is intended to capture return values from Dispatcher +// unboxed kernel calls. A record function may request to get outputs from the +// kernel calls. For boxed kernels, it's straightforward, the returned values +// are in the stack object. The stack can be passed to record functions. For +// unboxed kernels, we need to handle different kinds of return values, cache +// them temporarily, then release the values for the actual function call +// return. +template +struct CaptureKernelCall { + template + CaptureKernelCall( + const F& kernel, + const TypedOperatorHandle& op, + const DispatchKeySet& dispatchKeySet, + Args&&... args) + // Calls the kernel and capture the result in output_. + : output_{kernel.template call( + op, + dispatchKeySet, + std::forward(args)...)} {} + // Wraps the return values in a Stack. + Stack getOutputs() { + Stack stack; + impl::push_outputs::copy(output_, &stack); + return stack; + } + // Since we are returning the output_, we don't expect the output_ to be used + // afterward. Copy elision and RVO do not apply to class data members. Using + // move semantic to avoid copies when possible. + ReturnType release() && { + return std::move(output_); + } + + private: + ReturnType output_; +}; + +// Handle the lvalue reference differently since it should not be moved. +template <> +inline at::Tensor& CaptureKernelCall::release() && { + return output_; +} + +// Handle case where the kernel returns void. +template <> +struct CaptureKernelCall { + template + CaptureKernelCall( + const F& kernel, + const TypedOperatorHandle& op, + const DispatchKeySet& dispatchKeySet, + Args&&... args) { + // Calling the kernel and no need to capture void. + kernel.template call( + op, dispatchKeySet, std::forward(args)...); + } + Stack getOutputs() { + return Stack(); + } + void release() && {} +}; + +TORCH_API void _print_dispatch_trace(const std::string& label, const std::string& op_name, const DispatchKeySet& dispatchKeySet); + +} // namespace detail + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) { + // If callbacks need inputs, we box the arguments and pass them to the guard. + // Note: For perf reasons we wouldn't want to prematurely box the arguments. + at::RecordFunction guard(std::move(stepCallbacks)); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved()); + auto dispatchKey = dispatchKeySet.highestPriorityTypeId(); + auto& schema = op.schema(); + auto schema_ref = std::reference_wrapper(schema); + constexpr auto num_boxed_args = impl::boxed_size(); + if constexpr (num_boxed_args != 0) { + if (guard.needsInputs()) { + // If we used std::array here, we would + // have to spend time default constructing the IValues in + // boxedArgs. aligned_storage has no such requirement. + impl::IValueAlignedStorage boxedArgs[num_boxed_args]; + // For debugging only; could be removed (but the compiler will do + // that for us and it's nice to have the extra assurance of + // correctness from our debug builds). + int lastArgIdx = 0; + impl::boxArgsToStack(boxedArgs, lastArgIdx, args...); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args); + // I don't *think* we need std::launder here, because IValue has + // no subclasses and no const or reference fields. 
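+      // (The IValues were placement-constructed into boxedArgs by
+      // boxArgsToStack above, which is why they are destroyed explicitly
+      // in the loop below rather than by any destructor of the storage.)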
+ runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef(reinterpret_cast(boxedArgs), num_boxed_args)); + for (size_t ii = 0; ii < num_boxed_args; ++ii) { + reinterpret_cast(&boxedArgs[ii])->~IValue(); + } + } else { + runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet); + } + } else { + runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet); + } + + if (C10_UNLIKELY(guard.needsOutputs())) { + // Calls the kernel and capture the output temporarily to pass to + // RecordFunction. + detail::CaptureKernelCall captureKernelCall( + kernel, op, dispatchKeySet, std::forward(args)...); + guard.setOutputs(captureKernelCall.getOutputs()); + // Releases the captured output to return to caller. + return std::move(captureKernelCall).release(); + } + + // keeping the guard alive while executing the kernel + return kernel.template call(op, dispatchKeySet, std::forward(args)...); +} + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle& op, Args... args) const { + detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5 + auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor() + .template getDispatchKeySetUnboxed(args...); +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + detail::_print_dispatch_trace("[call]", toString(op.operator_name()), dispatchKeySet); + } +#endif + const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet); +#ifndef PYTORCH_DISABLE_PER_OP_PROFILING + auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) { + return callWithDispatchKeySlowPath(op, *step_callbacks, dispatchKeySet, kernel, std::forward(args)...); + } +#endif // PYTORCH_DISABLE_PER_OP_PROFILING + +#ifdef FBCODE_CAFFE2 + if(profilingOperatorEvents()) { + struct FireOpRAII { + FireOpRAII(at::RecordFunction::schema_ref_t schema_ref) : schema_ref_(schema_ref) { + fireOpStartUSDT(schema_ref); + } + ~FireOpRAII() { fireOpEndUSDT(schema_ref_); } + at::RecordFunction::schema_ref_t schema_ref_; + } event(op.schema()); + return kernel.template call(op, dispatchKeySet, std::forward(args)...); + } else { + return kernel.template call(op, dispatchKeySet, std::forward(args)...); + } +#else + return kernel.template call(op, dispatchKeySet, std::forward(args)...); +#endif // FBCODE_CAFFE2 +} + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +inline Return Dispatcher::redispatch(const TypedOperatorHandle& op, DispatchKeySet currentDispatchKeySet, Args... args) const { + detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5 + // do not use RecordFunction on redispatch +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + detail::_print_dispatch_trace("[redispatch]", toString(op.operator_name()), currentDispatchKeySet); + } +#endif + const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet); + return kernel.template call(op, currentDispatchKeySet, std::forward(args)...); +} + +inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. 
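+  //
+  // Boxed calling convention, for reference (sketch): the caller pushes each
+  // argument as an IValue and reads results back off the same stack, e.g.
+  //
+  //   torch::jit::Stack stack;
+  //   torch::jit::push(stack, self, other, alpha);
+  //   c10::Dispatcher::singleton().callBoxed(op, &stack);
+  //   at::Tensor result = torch::jit::pop(stack).toTensor();
+  //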
+ const auto& entry = op.operatorDef_->op; + auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack); +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + detail::_print_dispatch_trace("[callBoxed]", toString(op.operator_name()), dispatchKeySet); + } +#endif + const auto& kernel = entry.lookup(dispatchKeySet); +#ifndef PYTORCH_DISABLE_PER_OP_PROFILING + auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) { + at::RecordFunction guard(std::move(*step_callbacks)); + auto dispatchKey = dispatchKeySet.highestPriorityTypeId(); + auto& schema = op.schema(); + auto schema_ref = std::reference_wrapper(schema); + guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef(stack->data(), stack->size())) + : runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet); + + // keeping the guard alive while executing the kernel + kernel.callBoxed(op, dispatchKeySet, stack); + + if (C10_UNLIKELY(guard.needsOutputs())) { + guard.setOutputs(*stack); + } + return; + } +#endif // PYTORCH_DISABLE_PER_OP_PROFILING + kernel.callBoxed(op, dispatchKeySet, stack); +} + +// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation +inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. + const auto& entry = op.operatorDef_->op; + // We still compute this as we're obligated to pass it on to the internal + // kernel, if it is a boxed fallback + auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack); + const auto& kernel = ([&]() { + if (op.hasKernelForDispatchKey(dk)) { + return entry.kernelForDispatchKey(dk); + } else { + auto idx = getDispatchTableIndexForDispatchKey(dk); + TORCH_INTERNAL_ASSERT(idx >= 0); + return backendFallbackKernels_[idx].kernel; + } + })(); + kernel.callBoxed(op, dispatchKeySet, stack); +} + +inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. 
+ const auto& entry = op.operatorDef_->op; +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + detail::_print_dispatch_trace("[redispatchBoxed]", toString(op.operator_name()), dispatchKeySet); + } +#endif + const auto& kernel = entry.lookup(dispatchKeySet); + return kernel.callBoxed(op, dispatchKeySet, stack); +} + +} // namespace c10 + +namespace std { + +template <> +struct hash { + size_t operator()(const c10::OperatorHandle& op) const noexcept { + return std::hash{}(static_cast(op.operatorDef_)); + } +}; + +} // namespace std diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..1741171fbf00412647178b2210071cee36928e54 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +struct TORCH_API ObservedOperators { + ObservedOperators() = delete; + + static bool isObserved(const OperatorName& name); + + static std::unordered_set& getUnobservedOperatorList(); +}; + +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h new file mode 100644 index 0000000000000000000000000000000000000000..e273881826364c6eaeac4035360c3d714bc464fa --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h @@ -0,0 +1,313 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#ifdef C10_MOBILE +#define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY +#endif + +namespace c10 { + +class Dispatcher; + +namespace impl { + +// This data structure represents a kernel that was registered to us from a +// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata +// about the kernel that isn't necessary for actual dispatching (this is why +// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for +// giving good error messages. +struct AnnotatedKernel final { + AnnotatedKernel(KernelFunction k, std::unique_ptr s, std::string d) + : kernel(std::move(k)) + , inferred_function_schema(std::move(s)) + , debug(std::move(d)) + {} + AnnotatedKernel() = default; + KernelFunction kernel; + std::unique_ptr inferred_function_schema; + // A little debug string to help us identify the kernel in question. + // Most importantly it records the TORCH_LIBRARY block that did the + // registration. + std::string debug; +}; + +// This data structure represents operator schema, with metadata specifying +// where the registration of this schema occurred +struct AnnotatedSchema final { + AnnotatedSchema(FunctionSchema s, std::string d) + : schema(std::move(s)) + , debug(std::move(d)) + {} + FunctionSchema schema; + std::string debug; +}; + +// Internal data structure that records information about a specific operator. +// It's not part of the public API; typically, users will interact with +// OperatorHandle instead. 
+// +// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher +// lock (this is important because some methods in OperatorEntry access +// dispatcher state) +class TORCH_API OperatorEntry final { +public: + explicit OperatorEntry(OperatorName&& operator_name); + + OperatorEntry(const OperatorEntry&) = delete; + OperatorEntry(OperatorEntry&&) noexcept = delete; + OperatorEntry& operator=(const OperatorEntry&) = delete; + OperatorEntry& operator=(OperatorEntry&&) noexcept = delete; + + const FunctionSchema& schema() const { + TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet"); + return schema_->schema; + } + const std::string& debug() const { + TORCH_INTERNAL_ASSERT(schema_.has_value()); + return schema_->debug; + } + bool hasSchema() const { + return schema_.has_value(); + } + + bool isObserved() const { + return is_observed_; + } + + // We may allocate an OperatorEntry for an operator even when we don't + // have a schema. When we receive the schema registration, we post + // facto register a schema. + // + // NB: registerSchema/deregisterSchema are not idempotent; if you + // attempt to register a schema when one is already present or vice + // versa that is an error. (Refcounting for the registrations is + // handled in the OperatorHandle in Dispatcher) + void registerSchema(FunctionSchema&&, std::string&& debug, std::vector tags = {}); + void deregisterSchema(); + + const OperatorName& operator_name() const { + return name_; + } + +#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY + using AnnotatedKernelContainer = std::array; +#else + using AnnotatedKernelContainer = std::list; +#endif + using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator; + + // Why are kernels and fallback asymmetric? It has to do with ownership. + // Kernels and the computed dispatch tables for them are canonically + // owned by OperatorEntry, but backend fallbacks are specified once + // and apply for all operators, so they should be owned by Dispatcher. + // However, the registration of a backend fallback affects the + // state of the computed dispatch table, so when a backend fallback + // is updated, we need to update the operator tables too. Thus, + // registerKernel is the mechanism by which we give kernels to + // operator entry to own (and update dispatch table), but we only + // need a non-owning mechanism to update fallback. 
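+
+  // (Illustrative, not part of this header) the two registration paths
+  // described above as they appear in user code; `my_ns`, `my_op` and
+  // `my_cpu_kernel` are hypothetical names:
+  //
+  // > // per-operator kernel: ends up owned by this OperatorEntry
+  // > TORCH_LIBRARY_IMPL(my_ns, CPU, m) {
+  // >   m.impl("my_op", &my_cpu_kernel);
+  // > }
+  // >
+  // > // backend-wide boxed fallback: owned by the Dispatcher and merely
+  // > // reflected into each operator's computed dispatch table
+  // > TORCH_LIBRARY_IMPL(_, AutogradOther, m) {
+  // >   m.fallback(torch::CppFunction::makeFallthrough());
+  // > }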
+ + // Precondition: Dispatcher::mutex_ is held + // Postcondition: caller is responsible for disposing of the kernel + AnnotatedKernelContainerIterator registerKernel( + const Dispatcher& dispatcher, + std::optional dispatch_key, + KernelFunction kernel, + std::optional cpp_signature, + std::unique_ptr inferred_function_schema, + std::string debug + ); + + // Precondition: Dispatcher::mutex_ is held + void deregisterKernel_( + const Dispatcher& dispatcher, + std::optional dispatch_key, + AnnotatedKernelContainerIterator kernel + ); + + // Precondition: Dispatcher::mutex_ is held + void updateFallback( + const Dispatcher& dispatcher, + DispatchKey dispatch_key + ); + + // Precondition: Dispatcher::mutex_ is held + void updateSchemaAliasAnalysis(AliasAnalysisKind a) { + TORCH_INTERNAL_ASSERT(schema_.has_value()); + schema_->schema.setAliasAnalysis(a); + } + + std::string dumpComputedTable() const; + std::string dumpState() const; + void checkInvariants() const; + + const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; } + + // Asserts that the given FuncType is correct for calling this operator in an unboxed way. + template + inline void assertSignatureIsCorrect() { + assertSignatureIsCorrect(CppSignature::make(), fn_has_symint::value); + } + + void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const; + + [[noreturn]] void reportError(DispatchKey dispatchKey) const; + + const KernelFunction& lookup(DispatchKeySet ks) const { + const auto idx = ks.getDispatchTableIndexForDispatchKeySet(); + if (C10_UNLIKELY(idx == -1)) { + reportError(ks.highestPriorityTypeId()); + } + const auto& kernel = dispatchTable_[idx]; + // A valid kernel *always* has a boxed kernel and *may* have an + // unboxed kernel. However, we typically do unboxed calls in at:: + // APIs, where the kernel 1) will very likely be valid and 2) + // should have an unboxed kernel. Checking the unboxed kernel + // first will allow us to avoid touching the boxed kernel at all + // in the common case. + if (C10_UNLIKELY(!kernel.isValidUnboxed())) { + if (!kernel.isValid()) { + reportError(ks.highestPriorityTypeId()); + } + } + return kernel; + } + + std::string listAllDispatchKeys() const; + + // Returns true if kernel_ has entry for any key in ks. + // + // Invariant: There are no alias keys in the passed-in dispatch key set. + // Note [No Alias Keys in DispatchKeySet] + // Alias keys should be checked using `hasKernelForDispatchKey` + // Alias keys shouldn't go inside of a DispatchKeySet, since they can technically + // have a value > 63 (causing overflow). + bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const; + // Returns true if kernel_ has entry for a particular key. + bool hasKernelForDispatchKey(DispatchKey k) const; + // Retrieves the kernel entry at a particular key. Symmetric with + // hasKernelForDispatchKey. To get the AnnotatedKernel, see + // getKernelForDispatchKey (private) + const KernelFunction& kernelForDispatchKey(DispatchKey k) const; + // Returns true if the "computed table" has an entry for a particular key. 
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const; + // Returns all the operator tags added at the time of registration + const std::vector& getTags() const; + void setReportErrorCallback_(std::unique_ptr callback); + + template + PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const { + return py_cache_.ptr_or(self_interpreter, slow_accessor); + } + +private: + + OperatorName name_; + std::optional schema_; + #ifndef C10_MOBILE + std::vector tags_; + #endif + std::array dispatchTable_; + DispatchKeyExtractor dispatchKeyExtractor_; + // Pointer to the torch.ops.ns.op.overload object for speed + c10::PyHandleCache py_cache_; + + // kernels_ stores all registered kernels for the corresponding dispatch key + // and catchAllKernels_ stores the catch-all kernels. + // If an operator library gets loaded that overwrites an already existing kernel, + // both kernels will be in that list but only the newer one will be in + // dispatchTable. If any of the kernels go away (say the library gets + // unloaded), we remove the kernel from this list and update the + // dispatchTable if necessary. + // Kernels in the list are ordered by registration time descendingly, + // newer registrations are before older registrations. + // We do not combine dispatchTable and kernels into one hash map because + // kernels is a larger data structure and accessed quite infrequently + // while dispatchTable is accessed often and should be kept small to fit + // into CPU caches. + // Invariants: + // - dispatchTable[dispatch_key] == kernels_[dispatch_key].front() + // - dispatchTable[dispatch_key] does not exist if and only if + // kernels_[dispatch_key] does not exist + // - If kernels_[dispatch_key] exists, then it has elements. + // It is never an empty list. + // + // Why do we do that? + // ----- + // We mostly do this to enable Jupyter notebooks where a cell registering + // a kernel could be executed multiple times and the later execution + // should overwrite the earlier one. Note that this still fails when the + // function schema changed between the executions, but it works as long + // as the function schema didn't change. A better solution would be to + // unload the old extension library from the Jupyter cell when the cell is + // re-executed and then only allow one kernel here, i.e. error if a kernel + // is already registered, but that's a lot of effort to implement and + // currently not high-pri. + ska::flat_hash_map +#else + std::list +#endif + > kernels_; + + const AnnotatedKernel& missingKernel() const; + const AnnotatedKernel& ambiguousAutogradOtherKernel() const; + + // cpp_signature_ stores function signature if any of + // the kernels was created in a way that allowed us to know the function + // signature (i.e. by supplying an unboxed C++ kernel function). + // If this is set, it will be used to check that future kernel + // registrations match and it will be used in unboxed function calls + // to verify their arguments against the known function signature. 
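+  //
+  // (Illustrative, not from the original header) the kind of mismatch this
+  // check catches: if a kernel was registered with the unboxed signature
+  // `Tensor(const Tensor&, int64_t)`, a later access such as
+  //
+  // > op.typed<at::Tensor(const at::Tensor&, double)>()
+  //
+  // fails with a signature-mismatch error instead of silently
+  // reinterpreting the arguments.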
+ struct CppSignatureWithDebug { + CppSignature signature; + std::string debug; + std::optional dispatch_key; + }; + std::optional cpp_signature_; + std::optional sym_cpp_signature_; + + // A Python custom error handler for OperatorEntry::reportError + std::unique_ptr report_error_callback_; + + // Whether this operator needs to be observed with RecordFunction + const bool is_observed_; + + [[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const; + const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const; + std::pair computeDispatchTableEntryWithDebug( + const c10::Dispatcher& dispatcher, DispatchKey dispatch_key + ) const; + // This function re-establishes the invariant that dispatchTable + // contains the front element from the kernels list for a given runtime dispatch key. + void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key); + // Like above, but also handles alias dispatch keys. + void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key); + // Like above, but for ALL entries in the dispatch table. + void updateDispatchTableFull_(const c10::Dispatcher& dispatcher); + // Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front(). + const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const; +}; + +} // namespace impl +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..5c87f93657ac174b341074359e661c8e187421d3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace c10 { + +enum class AliasAnalysisKind : uint8_t { + INTERNAL_SPECIAL_CASE, + CONSERVATIVE, // The most conservative alias analysis type, assumes + // side-effects. This is the default analysis. + FROM_SCHEMA, + PURE_FUNCTION +}; + +#if !defined(_MSC_VER) +constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr. +#endif +inline const char* toString(AliasAnalysisKind aliasAnalysisKind) { + return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE) + ? "CONSERVATIVE" + : (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA) + ? "FROM_SCHEMA" + : (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION) + ? "PURE_FUNCTION" + : (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE) + ? 
"INTERNAL_SPECIAL_CASE" + : "UNKNOWN"; +} + +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h new file mode 100644 index 0000000000000000000000000000000000000000..e6ef2128fd495f873465c98b10ebfad6f1e323df --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace c10 { + +class RegistrationHandleRAII final { +public: + explicit RegistrationHandleRAII(std::function onDestruction) + : onDestruction_(std::move(onDestruction)) {} + + ~RegistrationHandleRAII() { + if (onDestruction_) { + onDestruction_(); + } + } + + RegistrationHandleRAII(const RegistrationHandleRAII&) = delete; + RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete; + + RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept + : onDestruction_(std::move(rhs.onDestruction_)) { + rhs.onDestruction_ = nullptr; + } + + RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept { + onDestruction_ = std::move(rhs.onDestruction_); + rhs.onDestruction_ = nullptr; + return *this; + } + +private: + std::function onDestruction_; +}; + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h new file mode 100644 index 0000000000000000000000000000000000000000..3a1cd234a98acd39bcec47c3c17d2fc2856ceef4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from enum_tag.h + +namespace at { + // Enum of valid tags obtained from the entries in tags.yaml + enum class Tag { + core, + data_dependent_output, + dynamic_output_shape, + flexible_layout, + generated, + inplace_view, + needs_fixed_stride_order, + nondeterministic_bitwise, + nondeterministic_seeded, + pointwise, + pt2_compliant_tag, + view_copy + }; +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h new file mode 100644 index 0000000000000000000000000000000000000000..136fe59e22fb542678d92533250d9792e0e26005 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h @@ -0,0 +1,101 @@ +#pragma once + +#include + +#include + +namespace c10 { + +struct EnumType; +using EnumTypePtr = std::shared_ptr; +using EnumNameValue = std::pair; +struct TORCH_API EnumType : public NamedType { + friend struct Type; + static const TypeKind Kind = TypeKind::EnumType; + + static EnumTypePtr create( + const c10::QualifiedName& qualified_class_name, + TypePtr value, + std::vector enum_names_values, + std::weak_ptr<::torch::jit::CompilationUnit> cu) { + switch (value->kind()) { + case TypeKind::IntType: + case TypeKind::FloatType: + case TypeKind::StringType: + return EnumTypePtr(new EnumType( + qualified_class_name, + std::move(value), + std::move(enum_names_values), + std::move(cu))); + default: + AT_ERROR( + "Cannot create Enum with value type '", + value->str(), + "', only int, float and string are supported"); + } + } + + std::string str() const override { + return "Enum<" + annotation_str() + ">"; + } + + std::string repr_str() const override { + return str(); + } + + const TypePtr& getValueType() const { + 
return value_type_; + } + + bool equals(const Type& rhs) const override { + if (auto* enum_rhs = rhs.castRaw()) { + return name().value() == enum_rhs->name().value() && + *getValueType() == *(enum_rhs->getValueType()) && + this->compilation_unit() == enum_rhs->compilation_unit(); + } + return false; + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + std::shared_ptr compilation_unit() + const { + auto cu = cu_.lock(); + return cu; + } + + const QualifiedName& qualifiedClassName() const { + return name().value(); + } + + at::ArrayRef containedTypes() const override { + return value_type_; + } + + const at::ArrayRef enumNamesValues() const { + return enum_names_values_; + } + + private: + EnumType( + c10::QualifiedName qualified_class_name, + TypePtr value_type, + std::vector enum_names_values, + std::weak_ptr cu) + : NamedType(TypeKind::EnumType, std::move(qualified_class_name)), + value_type_(std::move(value_type)), + enum_names_values_(std::move(enum_names_values)), + cu_(std::move(cu)) {} + + std::string annotation_str_impl( + C10_UNUSED const TypePrinter& printer = nullptr) const override { + const auto& n = name().value(); + return n.qualifiedName(); + } + + TypePtr value_type_; + std::vector enum_names_values_; + std::weak_ptr<::torch::jit::CompilationUnit> cu_; +}; + +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..38942031befcd62ed216002f549b67fb547088b0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h @@ -0,0 +1,355 @@ +#pragma once + +#include + +#include +#include + +namespace c10 { + +#define FORALL_NS_SYMBOLS(_) \ + _(namespaces, prim) \ + _(namespaces, prims) \ + _(namespaces, nvprims) \ + _(namespaces, aten) \ + _(namespaces, cuda) \ + _(namespaces, onnx) \ + _(namespaces, attr) \ + _(namespaces, scope) \ + _(namespaces, user) \ + _(namespaces, _caffe2) \ + _(namespaces, dimname) \ + _(namespaces, namespaces) \ + _(prim, Assign) \ + _(prim, BroadcastingChunk) \ + _(prim, BroadcastSizes) \ + _(prim, ReductionSizes) \ + _(prim, Constant) \ + _(prim, ChunkSizes) \ + _(prim, ConstantMKLDNNTensor) \ + _(prim, BroadcastMKLDNNTensors) \ + _(prim, MKLDNNGroup) \ + _(prim, MKLDNNHardSwish) \ + _(prim, MKLDNNHardSigmoid) \ + _(prim, MKLDNNHardTanh) \ + _(prim, MKLDNNClamp) \ + _(prim, StaticRuntimeCopyOuts) \ + _(prim, Drop) \ + _(prim, Eval) \ + _(prim, Expand) /* onnx */ \ + _(prim, FusionGroup) \ + _(prim, CudaFusionGroup) \ + _(prim, CudaFusionGuard) \ + _(prim, oneDNNFusionGroup) \ + _(prim, oneDNNFusionGuard) \ + _(prim, FunctionalGraph) \ + _(prim, add_optional) \ + _(prim, view_copy) \ + _(prim, permute_copy) \ + _(prim, reshape_copy) \ + _(prim, squeeze_copy) \ + _(prim, t_copy) \ + _(prim, transpose_copy) \ + _(prim, unsqueeze_copy) \ + _(prim, flatten_copy) \ + _(prim, expand_copy) \ + _(prim, expand_as_copy) \ + _(prim, DifferentiableGraph) \ + _(prim, TensorExprGroup) \ + _(prim, TensorExprDynamicGroup) \ + _(prim, StaticSubgraph) \ + _(prim, If) \ + _(prim, Jump) /* debug */ \ + _(prim, JumpNZ) /* debug */ \ + _(prim, JumpZ) /* debug */ \ + _(prim, Load) \ + _(prim, Loop) \ + _(prim, Param) \ + _(prim, PackPadded) /* onnx */ \ + _(prim, PadPacked) /* onnx */ \ + _(prim, Placeholder) /* debug */ \ + _(prim, Print) \ + _(prim, EmptyListLiteral) \ + _(prim, 
LegacyTypedConstructor) \ + _(prim, PythonOp) \ + _(prim, IgnoredPythonOp) \ + _(prim, Reverse) \ + _(prim, Return) \ + _(prim, ReturnStmt) \ + _(prim, BreakStmt) \ + _(prim, ContinueStmt) \ + _(prim, ComprehensionScope) \ + _(prim, Store) \ + _(prim, AutogradZero) \ + _(prim, AutogradAnyNonZero) \ + _(prim, AutogradAllNonZero) \ + _(prim, AutogradAllZero) \ + _(prim, Starred) \ + _(prim, TupleConstruct) \ + _(prim, TupleUnpack) \ + _(prim, TupleIndex) \ + _(prim, TupleSlice) \ + _(prim, ListConstruct) \ + _(prim, ListUnpack) \ + _(prim, DictConstruct) \ + _(prim, ModuleContainerIndex) \ + _(prim, EnumName) \ + _(prim, EnumValue) \ + _(prim, StringIndex) \ + _(prim, NumToTensor) \ + _(prim, Uninitialized) \ + _(prim, VarConcat) \ + _(prim, VarStack) \ + _(prim, With) \ + _(prim, Enter) \ + _(prim, Exit) \ + _(prim, IfThenElse) \ + _(aten, Bool) \ + _(aten, Int) \ + _(aten, FloatImplicit) \ + _(aten, ComplexImplicit) \ + _(aten, IntImplicit) \ + _(aten, ScalarImplicit) \ + _(aten, Float) \ + _(aten, Complex) \ + _(aten, str) \ + _(aten, Delete) \ + _(prim, device) \ + _(prim, dtype) \ + _(prim, layout) \ + _(prim, id) \ + _(prim, requires_grad) \ + _(prim, MakeTestTensor) /* test */ \ + _(prim, AutogradAdd) \ + _(prim, GradOf) \ + _(aten, grad) \ + _(aten, backward) \ + _(prim, Guard) \ + _(prim, BailOut) \ + _(prim, TypeCheck) \ + _(prim, RequiresGradCheck) \ + _(prim, FallbackGraph) \ + _(prim, FusedConcat) \ + _(prim, ConstantChunk) \ + _(prim, MMTreeReduce) \ + _(prim, MMBatchSide) \ + _(prim, list) \ + _(prim, dict) \ + _(prim, min) \ + _(prim, max) \ + _(prim, abs) \ + _(aten, divmod) \ + _(prim, zip) \ + _(prim, enumerate) \ + _(prim, range) \ + _(prim, rangelist) \ + _(prim, isinstance) \ + _(prim, tolist) \ + _(prim, unchecked_cast) \ + _(aten, _grad_sum_to_size) \ + _(aten, _size_if_not_equal) \ + _(aten, _ncf_unsqueeze) \ + _(aten, warn) \ + _(aten, sorted) \ + _(aten, floordiv) \ + _(aten, __range_length) \ + _(aten, __derive_index) \ + _(aten, __round_to_zero_floordiv) \ + _(aten, is_scripting) \ + _(aten, _unwrap_optional) \ + _(prim, fork) \ + _(prim, awaitable) \ + _(prim, forkClosure) \ + _(prim, awaitableClosure) \ + _(prim, awaitable_nowait) \ + _(prim, awaitable_wait) \ + _(prim, RaiseException) \ + _(prim, Closure) \ + _(prim, CreateObject) \ + _(prim, SetAttr) \ + _(prim, GetAttr) \ + _(prim, HasAttr) \ + _(prim, profile) \ + _(prim, profile_ivalue) \ + _(prim, AddStatValue) \ + _(prim, TimePoint) \ + _(prim, CallFunction) \ + _(prim, CallMethod) \ + _(prim, LoopContinuation) \ + _(prim, annotate) \ + _(prim, TracedModuleForward) \ + _(prim, TracedFork) \ + _(prim, TracedAttr) \ + _(prim, rpc_async) \ + _(prim, rpc_sync) \ + _(prim, rpc_remote) \ + _(prim, is_cuda) \ + _(aten, append) \ + _(aten, as_tensor) \ + _(aten, adaptive_avg_pool2d_backward) \ + _(aten, dim) \ + _(aten, format) \ + _(aten, percentFormat) \ + _(aten, __not__) \ + _(aten, __is__) \ + _(aten, __isnot__) \ + _(aten, _ger) \ + _(aten, __getitem__) \ + _(aten, _set_item) \ + _(aten, manual_seed) \ + _(aten, device) \ + _(aten, hash) \ + _(aten, len) \ + _(aten, list) \ + _(aten, dict) \ + _(aten, wait) \ + _(aten, save) \ + _(aten, keys) \ + _(aten, ord) \ + _(aten, chr) \ + _(aten, hex) \ + _(aten, oct) \ + _(aten, clear) \ + _(aten, setdefault) \ + _(aten, bin) \ + _(aten, pop) \ + _(aten, insert) \ + _(aten, tensor) \ + _(prim, unchecked_unwrap_optional) \ + _(aten, __contains__) \ + _(prim, BailoutTemplate) \ + _(prim, grad) \ + _(cuda, _set_device) \ + _(cuda, set_stream) \ + _(cuda, 
_current_device) \ + _(cuda, synchronize) \ + _(aten, has_torch_function) \ + _(aten, is_autocast_enabled) \ + _(aten, is_autocast_cpu_enabled) \ + _(aten, is_autocast_xla_enabled) \ + _(aten, get_autocast_dtype) \ + _(aten, is_autocast_mps_enabled) \ + FORALL_ATEN_BASE_SYMBOLS(_) \ + _(onnx, Add) \ + _(onnx, Concat) \ + _(onnx, Constant) \ + _(onnx, ConstantFill) \ + _(onnx, Div) \ + _(onnx, GRU) \ + _(onnx, Gather) \ + _(onnx, Gemm) \ + _(onnx, LSTM) \ + _(onnx, MatMul) \ + _(onnx, Min) \ + _(onnx, Max) \ + _(onnx, Mul) \ + _(onnx, Pow) \ + _(onnx, RNN) \ + _(onnx, Shape) \ + _(onnx, Size) \ + _(onnx, Slice) \ + _(onnx, Softmax) \ + _(onnx, Squeeze) \ + _(onnx, Sub) \ + _(onnx, Transpose) \ + _(onnx, Unsqueeze) \ + _(onnx, Loop) \ + _(onnx, If) \ + _(onnx, Reshape) \ + _(onnx, Expand) \ + _(onnx, Equal) \ + _(onnx, Greater) \ + _(onnx, GreaterOrEqual) \ + _(onnx, Less) \ + _(onnx, LessOrEqual) \ + _(onnx, Not) \ + _(aten, ATen) \ + _(onnx, Split) \ + _(onnx, ConstantOfShape) \ + _(onnx, Cast) \ + _(onnx, Mod) \ + _(onnx, Sqrt) \ + _(onnx, SplitToSequence) \ + _(onnx, SequenceAt) \ + _(onnx, SequenceConstruct) \ + _(onnx, SequenceEmpty) \ + _(onnx, SequenceInsert) \ + _(onnx, SequenceErase) \ + _(onnx, ConcatFromSequence) \ + _(onnx, Identity) \ + _(onnx, SoftmaxCrossEntropyLoss) \ + _(onnx, NegativeLogLikelihoodLoss) \ + _(onnx, LogSoftmax) \ + _(onnx, ReduceL1) \ + _(onnx, ReduceL2) \ + _(onnx, Conv) \ + _(onnx, BatchNormalization) \ + _(onnx, ReduceMean) \ + _(onnx, ReduceProd) \ + _(onnx, Relu) \ + _(onnx, Neg) \ + _(onnx, NonZero) \ + _(onnx, Range) \ + _(onnx, Tile) \ + _(onnx, Where) \ + _(onnx, Optional) \ + _(onnx, OptionalGetElement) \ + _(onnx, OptionalHasElement) \ + FORALL_ATTR_BASE_SYMBOLS(_) \ + _(attr, Subgraph) \ + _(attr, ReverseSubgraph) \ + _(attr, f_real_outputs) \ + _(attr, df_input_vjps) \ + _(attr, df_input_captured_inputs) \ + _(attr, df_input_captured_outputs) \ + _(attr, df_output_vjps) \ + _(attr, axes) \ + _(attr, symbolic_shape_inputs) \ + _(attr, allow_stack_outputs) \ + _(attr, striding_inputs_desc) \ + _(attr, striding_outputs_desc) \ + _(attr, broadcast) \ + _(attr, direction) \ + _(attr, ends) \ + _(attr, inplace) \ + _(attr, input_as_shape) \ + _(attr, is_zero) \ + _(attr, num_none) \ + _(attr, num_present) \ + _(attr, perm) \ + _(attr, starts) \ + _(attr, profiled_type) \ + _(attr, transA) \ + _(attr, transB) \ + _(attr, name) \ + _(attr, module) \ + _(attr, beg) \ + _(attr, idx) \ + _(attr, split) \ + _(attr, slot) \ + _(attr, kinds) \ + _(attr, types) \ + _(attr, scope) \ + _(attr, keepdims) \ + _(attr, cache_id) \ + _(attr, new_axis) \ + _(attr, warn_id) \ + _(attr, output_layouts) \ + _(attr, allowzero) \ + _(attr, seen_none) \ + _(attr, overload_name) \ + _(attr, node_stack_idx) + +enum class _keys : unique_t { + #define DEFINE_KEY(ns, s) ns##_##s, + FORALL_NS_SYMBOLS(DEFINE_KEY) + #undef DEFINE_KEY + num_symbols +}; + +#define DEFINE_SYMBOL(ns, s) \ + namespace ns { constexpr Symbol s(static_cast(_keys::ns##_##s)); } +FORALL_NS_SYMBOLS(DEFINE_SYMBOL) +#undef DEFINE_SYMBOL + +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h new file mode 100644 index 0000000000000000000000000000000000000000..035a816f84e59bab424afbdc1e00d9412ecb2b57 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h @@ -0,0 +1,83 @@ +#pragma once + +#include 
+#include +#include +#include + +/* + * [Note: hacky wrapper removal for optional tensor] + * + * The kernel implementation takes an optional tensor marked in the schema as + * Tensor? but the C++ function takes Tensor instead of the std::optional + * expected by the dispatcher. + * + * To remove the hacky wrapper, the C++ function is changed to take + * std::optional and unwrap the Tensor value at the beginning of + * the function, e.g.: + * > c10::MaybeOwned weight_maybe_owned = + * > at::borrow_from_optional_tensor(weight_opt); + * > const Tensor& weight = *weight_maybe_owned; + * + * We may want to make the kernel handle optional directly without + * going through the creation of a default-constructed Tensor in + * at::borrow_from_optional_tensor. + */ + +/* + * [Note: hacky wrapper removal for TensorOptions] + * + * The kernel implementation takes a TensorOptions argument but the dispatcher + * expects separate arguments for dtype, layout, device, pin_memory. + * + * To remove the hacky wrapper, the kernel implementation is changed to take + * the 4 arguments (dtype, layout, device, pin_memory), and assemble the + * TensorOptions value at the beginning of the function, e.g.: + * > TensorOptions options = TensorOptions().dtype(dtype).layout(layout) + * > .device(device).pinned_memory(pin_memory); + * + * We may want make the kernel handle these parameters directly without going + * through the creation of a TensorOptions value. + */ + +namespace c10 { +namespace impl { + +TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName); + +inline void check_and_update_common_device(std::optional& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) { + // TODO: Remove this once the following issue is addressed: + // https://github.com/pytorch/pytorch/issues/57380 + if (!tensor.defined()) { + return; + } + + if (!common_device.has_value()) { + common_device = tensor.device(); + return; + } + + if (C10_UNLIKELY(common_device != tensor.device())) { + common_device_check_failure(*common_device, tensor, methodName, argName); + } +} + +inline void check_and_update_common_device(std::optional& common_device, const std::optional& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) { + if (tensor.has_value()) { + check_and_update_common_device(common_device, tensor.value(), methodName, argName); + } +} + +inline void check_and_update_common_device(std::optional& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) { + for (const auto& tensor : tensors) { + check_and_update_common_device(common_device, tensor, methodName, argName); + } +} + +inline void check_and_update_common_device(std::optional& common_device, const List>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) { + for (const auto& tensor : tensors) { + check_and_update_common_device(common_device, tensor, methodName, argName); + } +} +} // namespace impl +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h new file mode 100644 index 0000000000000000000000000000000000000000..2f845f7c4c10f1ac8f48edae0c37c743ef8fea64 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h @@ -0,0 +1,160 @@ +#pragma once + +/** + * This file 
contains functionality to take a C++ function and infer its + * c10::FunctionSchema. + */ + +#include +#include + +namespace c10 { +namespace detail { + +namespace infer_schema { + +/// The templated inference code creates `ArgumentDef` instead of `Argument`, +/// because that can be constructed at compile time and has a much smaller +/// binary size than having calls to `Argument` constructors in the template. +/// Creating `Argument` objects from `ArgumentDef` can then be done at +/// runtime in a non-templated way. +struct ArgumentDef final { + using GetTypeFn = TypePtr(); + GetTypeFn* getTypeFn; + GetTypeFn* getFakeTypeFn; + constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {} + explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {} +}; + +template +struct bool_t {}; +template<> struct bool_t : std::true_type {}; +template<> struct bool_t : std::false_type {}; + +/// Checks the static C++ types `Types` for correctness to catch common error cases. +template +constexpr int checkStaticTypes() { + // Give nice error messages for some of the common error cases. + // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT + static_assert(std::conjunction< + bool_t::value || std::is_same::value || std::is_same::value || std::is_same::value>... + >::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type"); + static_assert(std::conjunction< + bool_t::value>... + >::value, "INVALID TYPE: float is not supported as an argument type, use double instead"); + return 0; +} + +template +constexpr std::array createArgumentVectorFromTypes(std::index_sequence) { + return ( + // Check types for common errors + checkStaticTypes(), + + // Create the return value + std::array{ + ArgumentDef(&getTypePtrCopy>, &getFakeTypePtrCopy>)...} + ); +} + +/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified +/// as template arguments. +template struct createArguments final {}; +template +struct createArguments> final { + static constexpr std::array call() { + return createArgumentVectorFromTypes( + std::make_index_sequence() + ); + } +}; + +/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified +/// as a tuple (i.e. in the way c10 kernels return values). +/// It can be a tuple if there's three output arguments with types A, B, C. +/// It can be an empty tuple<>, or void for kernels that don't return anything. +/// It can be a single type A (i.e. no tuple) for the case where a kernel just +/// returns one value. 
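+/// (Illustrative) e.g. for a kernel with the C++ signature
+/// `std::tuple<at::Tensor, at::Tensor> f(at::Tensor, int64_t)`, the
+/// flattened-returns path below would infer a schema along the lines of
+/// `(Tensor _0, int _1) -> (Tensor _0, Tensor _1)`, with placeholder
+/// argument names generated when the schema is materialized at runtime.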
+template struct createReturns final {}; + +template +struct createReturns, void> final { + static constexpr std::array call() { + return createArgumentVectorFromTypes( + std::make_index_sequence() + ); + } +}; + +template +struct createReturns::value && !guts::is_instantiation_of::value>> final { + static constexpr std::array call() { + return createReturns>::call(); + } +}; + +template<> +struct createReturns final { + static constexpr std::array call() { + return createReturns>::call(); + } +}; + +template +struct createSingleReturn { + static constexpr std::array call() { + return createArgumentVectorFromTypes(std::make_index_sequence<1>()); + } +}; + +TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef arguments, c10::ArrayRef returns); +TORCH_API FunctionSchema make_function_schema(c10::ArrayRef arguments, c10::ArrayRef returns); + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. Flattens std::tuple returns into multiple return types +template +FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createReturns::call(); + + return make_function_schema(arguments, returns); +} + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. Preserves std::tuple returns as a Tuple return type +template +FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createSingleReturn::call(); + + return make_function_schema(std::move(name), std::move(overload_name), arguments, returns); +} + +} +} + +template +FunctionSchema inferFunctionSchemaFlattenedReturns() { + return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns>(); +} + +template +FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) { + return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn>(std::move(name), std::move(overload_name)); +} + +TORCH_API std::optional findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); + +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h new file mode 100644 index 0000000000000000000000000000000000000000..a0fa84c9ec263b686572a88ea94b3440a84f8f4e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h @@ -0,0 +1,199 @@ +#pragma once + +// TODO: unify to C10_MOBILE. In theory this header could be used in OSS. 
+#ifdef TEMPLATE_SELECTIVE_BUILD +#include +#endif + +/** + * This header implements functionality to build PyTorch with only a certain + * set of operators (+ dependencies) included. + * + * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these + * two ops will be included in your build. The allowlist records operators + * only, no overloads; if you include aten::add, all overloads of aten::add + * will be included. + * + * Internally, this is done by removing the operator registration calls + * using compile time programming, and the linker will then prune all + * operator functions that weren't registered. + * See Note [Selective build] for more details + * + * WARNING: The allowlist mechanism doesn't work for all ways you could go about + * registering an operator. If the dispatch key / operator name is not + * sufficiently obvious at compile time, then the allowlisting mechanism + * will fail (and the operator will be included in the binary anyway). + */ + +#include +#include +#include + + +#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) +#include +#endif + +namespace c10 { + +namespace impl { + +constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare + +/** + * In selective build mode returns true/false depending on whether a build + * feature is available or not. + * + * In instrumenting mode (tracing mode), always returns true, and doesn't + * trigger any side effects. + */ +constexpr bool is_build_feature_available(const char* name) { +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) + // Selective Build mode. +#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST) + (void)name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST), + name); +#endif + +#else + // Instrumenting mode. + (void)name; + return true; +#endif +} + +[[noreturn]] void build_feature_required_feature_not_available(const char* feature); + +/** + * Use BUILD_FEATURE_REQUIRED macro in user-code. + * + * In selective build mode becomes a no-op if the build feature passed + * in is available. If not available, throws an exception (c10::Error). + * The compiler is able to perform dead code elimination for code + * following this method if the build feature is not available. + * + * In instrumenting mode (tracing mode), registers (as a side effect) + * the presence of this specific build feature being triggered. + */ +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode + +#if defined(TORCH_BUILD_FEATURE_ALLOWLIST) +#define BUILD_FEATURE_REQUIRED(NAME) \ + if (!c10::impl::is_build_feature_available(NAME)) { \ + ::c10::impl::build_feature_required_feature_not_available(NAME); \ + } +#else // Everything trivially selected +#define BUILD_FEATURE_REQUIRED(NAME) + +#endif + +#else // trace mode +#define BUILD_FEATURE_REQUIRED(NAME) \ + RECORD_FUNCTION_WITH_SCOPE( \ + at::RecordScope::BUILD_FEATURE, \ + std::string(NAME), \ + {}); +#endif + +// Use this macro, and not is_build_feature_available +#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME) + +// returns true iff allowlist contains item +// allowlist_contains("a;bc;d", "bc") == true +constexpr bool allowlist_contains(string_view allowlist, string_view item) { + //Choose a really big value for next so that if something goes wrong + //this code will blow up in a hopefully detectable way. 
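+  // Worked example: allowlist = "aten::add;aten::sub", item = "aten::sub":
+  //   cur=0:  ';' found at 9, substr(0, 9) == "aten::add" != item, next -> 10
+  //   cur=10: no further ';', substr(10) == "aten::sub" == item -> true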
+ size_t next = std::numeric_limits::max(); + for (size_t cur = 0; cur <= allowlist.size(); cur = next) { + next = allowlist.find(';', cur); + if (next != string_view::npos) { + if (allowlist.substr(cur, next - cur).compare(item) == 0) { + return true; + } + next++; + } else { + if (allowlist.substr(cur).compare(item) == 0) { + return true; + } + break; + } + } + return false; +} + +// Returns true iff the given op name is on the allowlist +// and should be registered +constexpr bool op_allowlist_check(string_view op_name [[maybe_unused]]) { + assert(op_name.find("::") != string_view::npos); + // Use assert() instead of throw() due to a gcc bug. See: + // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function + // https://github.com/fmtlib/fmt/issues/682 + assert(op_name.find("(") == string_view::npos); +#if !defined(TORCH_OPERATOR_WHITELIST) + // If the TORCH_OPERATOR_WHITELIST parameter is not defined, + // all ops are to be registered + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_OPERATOR_WHITELIST), + // This function is majorly used for mobile selective build with + // root operators, where the overload is included in the allowlist. + op_name); + // // Strip overload name (as allowlist doesn't contain overloads) + // // Another function based on this may be added when there's usage + // // on op names without overload. + // OperatorNameView::parse(op_name).name); +#endif +} + +// Returns true iff the given schema string is on the allowlist +// and should be registered +constexpr bool schema_allowlist_check(string_view schema) { +#if defined(TORCH_FORCE_SCHEMA_REGISTRATION) + return true; +#else + return op_allowlist_check(schema.substr(0, schema.find("("))); +#endif +} + +// Returns true iff the given custom class name is on the allowlist +// and should be registered +constexpr bool custom_class_allowlist_check(string_view custom_class_name) { +#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST) + // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined, + // all custom classes are to be registered + (void)custom_class_name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST), + custom_class_name); +#endif +} + +// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. +// Add this API to pass arbitrary allowlist. +constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) { + return allowlist_contains(allowlist, schema.substr(0, schema.find("("))); +} + +// Returns true iff the given dispatch key is on the allowlist +// and should be registered. When we turn this on, the list of valid +// mobile dispatch keys is hard coded (but you need to make sure +// that you have the correct set of dispatch keys for this). +constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) { +#ifdef C10_MOBILE + return true; + // Disabled for now: to be enabled later! 
+ // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll; +#else + return true; +#endif +} + +} // namespace impl +} // namespace c10 diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h new file mode 100644 index 0000000000000000000000000000000000000000..f309ee2f277b34c62092f0cf21439018247ac867 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h @@ -0,0 +1,596 @@ +#pragma once + +/** + * Include this file if you want to register operators. It includes all + * functionality needed to do so for you. + */ + +#include +#include +#include +#include +#include +#include +#include +#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) +#include +#endif +#include + +namespace c10 { + +namespace detail { +// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. +// We do this because every argument in a function schema is expected to be convertable +// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. +// See Note [Plumbing Keys Through The Dispatcher] +template +std::unique_ptr inferFunctionSchemaFromFunctor() { + using func_type = typename c10::remove_DispatchKeySet_arg_from_func::func_type; + return std::make_unique(inferFunctionSchemaFlattenedReturns()); +} +} + +/** + * An instance of this class handles the registration for one or more operators. + * Make sure you keep the RegisterOperators instance around since it will + * deregister the operator it's responsible for in its destructor. 
+ * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ +class TORCH_API RegisterOperators final { +public: + RegisterOperators() = default; + ~RegisterOperators() = default; + + RegisterOperators(const RegisterOperators&) = delete; + RegisterOperators& operator=(const RegisterOperators&) = delete; + RegisterOperators(RegisterOperators&&) noexcept = default; + RegisterOperators& operator=(RegisterOperators&&) noexcept = default; + + class TORCH_API Options final { + public: + Options(const Options&) = delete; + Options(Options&&) noexcept = delete; + Options& operator=(const Options&) = delete; + Options& operator=(Options&&) noexcept = delete; + + // internal-only for registering stack based kernels + template + Options&& kernel(DispatchKey dispatch_key) && { + return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction(), std::nullopt, nullptr); + } + + // internal-only for registering stack based catch-all kernels + template + Options&& catchAllKernel() && { + return std::move(*this).kernel(std::nullopt, KernelFunction::makeFromBoxedFunction(), std::nullopt, nullptr); + } + + // internal only for registering caffe2 ops + Options&& schema(FunctionSchema&& schema) { + TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration."); + schemaOrName_ = FunctionSchema(std::move(schema)); + return std::move(*this); + } + + /** + * Use this to specify the schema for an operator. You can also specify + * the operator name only to have the function signature part of the + * schema be inferred from the kernel function. + * + * Example: + * + * > // Infer function signature from my_kernel_cpu + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * > + * > + * > // Explicitly specify full schema + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op(Tensor a) -> Tensor") + * > .kernel(DispatchKey::CPU)); + */ + Options&& schema(const std::string& schemaOrName) { + TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration."); + + #if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD) + throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build."); + #else + schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName); + #endif + + return std::move(*this); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. 
+ * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, "some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) must match one of the constructors of Functor."); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel("some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) 
must match one of the constructors of Functor."); + + return std::move(*this).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel() && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) 
API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel(FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. A functor kernel with cache gets a new instance of + // its cache each time the kernel is looked up from the dispatch table. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // So, instead of making users having to think about it (including the thread-safety + // issues this causes), let's just forbid stateful lambdas altogether. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedLambda(std::forward(functor)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. 
+ * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel([] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> catchAllKernel(Lambda&& lambda) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // This would be a likely source for unexpected race conditions, so we forbid it. + // If a kernel really needs global state, they can just have regular global state + // in their .cpp file next to the kernel lambda. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && { + TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration."); + aliasAnalysisKind_ = aliasAnalysisKind; + return std::move(*this); + } + + private: + Options&& kernel(std::optional dispatch_key, KernelFunction&& func, std::optional cpp_signature, std::unique_ptr&& inferred_function_schema) && { + KernelRegistrationConfig config; + config.dispatch_key = dispatch_key; + config.func = std::move(func); + config.cpp_signature = cpp_signature; + config.inferred_function_schema = std::move(inferred_function_schema); + kernels.push_back(std::move(config)); + return std::move(*this); + } + + Options() + : schemaOrName_(std::nullopt) + , kernels() + , aliasAnalysisKind_(std::nullopt) + {} + + // KernelRegistrationConfig accumulates all information from the config + // parameters passed to a RegisterOperators::op() call into one object. + struct KernelRegistrationConfig final { + KernelRegistrationConfig() + : dispatch_key(std::nullopt) + , func() + , cpp_signature(std::nullopt) + , inferred_function_schema(nullptr) + {} + + std::optional dispatch_key; + KernelFunction func; + std::optional cpp_signature; + std::unique_ptr inferred_function_schema; + }; + + std::optional> schemaOrName_; + + std::vector kernels; + std::optional aliasAnalysisKind_; + friend class RegisterOperators; + friend class Library; + }; + + /** + * Call this to get an instance of registration options, which + * can be passed to a call to RegisterOperators::op() to specify + * these options for the operator registration. + * See class doc comment for examples. + */ + static Options options() { + return {}; + } + + /** + * Call this to register an operator. 
See class doc comment for examples. + */ + RegisterOperators&& op(Options&& options) && { + checkSchemaAndRegisterOp_(std::move(options)); + return std::move(*this); + } + + // Regular mutator version of the && version above + RegisterOperators& op(Options&& options) & { + checkSchemaAndRegisterOp_(std::move(options)); + return *this; + } + + /** + * This is a shorthand for RegisterOperators::op(Options) where you can + * specify the operator schema outside of the options parameter. + * See class doc comment for examples. + */ + RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && { + return std::move(*this).op(std::move(options).schema(schemaOrName)); + } + + // internal only for registering caffe2 ops + RegisterOperators&& op(FunctionSchema schema, Options&& options) && { + return std::move(*this).op(std::move(options).schema(std::move(schema))); + } + + template + explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options()) + : RegisterOperators() { + std::move(*this).op(schemaOrName, std::forward(func), std::move(options)); + } + + /** + * This API registers an operator based on a kernel function pointer. + * + * Given a kernel + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", &my_kernel_cpu); + * + * If your kernel is small and the overhead of calling it matters, + * then this API might be the wrong choice since the following API + * has a slightly lower overhead for calling into the kernel: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + * + * Or, alternatively, write your kernel as a functor: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction. + std::enable_if_t::value && !std::is_same::value, RegisterOperators&&> + op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && { + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + /** + * This API registers an operator based on a kernel lambda. 
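+ * The lambda must be stateless here as well; the overload for capturing
+ * lambdas further below is deprecated.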
+ * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", [] (Tensor a, Tensor b) {...}); + * + * This is equivalent to: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .catchAllKernel([] (Tensor a, Tensor b) {...})); + * + */ + template + // enable_if: only enable it if Lambda is actually a stateless lambda + std::enable_if_t::value && guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + template + C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.") + // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda + std::enable_if_t::value && !guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + std::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + +private: + void checkSchemaAndRegisterOp_(Options&& config); + + static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options); + void checkNoDuplicateKernels_(const Options& options); + void registerOp_(Options&& options); + + std::vector registrars_; +}; + +} // namespace c10 + +namespace torch { + // Old-style API + using RegisterOperators = c10::RegisterOperators; +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h new file mode 100644 index 0000000000000000000000000000000000000000..9bb1bfccc42a1971568346fbb6bce859d0f3018a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h @@ -0,0 +1,14 @@ +/// Flush-To-Zero and Denormals-Are-Zero mode +/// +/// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass +/// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64 +/// and some x86 CPUs. They result in reduced precision for values near zero, +/// but increased performance. 
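+///
+/// A typical use (sketch) is to enable both modes once at startup; the return
+/// value of at::cpu::set_flush_denormal (declared below) reports whether the
+/// mode could actually be set on this CPU:
+///
+///   if (!at::cpu::set_flush_denormal(true)) {
+///     // unsupported CPU: denormals keep their IEEE 754 behavior
+///   }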
+///
+/// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz

namespace at::cpu {

bool set_flush_denormal(bool on);

} // namespace at::cpu
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad918dde7e05995ef96ddd6c8c5676d000abfcc0
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include
+
+#include
+
+namespace at::cpu {
+
+TORCH_API bool is_avx2_supported();
+TORCH_API bool is_avx512_supported();
+
+// Detect if the CPU supports Vector Neural Network Instructions (VNNI).
+TORCH_API bool is_avx512_vnni_supported();
+
+// Detect if the CPU supports the AVX512_BF16 ISA.
+TORCH_API bool is_avx512_bf16_supported();
+
+// Detect if the CPU supports Advanced Matrix Extensions (AMX).
+TORCH_API bool is_amx_tile_supported();
+
+// Enable the system to use AMX instructions.
+TORCH_API bool init_amx();
+
+// Get the L1 data cache size per core in bytes.
+TORCH_API uint32_t L1d_cache_size();
+
+// Get the L2 cache size per core in bytes.
+TORCH_API uint32_t L2_cache_size();
+
+} // namespace at::cpu
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h
new file mode 100644
index 0000000000000000000000000000000000000000..388b3170d5b55a8c4bdd3af4ff982397fb323cb6
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h
@@ -0,0 +1,4 @@
+#pragma once
+
+#include
+#include
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h
new file mode 100644
index 0000000000000000000000000000000000000000..48d44dc42c33ceb8f78eee94d2f088a728598071
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h
@@ -0,0 +1,358 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX] + +#include +#include + +namespace at::vec { + +// slow path +template +inline scalar_t vec_reduce_all( + const Op& vec_fun, + vec::Vectorized acc_vec, + int64_t size) { + using Vec = vec::Vectorized; + scalar_t acc_arr[Vec::size()]; + acc_vec.store(acc_arr); + for (const auto i : c10::irange(1, size)) { + std::array acc_arr_next = {0}; + acc_arr_next[0] = acc_arr[i]; + Vec acc_vec_next = Vec::loadu(acc_arr_next.data()); + acc_vec = vec_fun(acc_vec, acc_vec_next); + } + acc_vec.store(acc_arr); + return acc_arr[0]; +} + +template +struct VecReduceAllSIMD { + static inline scalar_t apply(const Op& vec_fun, const Vectorized& acc_vec) { + return vec_reduce_all(vec_fun, acc_vec, Vectorized::size()); + } +}; + +#if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) +#if defined(CPU_CAPABILITY_AVX2) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, const Vectorized& acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + // 128-bit shuffle + Vec v1 = _mm256_permute2f128_ps(v, v, 0x1); + v = vec_fun(v, v1); + // 64-bit shuffle + v1 = _mm256_shuffle_ps(v, v, 0x4E); + v = vec_fun(v, v1); + // 32-bit shuffle + v1 = _mm256_shuffle_ps(v, v, 0xB1); + v = vec_fun(v, v1); + return _mm256_cvtss_f32(v); + } +}; +#endif // defined(CPU_CAPABILITY_AVX2) +#if defined(CPU_CAPABILITY_AVX512) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, const Vectorized& acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + // 256-bit shuffle + Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E); + v = vec_fun(v, v1); + // 128-bit shuffle + v1 = _mm512_shuffle_f32x4(v, v, 0xB1); + v = vec_fun(v, v1); + // 64-bit shuffle + v1 = _mm512_shuffle_ps(v, v, 0x4E); + v = vec_fun(v, v1); + // 32-bit shuffle + v1 = _mm512_shuffle_ps(v, v, 0xB1); + v = vec_fun(v, v1); + return _mm512_cvtss_f32(v); + } +}; +#endif // defined(CPU_CAPABILITY_AVX512) +#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) + +#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, const Vectorized& acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + + // 128-bit shuffle: [a1, a2, a3, a4, a5, a6, a7, a8] -> [a5, a6, a7, a8, a1, a2, a3, a4] + Vec v1 = {v.get_high(), v.get_low()}; + // [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] ('+' stands for the reduction function. 
Note that the last 4 elements are not required) + v = vec_fun(v, v1); + + // 64-bit shuffle: [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] -> [a3+a7, a4+a8, a1+a5, a2+a6, -, -, -, -] + float32x4_t v1_1 = vextq_f32(v.get_low(), v.get_low(), 2); + v1 = {v1_1, v1_1}; + // [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] + v = vec_fun(v, v1); + + // 32-bit shuffle: [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] -> [a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, -, -, -, -] + v1_1 = vrev64q_f32(v.get_low()); + v1 = {v1_1, v1_1}; + // [a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, -, -, -, -] + v = vec_fun(v, v1); + + return v.get_low()[0]; + } +}; +#endif // defined(__aarch64__) + +template +inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized& acc_vec) { + return VecReduceAllSIMD::apply(vec_fun, acc_vec); +} + +template , int> = 0> +inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) + return vec_reduce_all(vec_fun, Vec::loadu(data, size), size); + int64_t d = Vec::size(); + Vec acc_vec = Vec::loadu(data); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + acc_vec = vec_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(vec_fun, acc_vec); +} + +// similar to reduce_all, but reduces into two outputs +template , int> = 0> +inline std::pair reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, + const scalar_t* data, int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + auto loaded_data = Vec::loadu(data, size); + return std::pair( + vec_reduce_all(vec_fun1, loaded_data, size), + vec_reduce_all(vec_fun2, loaded_data, size)); + } + int64_t d = Vec::size(); + Vec acc_vec1 = Vec::loadu(data); + Vec acc_vec2 = Vec::loadu(data); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + acc_vec1 = vec_fun1(acc_vec1, data_vec); + acc_vec2 = vec_fun2(acc_vec2, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d); + acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d); + } + return std::pair( + vec_reduce_all(vec_fun1, acc_vec1), + vec_reduce_all(vec_fun2, acc_vec2)); +} + +template , int> = 0> +inline scalar_t map_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) + return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size); + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + data_vec = map_fun(data_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + data_vec = map_fun(data_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template , int> = 0> +inline scalar_t map2_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + 
Vec data_vec = Vec::loadu(data, size); + Vec data2_vec = Vec::loadu(data2, size); + data_vec = map_fun(data_vec, data2_vec); + return vec_reduce_all(red_fun, data_vec, size); + } + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + Vec data2_vec = Vec::loadu(data2 + d); + data_vec = map_fun(data_vec, data2_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + Vec data2_vec = Vec::loadu(data2 + d, size - d); + data_vec = map_fun(data_vec, data2_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template , int> = 0> +inline scalar_t map3_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + const scalar_t* data3, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + Vec data_vec = Vec::loadu(data, size); + Vec data2_vec = Vec::loadu(data2, size); + Vec data3_vec = Vec::loadu(data3, size); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + return vec_reduce_all(red_fun, data_vec, size); + } + + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + Vec data2_vec = Vec::loadu(data2 + d); + Vec data3_vec = Vec::loadu(data3 + d); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + Vec data2_vec = Vec::loadu(data2 + d, size - d); + Vec data3_vec = Vec::loadu(data3 + d, size - d); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template , int> = 0> +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec output_vec = vec_fun(Vec::loadu(input_data + d)); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d)); + output_vec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map2( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + const scalar_t* input_data2, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(input_data + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec output_vec = vec_fun(data_vec, data_vec2); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(input_data + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec output_vec = vec_fun(data_vec, data_vec2); + output_vec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map3( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec1 = Vec::loadu(input_data1 + d); + 
Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec data_vec3 = Vec::loadu(input_data3 + d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); + output_vec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map4( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + const scalar_t* input_data4, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec1 = Vec::loadu(input_data1 + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec data_vec3 = Vec::loadu(input_data3 + d); + Vec data_vec4 = Vec::loadu(input_data4 + d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); + Vec data_vec4 = Vec::loadu(input_data4 + d, size - d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); + output_vec.store(output_data + d, size - d); + } +} + +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..3bd22b3820f0b13d6d518329dd7df687ced37948 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h @@ -0,0 +1,549 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include + +namespace at::vec { + +// BFloat16 specification +template struct VecScalarType { using type = scalar_t; }; +template <> struct VecScalarType { using type = float; }; +template <> struct VecScalarType { using type = float; }; + +// This is different from at::acc_type since we only need to specialize BFloat16 +template +using vec_scalar_t = typename VecScalarType::type; + +// Vector conversion between float and bfloat16/half +template , int> = 0> +inline std::tuple, Vectorized> convert_to_float(const Vectorized&); + +template <> +inline std::tuple, Vectorized> convert_to_float (const Vectorized& a) { + return convert_bfloat16_float(a); +} + +template <> +inline std::tuple, Vectorized> convert_to_float (const Vectorized& a) { + return convert_half_float(a); +} + +template , int> = 0> +inline Vectorized convert_from_float(const Vectorized&, const Vectorized&); + +template <> +inline Vectorized convert_from_float(const Vectorized& a, const Vectorized& b) { + return convert_float_bfloat16(a, b); +} + +template <> +inline Vectorized convert_from_float(const Vectorized& a, const Vectorized& b) { + return convert_float_half(a, b); +} + +template , int> = 0> +inline void load_to_float(const scalar_t *data, Vectorized &out1, Vectorized &out2); + +template <> +inline void load_to_float (const BFloat16 *data, Vectorized &out1, Vectorized &out2) { + load_fp32_from_bf16(data, out1, out2); +} + +template <> +inline void load_to_float (const Half *data, Vectorized &out1, Vectorized &out2) { + load_fp32_from_fp16(data, out1, out2); +} + +template , int> = 0> +inline void load_to_float(const scalar_t *data, Vectorized &out); + +template <> +inline void load_to_float (const BFloat16 *data, Vectorized &out) { + load_fp32_from_bf16(data, out); +} + +template <> +inline void load_to_float (const Half *data, Vectorized &out) { + load_fp32_from_fp16(data, out); +} + +// Note that we already have specialized member of Vectorized for BFloat16 +// so the following functions would run smoothly: +// using Vec = Vectorized; +// Vec one = Vec(BFloat16(1)); +// vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N); +// +// Then why we still need to specialize "functional"? +// If we do specialization at Vectorized<> level, the above example would need 3 pairs of +// conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/". +// If we do specialization at vec::map<>() level, we have only 1 pair of conversion +// of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only. +// +// The following BFloat16 functionality will only do data type conversion for input +// and output vector (reduce functionality will only convert the final scalar back to bf16). +// Compared to Vectorized<> specialization, +// 1. better performance since we have less data type conversion; +// 2. less rounding error since immediate results are kept in fp32; +// 3. accumulation done on data type of fp32. 
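+//
+// For example (a sketch), a sigmoid written against float vectors can be
+// applied directly to a BFloat16 buffer, paying only one bf16->fp32/fp32->bf16
+// round trip per vector (bf16_out/bf16_in are illustrative pointers):
+//   using fVec = vec::Vectorized<float>;
+//   vec::map([](fVec x) { return fVec(1.0f) / (fVec(1.0f) + x.neg().exp()); },
+//            bf16_out, bf16_in, n);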
+// +// If you plan to extend this file, please ensure adding unit tests at +// aten/src/ATen/test/vec_test_all_types.cpp +// +template , int> = 0> +inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size > fVec::size()) { + data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(vec_fun, data_fvec0, fVec::size()); + } else { + return vec_reduce_all(vec_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); + acc_fvec1 = vec_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size - d > fVec::size()) { + acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(vec_fun, acc_fvec0); +} + +template , int> = 0> +inline std::pair reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, + const scalar_t* data, int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size > fVec::size()) { + fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size()); + fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size()); + return std::pair( + vec_reduce_all(vec_fun1, acc1_fvec, fVec::size()), + vec_reduce_all(vec_fun2, acc2_fvec, fVec::size())); + } else { + return std::pair( + vec_reduce_all(vec_fun1, data_fvec0, size), + vec_reduce_all(vec_fun2, data_fvec0, size)); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc1_fvec0, acc1_fvec1] = convert_to_float(acc_bvec); + auto [acc2_fvec0, acc2_fvec1] = convert_to_float(acc_bvec); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); + acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1); + acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); + acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size - d > fVec::size()) { + acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); + acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size()); + acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); + acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size()); + } else { + acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d); + acc2_fvec0 = fVec::set(acc2_fvec0, 
vec_fun2(acc2_fvec0, data_fvec0), size - d); + } + } + acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1); + acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1); + return std::pair( + vec_reduce_all(vec_fun1, acc1_fvec0), + vec_reduce_all(vec_fun2, acc2_fvec0)); +} + +template , int> = 0> +inline float map_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + acc_fvec0 = map_fun(acc_fvec0); + acc_fvec1 = map_fun(acc_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template , int> = 0> +inline float map2_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2, size); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + bVec acc2_bvec = bVec::loadu(data2); + auto [acc2_fvec0, acc2_fvec1] = convert_to_float(acc2_bvec); + acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0); + acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d); + auto [data2_fvec0, data2_fvec1] = 
convert_to_float(data2_bvec); + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template , int> = 0> +inline float map3_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + const scalar_t* data3, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2, size); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3, size); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + bVec acc2_bvec = bVec::loadu(data2); + auto [acc2_fvec0, acc2_fvec1] = convert_to_float(acc2_bvec); + bVec acc3_bvec = bVec::loadu(data3); + auto [acc3_fvec0, acc3_fvec1] = convert_to_float(acc3_bvec); + acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0); + acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3 + d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3 + d, size - d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, 
data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template , int> = 0> +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(input_data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(input_data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const float* input_data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + fVec data_fvec0 = fVec::loadu(input_data + d); + fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size()); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + fVec data_fvec0, data_fvec1; + if (size - d > fVec::size()) { + data_fvec0 = fVec::loadu(input_data + d); + data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size()); + } else { + // choose to align with behaviour of bVec::loadu(ptr, size), + // which leaves data_fvec1 uninitialized + data_fvec0 = fVec::loadu(input_data + d, size - d); + } + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map2( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + const scalar_t* input_data2, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(input_data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(input_data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = 
bVec::loadu(input_data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map3( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data1_bvec = bVec::loadu(input_data1 + d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d, size - d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map4( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + const scalar_t* input_data4, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data1_bvec = bVec::loadu(input_data1 + d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + bVec data4_bvec = bVec::loadu(input_data4 + d); + auto [data4_fvec0, data4_fvec1] = convert_to_float(data4_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d, size - d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + bVec data4_bvec = bVec::loadu(input_data4 + d, size - d); + auto [data4_fvec0, 
data4_fvec1] = convert_to_float(data4_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..a82a8ef1a69457d4800f6c3de277c82f61dfa03c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h @@ -0,0 +1,43 @@ +#pragma once +#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) +/* GCC or clang-compatible compiler, targeting x86/x86-64 */ +#include +#elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__)) +/* Clang-compatible compiler, targeting arm neon */ +#include +#elif defined(_MSC_VER) +/* Microsoft C/C++-compatible compiler */ +#include +#if _MSC_VER <= 1900 +#define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2)) +#define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4)) +#define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8)) +#define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16)) +#endif +#elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__)) +/* GCC-compatible compiler, targeting ARM with NEON */ +#include +#if defined (MISSING_ARM_VLD1) +#include +#elif defined (MISSING_ARM_VST1) +#include +#endif +#elif defined(__GNUC__) && defined(__IWMMXT__) +/* GCC-compatible compiler, targeting ARM with WMMX */ +#include +#elif defined(__s390x__) +// targets Z/architecture +// we will include vecintrin later +#elif (defined(__GNUC__) || defined(__xlC__)) && \ + (defined(__VEC__) || defined(__ALTIVEC__)) +/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */ +#include +/* We need to undef those tokens defined by to avoid conflicts + with the C++ types. 
=> Can still use __bool/__vector */ +#undef bool +#undef vector +#undef pixel +#elif defined(__GNUC__) && defined(__SPE__) +/* GCC-compatible compiler, targeting PowerPC with SPE */ +#include +#endif diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h new file mode 100644 index 0000000000000000000000000000000000000000..234431068a40bc25d95563d5e443b75ca7ddccc5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h @@ -0,0 +1,47 @@ +#pragma once + +#if defined(CPU_CAPABILITY_AVX512) +#include +#else +#include +#endif + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +inline Vectorized convert_to_bool(Vectorized x) { + __at_align__ bool buffer[x.size()]; + x.ne(Vectorized(0)).store(buffer); + + Vectorized ret; + static_assert(x.size() == ret.size()); + std::memcpy(ret, buffer, ret.size() * sizeof(bool)); + return ret; +} + +template <> +inline Vectorized Vectorized::loadu(const void* ptr) { + // See NOTE [Loading boolean values] + return convert_to_bool(Vectorized::loadu(ptr)); +} + +template <> +inline Vectorized Vectorized::loadu(const void* ptr, int64_t count) { + // See NOTE [Loading boolean values] + return convert_to_bool(Vectorized::loadu(ptr, count)); +} + +template +struct VecHoldType { using hold_type = typename VT::value_type; }; + +template <> +struct VecHoldType> { using hold_type = BFloat16; }; + +template <> +struct VecHoldType> {using hold_type = Half; }; + +template +using vechold_type = typename VecHoldType::hold_type; + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..5540c8bc782faedbadb0794142580bad1207afc0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h @@ -0,0 +1,452 @@ +/* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7. 
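   These wrappers emulate the two-register ld1/st1 forms with inline asm;
   intrinsics.h only includes this header when MISSING_ARM_VLD1 is defined.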
*/ + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u8_x2 (const uint8_t *__a) +{ + uint8x8x2_t ret; + asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s8_x2 (const int8_t *__a) +{ + int8x8x2_t ret; + asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u16_x2 (const uint16_t *__a) +{ + uint16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s16_x2 (const int16_t *__a) +{ + int16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u32_x2 (const uint32_t *__a) +{ + uint32x2x2_t ret; + asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s32_x2 (const int32_t *__a) +{ + int32x2x2_t ret; + asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u64_x2 (const uint64_t *__a) +{ + uint64x1x2_t ret; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s64_x2 (const int64_t *__a) +{ + int64x1x2_t ret; + __builtin_aarch64_simd_oi __o; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f16_x2 (const float16_t *__a) +{ + float16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f32_x2 (const float32_t *__a) +{ + float32x2x2_t ret; + asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f64_x2 (const float64_t *__a) +{ + float64x1x2_t ret; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p8_x2 (const poly8_t *__a) +{ + poly8x8x2_t ret; + asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p16_x2 (const poly16_t *__a) +{ + poly16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p64_x2 (const poly64_t *__a) +{ 
+ poly64x1x2_t ret; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u8_x2 (const uint8_t *__a) +{ + uint8x16x2_t ret; + asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s8_x2 (const int8_t *__a) +{ + int8x16x2_t ret; + asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u16_x2 (const uint16_t *__a) +{ + uint16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s16_x2 (const int16_t *__a) +{ + int16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u32_x2 (const uint32_t *__a) +{ + uint32x4x2_t ret; + asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s32_x2 (const int32_t *__a) +{ + int32x4x2_t ret; + asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint64x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u64_x2 (const uint64_t *__a) +{ + uint64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int64x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s64_x2 (const int64_t *__a) +{ + int64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f16_x2 (const float16_t *__a) +{ + float16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f32_x2 (const float32_t *__a) +{ + float32x4x2_t ret; + asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float64x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f64_x2 (const float64_t *__a) +{ + float64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p8_x2 (const poly8_t *__a) +{ + poly8x16x2_t ret; + asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p16_x2 (const poly16_t *__a) +{ + poly16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly64x2x2_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p64_x2 (const poly64_t *__a) +{ + poly64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +/* vst1x2 */ + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s64_x2 (int64_t * __a, int64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f64_x2 (float64_t * __a, float64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s8_x2 (int8_t * __a, int8x8x2_t val) +{ + asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val) +{ + asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s16_x2 (int16_t * __a, int16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s32_x2 (int32_t * __a, int32x2x2_t val) +{ + asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val) +{ + asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val) +{ + asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f16_x2 (float16_t * __a, float16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f32_x2 (float32_t * __a, float32x2x2_t val) +{ + asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vst1q_s8_x2 (int8_t * __a, int8x16x2_t val) +{ + asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val) +{ + asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s16_x2 (int16_t * __a, int16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s32_x2 (int32_t * __a, int32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s64_x2 (int64_t * __a, int64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val) +{ + asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f16_x2 (float16_t * __a, float16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f32_x2 (float32_t * __a, float32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f64_x2 (float64_t * __a, float64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..711d16f9b231f0de8ef7950de809337027b1b2ee --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h @@ -0,0 +1,8 @@ +/* Workaround for missing vst1q_f32_x2 in gcc-8. */ + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f32_x2 (float32_t * __a, float32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h new file mode 100644 index 0000000000000000000000000000000000000000..da38b9d26d2188494e003ee587df4d15dc0427f3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h @@ -0,0 +1,330 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include + +#include +#if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR)) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX) +#include +#else +#include +#include +#endif + +#include +#include + +#include +#include +#include +#include +#include + +namespace at::vec { + +// Note [CPU_CAPABILITY namespace] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// This header, and all of its subheaders, will be compiled with +// different architecture flags for each supported set of vector +// intrinsics. So we need to make sure they aren't inadvertently +// linked together. We do this by declaring objects in an `inline +// namespace` which changes the name mangling, but can still be +// accessed as `at::vec`. +inline namespace CPU_CAPABILITY { + +inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { + stream << val.val_; + return stream; +} +inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { + stream << static_cast(val.val_); + return stream; +} +inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { + stream << static_cast(val.val_); + return stream; +} + +template +std::ostream& operator<<(std::ostream& stream, const Vectorized& vec) { + T buf[Vectorized::size()]; + vec.store(buf); + stream << "vec["; + for (int i = 0; i != Vectorized::size(); i++) { + if (i != 0) { + stream << ", "; + } + stream << buf[i]; + } + stream << "]"; + return stream; +} + + +#if defined(CPU_CAPABILITY_AVX2) + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm256_castpd_ps(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm256_castps_pd(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm256_castsi256_ps(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm256_castsi256_pd(src); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#ifndef _MSC_VER +// MSVC is not working well on complex function overload. 
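+// (The `scale` template parameter below is the byte stride applied to each
+// gathered index; the AVX2 gather intrinsics only accept strides of 1, 2, 4
+// or 8, which the enable_if constraint checks at compile time.)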
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
+inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
+  return _mm256_i64gather_pd(base_addr, vindex, scale);
+}
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
+inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
+  return _mm256_i32gather_ps(base_addr, vindex, scale);
+}
+#endif
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#ifndef _MSC_VER
+// MSVC is not working well on complex function overload.
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
+inline mask_gather(const Vectorized<double>& src, const double* base_addr,
+                   const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
+  return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale);
+}
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
+inline mask_gather(const Vectorized<float>& src, const float* base_addr,
+                   const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
+  return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale);
+}
+#endif
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+// Only works for inputs in the range: [-2^51, 2^51]
+// From: https://stackoverflow.com/a/41148578
+template<>
+Vectorized<int64_t>
+inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
+  auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000));
+  return _mm256_sub_epi64(
+      _mm256_castpd_si256(x),
+      _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000))
+  );
+}
+
+template<>
+Vectorized<int32_t>
+inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
+  return _mm256_cvttps_epi32(src);
+}
+
+// From: https://stackoverflow.com/a/41148578
+template<>
+Vectorized<double>
+inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
+  __m256i magic_i_lo = _mm256_set1_epi64x(0x4330000000000000);   /* 2^52 */
+  __m256i magic_i_hi32 = _mm256_set1_epi64x(0x4530000080000000); /* 2^84 + 2^63 */
+  __m256i magic_i_all = _mm256_set1_epi64x(0x4530000080100000);  /* 2^84 + 2^63 + 2^52 */
+  __m256d magic_d_all = _mm256_castsi256_pd(magic_i_all);
+
+  __m256i v_lo = _mm256_blend_epi32(magic_i_lo, src, 0b01010101); /* v_low = low32 + 2^52 */
+  __m256i v_hi = _mm256_srli_epi64(src, 32);
+  v_hi = _mm256_xor_si256(v_hi, magic_i_hi32); /* v_hi = high32*2^32 + 2^84 + 2^63 */
+  /* int64 = low32 + high32*2^32 = v_hi + v_lo - 2^52 - 2^63 - 2^84 */
+  __m256d v_hi_dbl = _mm256_sub_pd(_mm256_castsi256_pd(v_hi), magic_d_all);
+  __m256d result = _mm256_add_pd(v_hi_dbl, _mm256_castsi256_pd(v_lo));
+  return result;
+}
+
+template<>
+Vectorized<float>
+inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
+  return _mm256_cvtepi32_ps(src);
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template <>
+std::pair<Vectorized<double>, Vectorized<double>>
+inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
+  // inputs:
+  //   a = {a0, a1, a2, a3}
+  //   b = {b0, b1, b2, b3}
+
+  // swap lanes:
+  //   a_swapped = {a0, a1, b0, b1}
+  //   b_swapped = {a2, a3, b2, b3}
+  auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000);  // 0, 2.   4 bits apart
+  auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001);  // 1, 3.   4 bits apart
+
+  // group cols crossing lanes:
+  //   return {a0, b0, a1, b1}
+  //          {a2, b2, a3, b3}
+  return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000),  // 0, 2, 1, 3
+                        _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3
+}
+
+template <>
+std::pair<Vectorized<float>, Vectorized<float>>
+inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
+  // inputs:
+  //   a = {a0, a1, a2, a3, a4, a5, a6, a7}
+  //   b = {b0, b1, b2, b3, b4, b5, b6, b7}
+
+  // swap lanes:
+  //   a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3}
+  //   b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7}
+  // TODO: can we support caching this?
+  auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000);  // 0, 2.   4 bits apart
+  auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001);  // 1, 3.   4 bits apart
+
+  // group cols crossing lanes:
+  //   return {a0, b0, a1, b1, a2, b2, a3, b3}
+  //          {a4, b4, a5, b5, a6, b6, a7, b7}
+  const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
+  return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl),
+                        _mm256_permutevar8x32_ps(b_swapped, group_ctrl));
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template <>
+std::pair<Vectorized<double>, Vectorized<double>>
+inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
+  // inputs:
+  //   a = {a0, b0, a1, b1}
+  //   b = {a2, b2, a3, b3}
+
+  // group cols crossing lanes:
+  //   a_grouped = {a0, a1, b0, b1}
+  //   b_grouped = {a2, a3, b2, b3}
+  auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000);  // 0, 2, 1, 3
+  auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000);  // 0, 2, 1, 3
+
+  // swap lanes:
+  //   return {a0, a1, a2, a3}
+  //          {b0, b1, b2, b3}
+  return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000),  // 0, 2.   4 bits apart
+                        _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3.   4 bits apart
+}
+
+template <>
+std::pair<Vectorized<float>, Vectorized<float>>
+inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
+  // inputs:
+  //   a = {a0, b0, a1, b1, a2, b2, a3, b3}
+  //   b = {a4, b4, a5, b5, a6, b6, a7, b7}
+
+  // group cols crossing lanes:
+  //   a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3}
+  //   b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7}
+  // TODO: can we support caching this?
+  const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
+  auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl);
+  auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl);
+
+  // swap lanes:
+  //   return {a0, a1, a2, a3, a4, a5, a6, a7}
+  //          {b0, b1, b2, b3, b4, b5, b6, b7}
+  return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000),  // 0, 2.   4 bits apart
+                        _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3.   4 bits apart
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template<>
+inline Vectorized<float> flip(const Vectorized<float> & v) {
+  const __m256i mask_float = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
+  return _mm256_permutevar8x32_ps(v, mask_float);
+}
+
+template<>
+inline Vectorized<double> flip(const Vectorized<double> & v) {
+  return _mm256_permute4x64_pd(v, 27);  // 27 == _MM_SHUFFLE(0, 1, 2, 3)
+}
+
+template<>
+inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
+  return _mm256_permute4x64_epi64(v, 27);  // 27 == _MM_SHUFFLE(0, 1, 2, 3)
+}
+
+template<>
+inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
+  const __m256i mask_int32 = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
+  return _mm256_permutevar8x32_epi32(v, mask_int32);
+}
+
+template<>
+inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
+  const __m256i mask = _mm256_set_epi8(
+    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
+    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
+  );
+  auto reversed = _mm256_shuffle_epi8(v, mask);
+  return _mm256_permute2x128_si256(reversed, reversed, 1);
+}
+
+inline __m256i flip8(const __m256i & v) {
+  const __m256i mask_int8 = _mm256_set_epi8(
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+  );
+  auto reversed = _mm256_shuffle_epi8(v, mask_int8);
+  return _mm256_permute2x128_si256(reversed, reversed, 1);
+}
+
+template<>
+inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
+  return flip8(v);
+}
+
+template<>
+inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
+  return flip8(v);
+}
+
+inline Vectorized<bool> operator&&(
+    const Vectorized<bool>& self,
+    const Vectorized<bool>& other) {
+  const __m256i* self_ = reinterpret_cast<const __m256i*>(self.as_bytes());
+  const __m256i* other_ = reinterpret_cast<const __m256i*>(other.as_bytes());
+  __m256i out = _mm256_and_si256(*self_, *other_);
+  Vectorized<bool> ret;
+  std::memcpy(ret, &out, ret.size() * sizeof(bool));
+  return ret;
+}
+
+#endif // defined(CPU_CAPABILITY_AVX2)
+
+}} // namespace at::vec::CPU_CAPABILITY
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h
new file mode 100644
index 0000000000000000000000000000000000000000..e567c1925be8404e1c413c8bf182891aea5ffc76
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h
@@ -0,0 +1,1182 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wignored-qualifiers" + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) + +#ifndef SLEEF_CONST +#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER) +#define SLEEF_CONST const +#else +#define SLEEF_CONST +#endif +#define SLEEF_CONST_OLD SLEEF_CONST +#else +#define SLEEF_CONST_OLD +#endif + +// bfloat16 conversion +static inline void cvtbf16_fp32(const __m128i& a, __m256& o) { + o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16)); +} + +static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) { + __m128i lo = _mm256_extractf128_si256(a, 0); + __m128i hi = _mm256_extractf128_si256(a, 1); + cvtbf16_fp32(lo, o1); + cvtbf16_fp32(hi, o2); +} + +static inline __m128i cvtfp32_bf16(const __m256& src) { + __m256i value = _mm256_castps_si256(src); + __m256i nan = _mm256_set1_epi32(0xffff); + __m256i mask = _mm256_castps_si256(_mm256_cmp_ps(src, src, _CMP_ORD_Q)); + __m256i ones = _mm256_set1_epi32(0x1); + __m256i vec_bias = _mm256_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_value = _mm256_and_si256(_mm256_srli_epi32(value, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_value = _mm256_add_epi32(t_value, vec_bias); + // input += rounding_bias; + t_value = _mm256_add_epi32(t_value, value); + // input = input >> 16; + t_value = _mm256_srli_epi32(t_value, 16); + // Check NaN before converting back to bf16 + t_value = _mm256_blendv_epi8(nan, t_value, mask); + t_value = _mm256_packus_epi32(t_value, t_value); // t[4-7] t[4-7] t[0-4] t[0-4] + t_value = _mm256_permute4x64_epi64(t_value, 0xd8); // 11 01 10 00 + return _mm256_castsi256_si128(t_value); +} + +static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) { + __m256i lo = _mm256_castps_si256(a); + __m256i hi = _mm256_castps_si256(b); + __m256i nan = _mm256_set1_epi32(0xffff); + __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q)); + __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q)); + __m256i ones = _mm256_set1_epi32(0x1); + __m256i vec_bias = _mm256_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones); + auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_lo = _mm256_add_epi32(t_lo, vec_bias); + t_hi = _mm256_add_epi32(t_hi, vec_bias); + // input += rounding_bias; + t_lo = _mm256_add_epi32(t_lo, lo); + t_hi = _mm256_add_epi32(t_hi, hi); + // input = input >> 16; + t_lo = _mm256_srli_epi32(t_lo, 16); + t_hi = _mm256_srli_epi32(t_hi, 16); + // Check NaN before converting back to bf16 + t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo); + t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi); + + t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4] + return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00 +} + +static inline __m256i merge_compare_result(const __m256& a, const __m256& b) { + __m256i lo = _mm256_castps_si256(a); + __m256i hi = _mm256_castps_si256(b); + lo = _mm256_srli_epi32(lo, 16); + hi = _mm256_srli_epi32(hi, 16); + auto out = _mm256_packus_epi32(lo, hi); + return _mm256_permute4x64_epi64(out, 0xd8); +} + +// float16 conversion +static 
inline void cvtfp16_fp32(const __m128i& a, __m256& o) { + o = _mm256_cvtph_ps(a); +} + +static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) { + __m128i lo = _mm256_extractf128_si256(a, 0); + __m128i hi = _mm256_extractf128_si256(a, 1); + cvtfp16_fp32(lo, o1); + cvtfp16_fp32(hi, o2); +} + +static inline __m128i cvtfp32_fp16(const __m256& src) { + return _mm256_cvtps_ph( + src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); +} + +static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) { + __m128i lo = _mm256_cvtps_ph( + a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + __m128i hi = _mm256_cvtps_ph( + b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1); +} + +// dtype conversion between float16/bfloat16 and float32 +template , int> = 0> +inline void cvt_to_fp32(const __m128i& a, __m256& o); +template <> inline void cvt_to_fp32(const __m128i& a, __m256& o) { + cvtbf16_fp32(a, o); +}; +template <> inline void cvt_to_fp32(const __m128i& a, __m256& o) { + cvtfp16_fp32(a, o); +} + +template , int> = 0> +inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2); +template <> inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2) { + cvtbf16_fp32(a, o1, o2); +} +template <> inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2) { + cvtfp16_fp32(a, o1, o2); +} + +template , int> = 0> +inline __m256i cvt_from_fp32(const __m256& a, const __m256& b); +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return cvtfp32_bf16(a, b); +} +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return merge_compare_result(a, b); +} +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return cvtfp32_fp16(a, b); +} +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return cvtfp32_fp16(a, b); +} + +template +class Vectorized16 { +static_assert( + is_reduced_floating_point_v, + "Support only float16 and bfloat16."); +protected: + __m256i values; +public: + using value_type = uint16_t; + using size_type = int; + static constexpr size_type size() { + return 16; + } + Vectorized16() {} + Vectorized16(__m256i v) : values(v) {} + Vectorized16(T val) { + value_type uw = val.x; + values = _mm256_set1_epi16(uw); + } + Vectorized16(T val1, T val2, T val3, T val4, + T val5, T val6, T val7, T val8, + T val9, T val10, T val11, T val12, + T val13, T val14, T val15, T val16) { + values = _mm256_setr_epi16( + val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x, + val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x); + } + operator __m256i() const { + return values; + } + T& operator[](int idx) = delete; + const T& operator[](int idx) const = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0)); + return _mm256_movemask_epi8(cmp); + } + static Vectorized loadu(const void* ptr, int16_t count = size()) { + if (count == size()) + return _mm256_loadu_si256(reinterpret_cast(ptr)); + + __at_align__ int16_t tmp_values[size()]; + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return _mm256_loadu_si256(reinterpret_cast(tmp_values)); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + 
__at_align__ int16_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi16(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi16(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi16(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi16(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi16(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi16(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi16(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi16(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi16(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi16(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi16(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi16(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi16(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi16(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi16(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi16(b.values, 15); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, + const Vectorized& b, const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(T base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized set(const Vectorized& a, + const Vectorized& b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + + Vectorized map(SLEEF_CONST __m256 (*SLEEF_CONST_OLD vop)(__m256)) const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + const auto o1 = vop(lo); + const auto o2 = vop(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized isnan() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); + hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); + return merge_compare_result(lo, hi); + } + Vectorized abs() const { + return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values); + } + Vectorized angle() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto angle_lambda = [](__m256 values_2) { + const auto zero_vec = _mm256_set1_ps(0.f); + const auto nan_vec = 
_mm256_set1_ps(NAN); + const auto not_nan_mask = _mm256_cmp_ps(values_2, values_2, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_ps(c10::pi); + + const auto neg_mask = _mm256_cmp_ps(values_2, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask); + angle = _mm256_blendv_ps(angle, nan_vec, nan_mask); + return angle; + }; + auto o1 = angle_lambda(lo); + auto o2 = angle_lambda(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return map(Sleef_acosf8_u10); + } + Vectorized acosh() const { + return map(Sleef_acoshf8_u10); + } + Vectorized asin() const { + return map(Sleef_asinf8_u10); + } + Vectorized atan() const { + return map(Sleef_atanf8_u10); + } + Vectorized atanh() const { + return map(Sleef_atanhf8_u10); + } + Vectorized atan2(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_atan2f8_u10(lo, b1); + auto o2 = Sleef_atan2f8_u10(hi, b2); + return cvt_from_fp32(o1, o2); + } + Vectorized copysign(const Vectorized &sign) const { + // copy sign bit (0x8000) from sign and remaining bits from values + __m256i mask_value = _mm256_set1_epi32(~0x80008000); + __m256i mask_signbit = _mm256_set1_epi32(0x80008000); + return Vectorized( + _mm256_or_si256( + _mm256_and_si256(values, mask_value), + _mm256_and_si256(sign, mask_signbit))); + } + Vectorized erf() const { + return map(Sleef_erff8_u10); + } + Vectorized erfc() const { + return map(Sleef_erfcf8_u15); + } + Vectorized erfinv() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_erfinv(tmp1[i]); + tmp2[i] = calc_erfinv(tmp2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized exp() const { + return map(Sleef_expf8_u10); + } + Vectorized exp2() const { + return map(Sleef_exp2f8_u10); + } + Vectorized expm1() const { + return map(Sleef_expm1f8_u10); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized & q) const { + __m256 x_lo, x_hi; + cvt_to_fp32(values, x_lo, x_hi); + __m256 q_lo, q_hi; + cvt_to_fp32(q.values, q_lo, q_hi); + auto o1 = Sleef_fmodf8(x_lo, q_lo); + auto o2 = Sleef_fmodf8(x_hi, q_hi); + return cvt_from_fp32(o1, o2); + } + Vectorized hypot(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_hypotf8_u05(lo, b1); + auto o2 = Sleef_hypotf8_u05(hi, b2); + return cvt_from_fp32(o1, o2); + } + Vectorized i0() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_i0(tmp1[i]); + tmp2[i] = calc_i0(tmp2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized i0e() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + constexpr auto sz = size(); + __at_align__ float tmp1[sz / 
2], tmp2[sz / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + + for (auto i = decltype(sz){0}; i < sz / 2; i++) { + tmp1[i] = calc_i0e(tmp1[i]); + tmp2[i] = calc_i0e(tmp2[i]); + } + const auto o1 = _mm256_loadu_ps(tmp1); + const auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized digamma() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + constexpr auto sz = size(); + __at_align__ float tmp1[sz / 2], tmp2[sz / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + + for (auto i = decltype(sz){0}; i < sz / 2; i++) { + tmp1[i] = calc_digamma(tmp1[i]); + tmp2[i] = calc_digamma(tmp2[i]); + } + const auto o1 = _mm256_loadu_ps(tmp1); + const auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized igamma(const Vectorized &x) const { + __m256 lo, hi; + __m256 xlo, xhi; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm256_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + + Vectorized igammac(const Vectorized &x) const { + __m256 lo, hi; + __m256 xlo, xhi; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm256_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized log() const { + return map(Sleef_logf8_u10); + } + Vectorized log2() const { + return map(Sleef_log2f8_u10); + } + Vectorized log10() const { + return map(Sleef_log10f8_u10); + } + Vectorized log1p() const { + return map(Sleef_log1pf8_u10); + } + Vectorized sin() const { + return map(Sleef_sinf8_u10); + } + Vectorized sinh() const { + return map(Sleef_sinhf8_u10); + } + Vectorized cos() const { + return map(Sleef_cosf8_u10); + } + Vectorized cosh() const { + return map(Sleef_coshf8_u10); + } + Vectorized ceil() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_ceil_ps(lo); + auto o2 = _mm256_ceil_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized floor() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_floor_ps(lo); + auto o2 = _mm256_floor_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized neg() const { + return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000)); + } + Vectorized round() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return cvt_from_fp32(o1, o2); + } + Vectorized tan() const { + return 
map(Sleef_tanf8_u10); + } + Vectorized tanh() const { + return map(Sleef_tanhf8_u10); + } + Vectorized trunc() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + return cvt_from_fp32(o1, o2); + } + Vectorized lgamma() const { + return map(Sleef_lgammaf8_u10); + } + Vectorized sqrt() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_sqrt_ps(lo); + auto o2 = _mm256_sqrt_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized reciprocal() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto ones = _mm256_set1_ps(1); + auto o1 = _mm256_div_ps(ones, lo); + auto o2 = _mm256_div_ps(ones, hi); + return cvt_from_fp32(o1, o2); + } + Vectorized rsqrt() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto ones = _mm256_set1_ps(1); + auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo)); + auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi)); + return cvt_from_fp32(o1, o2); + } + Vectorized pow(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_powf8_u10(lo, b1); + auto o2 = Sleef_powf8_u10(hi, b2); + return cvt_from_fp32(o1, o2); + } +private: + template + Vectorized inline binary_compare(const Vectorized& b, Op op) const { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvt_to_fp32(values, a_lo, a_hi); + cvt_to_fp32(b.values, b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvt_from_fp32(o1, o2); + } + +public: + Vectorized inline operator>(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); }); + } + Vectorized inline operator<(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); }); + } + Vectorized inline operator>=(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); }); + } + Vectorized inline operator<=(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); }); + } + Vectorized inline operator==(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); }); + } + Vectorized inline operator!=(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); }); + } +}; + +template +static inline Vectorized binary_op_as_fp32(const Vectorized& a, const Vectorized& b, Op op) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvt_to_fp32(__m256i(a), a_lo, a_hi); + cvt_to_fp32(__m256i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvt_from_fp32(o1, o2); +} + +template <> +class Vectorized: public Vectorized16 { +public: + using Vectorized16::Vectorized16; + + Vectorized frac() const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return 
_mm256_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); }); +} +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto max_lo = _mm256_max_ps(a_lo, b_lo); + auto max_hi = _mm256_max_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + auto o1 = _mm256_or_ps(max_lo, nan_lo); + auto o2 = _mm256_or_ps(max_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto min_lo = _mm256_min_ps(a_lo, b_lo); + auto min_hi = _mm256_min_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
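+  // (_CMP_UNORD_Q sets a lane to all ones exactly when either input is NaN,
+  // and an all-ones single-precision bit pattern is itself a NaN; OR-ing the
+  // mask into the min therefore propagates NaN without an extra blend.)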
+ auto o1 = _mm256_or_ps(min_lo, nan_lo); + auto o2 = _mm256_or_ps(min_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + __m256 max_lo, max_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(min), min_lo, min_hi); + cvtbf16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo)); + auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi)); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 max_lo, max_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, a_lo); + auto o2 = _mm256_min_ps(max_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(min), min_lo, min_hi); + auto o1 = _mm256_max_ps(min_lo, a_lo); + auto o2 = _mm256_max_ps(min_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, BFloat16* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = _mm256_loadu_ps(&src[i]); + __m256 b = _mm256_loadu_ps(&src[i + 8]); + + __m256i bf = cvtfp32_bf16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, BFloat16* dst, int64_t n) { + auto load_float = [](const double *src) -> __m256 { + // Load one float vector from an array of doubles + __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src)); + __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4)); + return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = load_float(&src[i]); + __m256 b = load_float(&src[i + 8]); + + __m256i bf = cvtfp32_bf16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + __m256 c_lo, c_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + cvtbf16_fp32(__m256i(c), c_lo, c_hi); + auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +class Vectorized: public Vectorized16 { +public: + using Vectorized16::Vectorized16; + + Vectorized frac() const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized 
ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); }); +} +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(b), b_lo, b_hi); + auto max_lo = _mm256_max_ps(a_lo, b_lo); + auto max_hi = _mm256_max_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + auto o1 = _mm256_or_ps(max_lo, nan_lo); + auto o2 = _mm256_or_ps(max_hi, nan_hi); + return cvtfp32_fp16(o1, o2); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(b), b_lo, b_hi); + auto min_lo = _mm256_min_ps(a_lo, b_lo); + auto min_hi = _mm256_min_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
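+  // (Same unordered-compare trick as the BFloat16 specializations above:
+  // OR-ing the all-ones NaN mask into the result propagates NaN lanes.)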
+ auto o1 = _mm256_or_ps(min_lo, nan_lo); + auto o2 = _mm256_or_ps(min_hi, nan_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + __m256 max_lo, max_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(min), min_lo, min_hi); + cvtfp16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo)); + auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi)); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 max_lo, max_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, a_lo); + auto o2 = _mm256_min_ps(max_hi, a_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(min), min_lo, min_hi); + auto o1 = _mm256_max_ps(min_lo, a_lo); + auto o2 = _mm256_max_ps(min_hi, a_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +inline void convert(const Half* src, Half* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, Half* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = _mm256_loadu_ps(&src[i]); + __m256 b = _mm256_loadu_ps(&src[i + 8]); + + __m256i c = cvtfp32_fp16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, Half* dst, int64_t n) { + auto load_float = [](const double *src) -> __m256 { + // Load one float vector from an array of doubles + __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src)); + __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4)); + return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = load_float(&src[i]); + __m256 b = load_float(&src[i + 8]); + + __m256i c = cvtfp32_fp16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + __m256 c_lo, c_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(b), b_lo, b_hi); + cvtfp16_fp32(__m256i(c), c_lo, c_hi); + auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_fp16(o1, o2); +} + +#define CONVERT_VECTORIZED_INIT(type, name) \ +inline std::tuple, Vectorized> convert_##name##_float(const Vectorized& a) { \ + __m256 o1, o2; \ + cvt_to_fp32(__m256i(a), o1, o2); \ + return std::make_tuple(o1, o2); \ +} \ +inline Vectorized convert_float_##name(const Vectorized& a, const Vectorized& b) { \ 
+ return cvt_from_fp32(__m256(a), __m256(b)); \ +} +CONVERT_VECTORIZED_INIT(BFloat16, bfloat16); +CONVERT_VECTORIZED_INIT(Half, half); + +#else // defined(CPU_CAPABILITY_AVX2) + +#define CONVERT_NON_VECTORIZED_INIT(type, name) \ +inline std::tuple, Vectorized> convert_##name##_float(const Vectorized& a) { \ + constexpr int64_t K = Vectorized::size(); \ + __at_align__ float arr[K]; \ + __at_align__ type arr2[K]; \ + a.store(arr2); \ + convert(arr2, arr, K); \ + return std::make_tuple( \ + Vectorized::loadu(arr), \ + Vectorized::loadu(arr + Vectorized::size())); \ +} \ +inline Vectorized convert_float_##name(const Vectorized& a, const Vectorized& b) { \ + constexpr int64_t K = Vectorized::size(); \ + __at_align__ float arr[K]; \ + __at_align__ type arr2[K]; \ + a.store(arr); \ + b.store(arr + Vectorized::size()); \ + convert(arr, arr2, K); \ + return Vectorized::loadu(arr2); \ +} +CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16); +#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +inline std::tuple, Vectorized> convert_half_float(const Vectorized& a) { + static_assert(Vectorized::size() == 2 * Vectorized::size()); +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + float16x8x2_t arr = a; + float16x8_t x = arr.val[0]; + float16x8_t y = arr.val[1]; +#else + auto arr = reinterpret_cast(a.operator const Half*()); + float16x8_t x = vld1q_f16(arr); + float16x8_t y = vld1q_f16(arr + Vectorized::size()); +#endif + float32x4_t x1 = vcvt_f32_f16(vget_low_f16(x)); + float32x4_t x2 = vcvt_f32_f16(vget_high_f16(x)); + float32x4_t y1 = vcvt_f32_f16(vget_low_f16(y)); + float32x4_t y2 = vcvt_f32_f16(vget_high_f16(y)); + return { Vectorized(x1, x2), Vectorized(y1, y2) }; +} +inline Vectorized convert_float_half(const Vectorized& a, const Vectorized& b) { + static_assert(Vectorized::size() == 2 * Vectorized::size()); + float32x4x2_t x = a; + float32x4x2_t y = b; + float16x4_t x1 = vcvt_f16_f32(x.val[0]); + float16x4_t x2 = vcvt_f16_f32(x.val[1]); + float16x4_t y1 = vcvt_f16_f32(y.val[0]); + float16x4_t y2 = vcvt_f16_f32(y.val[1]); +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + return Vectorized(vcombine_f16(x1, x2), vcombine_f16(y1, y2)); +#else + Vectorized rc; + auto arr = reinterpret_cast(rc.operator Half*()); + vst1q_f16(arr, vcombine_f16(x1, x2)); + vst1q_f16(arr + Vectorized::size(), vcombine_f16(y1, y2)); + return rc; +#endif +} +#else +CONVERT_NON_VECTORIZED_INIT(Half, half); +#endif + +#endif // defined(CPU_CAPABILITY_AVX2) + +#if defined(CPU_CAPABILITY_AVX2) +#define LOAD_FP32_VECTORIZED_INIT(type, name) \ +inline void load_fp32_from_##name(const type *data, Vectorized& out) { \ + auto values = _mm_loadu_si128(reinterpret_cast(data)); \ + __m256 out_values; \ + cvt_to_fp32(values, out_values); \ + out = out_values; \ +} \ +\ +inline void load_fp32_from_##name(const type *data, Vectorized& out1, Vectorized& out2) { \ + auto vec = Vectorized::loadu(data); \ + __m256 out1_values, out2_values; \ + cvt_to_fp32(vec, out1_values, out2_values); \ + out1 = out1_values; \ + out2 = out2_values; \ +} +LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16); +LOAD_FP32_VECTORIZED_INIT(Half, fp16); + +#else // defined(CPU_CAPABILITY_AVX2) +#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \ +inline void load_fp32_from_##name(const type *data, Vectorized& out) { \ + __at_align__ float values[Vectorized::size()]; \ + for (const auto k : c10::irange(Vectorized::size())) { \ + values[k] = data[k]; \ + } \ + out = Vectorized::loadu(values); \ +} \ +\ +inline void load_fp32_from_##name(const type *data, 
Vectorized<float>& out1, Vectorized<float>& out2) { \
+  load_fp32_from_##name(data, out1); \
+  data += Vectorized<float>::size(); \
+  load_fp32_from_##name(data, out2); \
+}
+LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
+LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
+
+#endif
+}} // namespace at::vec::CPU_CAPABILITY
+
+#pragma GCC diagnostic pop
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c198fb37d3d19be314294c582538e2f652aaab8
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h
@@ -0,0 +1,432 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include <c10/util/complex.h>
+#include <c10/util/irange.h>
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+
+#if defined(CPU_CAPABILITY_AVX2)
+#define SLEEF_STATIC_LIBS
+#include <sleef.h>
+#endif
+
+namespace at::vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+#if defined(CPU_CAPABILITY_AVX2)
+
+template <> class Vectorized<c10::complex<double>> {
+private:
+  __m256d values;
+public:
+  using value_type = c10::complex<double>;
+  using size_type = int;
+  static constexpr size_type size() {
+    return 2;
+  }
+  Vectorized() {}
+  Vectorized(__m256d v) : values(v) {}
+  Vectorized(c10::complex<double> val) {
+    double real_value = val.real();
+    double imag_value = val.imag();
+    values = _mm256_setr_pd(real_value, imag_value,
+                            real_value, imag_value);
+  }
+  Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
+    values = _mm256_setr_pd(val1.real(), val1.imag(),
+                            val2.real(), val2.imag());
+  }
+  operator __m256d() const {
+    return values;
+  }
+  template <int64_t mask>
+  static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
+    static_assert(mask > -1 && mask < 4, "Unexpected mask value");
+    switch (mask) {
+      case 0:
+        return a;
+      case 1:
+        return _mm256_blend_pd(a.values, b.values, 0x03);
+      case 2:
+        return _mm256_blend_pd(a.values, b.values, 0x0c);
+      case 3: break;
+    }
+    return b;
+  }
+  static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
+                                                 const Vectorized<c10::complex<double>>& mask) {
+    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
+    auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
+    return _mm256_blendv_pd(a.values, b.values, mask_);
+  }
+  template <typename step_t>
+  static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
+    return Vectorized<c10::complex<double>>(base,
+                                            base + step);
+  }
+  static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
+                                              int64_t count = size()) {
+    switch (count) {
+      case 0:
+        return a;
+      case 1:
+        return blend<1>(a, b);
+    }
+    return b;
+  }
+  static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
+    if (count == size())
+      return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
+
+    __at_align__ double tmp_values[2*size()];
+    // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
+    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
+    // instructions while a loop would be compiled to one instruction.
+    for (const auto i : c10::irange(2*size())) {
+      tmp_values[i] = 0.0;
+    }
+    std::memcpy(
+        tmp_values,
+        reinterpret_cast<const double*>(ptr),
+        count * sizeof(c10::complex<double>));
+    return _mm256_load_pd(tmp_values);
+  }
+  void store(void* ptr, int count = size()) const {
+    if (count == size()) {
+      _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
+    } else if (count > 0) {
+      double tmp_values[2*size()];
+      _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
+      std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
+    }
+  }
+  const c10::complex<double>& operator[](int idx) const = delete;
+  c10::complex<double>& operator[](int idx) = delete;
+  Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
+    __at_align__ c10::complex<double> tmp[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      tmp[i] = f(tmp[i]);
+    }
+    return loadu(tmp);
+  }
+  __m256d abs_2_() const {
+    auto val_2 = _mm256_mul_pd(values, values);  // a*a     b*b
+    return _mm256_hadd_pd(val_2, val_2);         // a*a+b*b a*a+b*b
+  }
+  __m256d abs_() const {
+    auto real = _mm256_movedup_pd(values);       // real real
+    // movehdup_pd does not exist...
+    auto imag = _mm256_permute_pd(values, 0xf);  // imag imag
+    return Sleef_hypotd4_u05(real, imag);        // abs  abs
+  }
+  Vectorized<c10::complex<double>> abs() const {
+    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
+                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
+    return _mm256_and_pd(abs_(), real_mask);  // abs  0
+  }
+  __m256d angle_() const {
+    // angle = atan2(b, a)
+    auto b_a = _mm256_permute_pd(values, 0x05);  // b        a
+    return Sleef_atan2d4_u10(values, b_a);       // 90-angle angle
+  }
+  Vectorized<c10::complex<double>> angle() const {
+    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
+                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
+    auto angle = _mm256_permute_pd(angle_(), 0x05);  // angle 90-angle
+    return _mm256_and_pd(angle, real_mask);          // angle 0
+  }
+  Vectorized<c10::complex<double>> sgn() const {
+    auto abs = abs_();
+    auto zero = _mm256_setzero_pd();
+    auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ);
+    auto div = _mm256_div_pd(values, abs);
+    return _mm256_blendv_pd(div, zero, mask);
+  }
+  __m256d real_() const {
+    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
+                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
+    return _mm256_and_pd(values, real_mask);
+  }
+  Vectorized<c10::complex<double>> real() const {
+    return real_();
+  }
+  __m256d imag_() const {
+    const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
+                                                                     0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
+    return _mm256_and_pd(values, imag_mask);
+  }
+  Vectorized<c10::complex<double>> imag() const {
+    return _mm256_permute_pd(imag_(), 0x05);  // b a
+  }
+  __m256d conj_() const {
+    const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
+    return _mm256_xor_pd(values, sign_mask);  // a -b
+  }
+  Vectorized<c10::complex<double>> conj() const {
+    return conj_();
+  }
+  Vectorized<c10::complex<double>> log() const {
+    // Most trigonometric ops use the log() op to improve complex number performance.
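+    // (For z = a + bi, std::log returns ln|z| + i*atan2(b, a); log2() and
+    // log10() below reuse this and simply divide by log(2) and log(10).)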
+ return map(std::log); + } + Vectorized> log2() const { + const __m256d log2_ = _mm256_set1_pd(std::log(2)); + return _mm256_div_pd(log(), log2_); + } + Vectorized> log10() const { + const __m256d log10_ = _mm256_set1_pd(std::log(10)); + return _mm256_div_pd(log(), log10_); + } + Vectorized> log1p() const { + return map(std::log1p); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m256d one = _mm256_set1_pd(1); + + auto conj = conj_(); + auto b_a = _mm256_permute_pd(conj, 0x05); //-b a + auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab + auto im = _mm256_add_pd(ab, ab); //-2ab -2ab + + auto val_2 = _mm256_mul_pd(values, values); // a*a b*b + auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a + re = _mm256_sub_pd(one, re); + + auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln() + } + Vectorized> acos() const { + // acos(x) = pi/2 - asin(x) + constexpr auto pi_2d = c10::pi / 2; + const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0); + return _mm256_sub_pd(pi_2, asin()); + } + Vectorized> atan() const; + Vectorized> atanh() const { + return map(std::atanh); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expd4_u10(values); //exp(a) exp(b) + exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05), + sin_cos.x, 0x0A); //cos(b) sin(b) + return _mm256_mul_pd(exp, cos_sin); + } + Vectorized> exp2() const { + // Use identity 2**x = exp(log(2) * x) + const __m256d ln_2 = _mm256_set1_pd(c10::ln_2); + Vectorized> scaled_values = _mm256_mul_pd(values, ln_2); + return scaled_values.exp(); + } + Vectorized> expm1() const { + return map(std::expm1); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm256_ceil_pd(values); + } + Vectorized> floor() const { + return _mm256_floor_pd(values); + } + Vectorized> neg() const { + auto zero = _mm256_setzero_pd(); + return _mm256_sub_pd(zero, values); + } + Vectorized> round() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ); + } + Vectorized> operator!=(const Vectorized>& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ); + } + Vectorized> operator<(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, const Vectorized> &b) { + return _mm256_add_pd(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, const Vectorized> &b) { + return _mm256_sub_pd(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm256_mul_pd(a, b); //ac bd + + auto d_c = _mm256_permute_pd(b, 0x05); //d c + d_c = _mm256_xor_pd(sign_mask, d_c); //d -c + auto ad_bc = _mm256_mul_pd(a, d_c); //ad -bc + + auto ret = _mm256_hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + auto mask = _mm256_set1_pd(-0.f); + auto fabs_cd = _mm256_andnot_pd(mask, b); // |c| |d| + auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05); // |d| |c| + auto scale = _mm256_div_pd(_mm256_set1_pd(1.0f), _mm256_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc + auto a2 = _mm256_mul_pd(a, scale); // a/sc b/sc + auto b2 = _mm256_mul_pd(b, scale); // c/sc d/sc + auto acbd2 = _mm256_mul_pd(a2, b2); + + const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0); + auto dc2 = _mm256_permute_pd(b2, 0x05); // d/sc c/sc + dc2 = _mm256_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc + auto adbc2 = _mm256_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2 + auto res2 = _mm256_hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2 + + // get the denominator + auto denom2 = Vectorized>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + res2 = _mm256_div_pd(res2, denom2); + return res2; +} + +// reciprocal. Implement this here so we can use multiplication. 
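+// The scalar identity behind the code below: for z = c + di,
+// 1/z = conj(z)/|z|^2 = (c - di)/(c*c + d*d),
+// e.g. 1/(1 + 2i) = (1 - 2i)/5 = 0.2 - 0.4i.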
+inline Vectorized> Vectorized>::reciprocal() const{ + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0); + auto c_d = _mm256_xor_pd(sign_mask, values); //c -d + return _mm256_div_pd(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm256_add_pd(i, values)); // a 1+b + auto sub = Vectorized(_mm256_sub_pd(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm256_blendv_pd(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_pd(max, isnan); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm256_blendv_pd(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_pd(min, isnan); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, const Vectorized>& b) { + return _mm256_and_pd(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, const Vectorized>& b) { + return _mm256_or_pd(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, const Vectorized>& b) { + return _mm256_xor_pd(a, b); +} + +inline Vectorized> Vectorized>::eq(const Vectorized>& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & Vectorized>(_mm256_set1_pd(1.0)); +} + +inline Vectorized> Vectorized>::ne(const Vectorized>& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & Vectorized>(_mm256_set1_pd(1.0)); +} + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h new file mode 100644 index 0000000000000000000000000000000000000000..c72d4d49274a0e8ca97f23c0157744913057d6eb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h @@ -0,0 +1,469 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) + +template <> class Vectorized> { +private: + __m256 values; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m256 v) : values(v) {} + Vectorized(c10::complex val) { + float real_value = val.real(); + float imag_value = val.imag(); + values = _mm256_setr_ps(real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value + ); + } + Vectorized(c10::complex val1, c10::complex val2, c10::complex val3, c10::complex val4) { + values = _mm256_setr_ps(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag() + ); + } + operator __m256() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + static_assert(mask > -1 && mask < 16, "Unexpected mask range"); + switch (mask) { + case 0: + return a; + case 1: + return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011 + case 2: + return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100 + case 3: + return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111 + case 4: + return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000 + case 5: + return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011 + case 6: + return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100 + case 7: + return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111 + case 8: + return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000 + case 9: + return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011 + case 10: + return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100 + case 11: + return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111 + case 12: + return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000 + case 13: + return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011 + case 14: + return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100 + default: break; + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values); + return _mm256_blendv_ps(a.values, b.values, mask_); + + } + template + static Vectorized> arange(c10::complex base = 0., step_t step = static_cast(1)) { + return Vectorized>(base, + base + step, + base + c10::complex(2)*step, + base + c10::complex(3)*step); + } + static Vectorized> set(const Vectorized>& a, const Vectorized>& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_ps(reinterpret_cast(ptr)); + + __at_align__ float tmp_values[2*size()]; + // Ensure 
uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
+ // instructions while a loop would be compiled to one instruction.
+ for (const auto i : c10::irange(2*size())) {
+ tmp_values[i] = 0.0;
+ }
+ std::memcpy(
+ tmp_values,
+ reinterpret_cast(ptr),
+ count * sizeof(c10::complex));
+ return _mm256_load_ps(tmp_values);
+ }
+ void store(void* ptr, int count = size()) const {
+ if (count == size()) {
+ _mm256_storeu_ps(reinterpret_cast(ptr), values);
+ } else if (count > 0) {
+ float tmp_values[2*size()];
+ _mm256_storeu_ps(reinterpret_cast(tmp_values), values);
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex));
+ }
+ }
+ const c10::complex& operator[](int idx) const = delete;
+ c10::complex& operator[](int idx) = delete;
+ Vectorized> map(c10::complex (*const f)(const c10::complex &)) const {
+ __at_align__ c10::complex tmp[size()];
+ store(tmp);
+ for (const auto i : c10::irange(size())) {
+ tmp[i] = f(tmp[i]);
+ }
+ return loadu(tmp);
+ }
+ __m256 abs_2_() const {
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
+ auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
+ return _mm256_permute_ps(ret, 0xD8);
+ }
+ __m256 abs_() const {
+ auto real = _mm256_moveldup_ps(values); // real real
+ auto imag = _mm256_movehdup_ps(values); // imag imag
+ return Sleef_hypotf8_u05(real, imag); // abs abs
+ }
+ Vectorized> abs() const {
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
+ return _mm256_and_ps(abs_(), real_mask); // abs 0
+ }
+ __m256 angle_() const {
+ // angle = atan2(b, a)
+ auto b_a = _mm256_permute_ps(values, 0xB1); // b a
+ return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
+ }
+ Vectorized> angle() const {
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
+ auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle
+ return _mm256_and_ps(angle, real_mask); // angle 0
+ }
+ Vectorized> sgn() const {
+ auto abs = abs_();
+ auto zero = _mm256_setzero_ps();
+ auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ);
+ auto div = _mm256_div_ps(values, abs);
+ return _mm256_blendv_ps(div, zero, mask);
+ }
+ __m256 real_() const {
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
+ return _mm256_and_ps(values, real_mask);
+ }
+ Vectorized> real() const {
+ return real_();
+ }
+ __m256 imag_() const {
+ const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
+ return _mm256_and_ps(values, imag_mask);
+ }
+ Vectorized> imag() const {
+ return _mm256_permute_ps(imag_(), 0xB1); //b a
+ }
+ __m256 conj_() const {
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
+ return _mm256_xor_ps(values, sign_mask); // a -b
+ }
+ Vectorized> conj() const {
+ return conj_();
+ }
+ Vectorized> log() const {
+ // Most trigonometric ops use the log() op to improve complex number performance.
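+ // log2() and log10() below then follow from the change-of-base rule
+ // log_b(z) = log(z)/ln(b); ln(b) is broadcast to every lane, so the
+ // elementwise _mm256_div_ps implements division by a real scalar.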
+ return map(std::log); + } + Vectorized> log2() const { + const __m256 log2_ = _mm256_set1_ps(std::log(2)); + return _mm256_div_ps(log(), log2_); + } + Vectorized> log10() const { + const __m256 log10_ = _mm256_set1_ps(std::log(10)); + return _mm256_div_ps(log(), log10_); + } + Vectorized> log1p() const { + return map(std::log1p); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m256 one = _mm256_set1_ps(1); + + auto conj = conj_(); + auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a + auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab + auto im = _mm256_add_ps(ab, ab); //-2ab -2ab + + auto val_2 = _mm256_mul_ps(values, values); // a*a b*b + auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a + re = _mm256_permute_ps(re, 0xD8); + re = _mm256_sub_ps(one, re); + + auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln() + } + Vectorized> acos() const { + return map(std::acos); + } + Vectorized> atan() const; + Vectorized> atanh() const { + return map(std::atanh); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expf8_u10(values); //exp(a) exp(b) + exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1), + sin_cos.x, 0xAA); //cos(b) sin(b) + return _mm256_mul_ps(exp, cos_sin); + } + Vectorized> exp2() const { + // Use identity 2**x = exp(log(2) * x) + const __m256 ln_2 = _mm256_set1_ps(c10::ln_2); + Vectorized> scaled_values = _mm256_mul_ps(values, ln_2); + return scaled_values.exp(); + } + Vectorized> expm1() const { + return map(std::expm1); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm256_ceil_ps(values); + } + Vectorized> floor() const { + return _mm256_floor_ps(values); + } + Vectorized> neg() const { + auto zero = _mm256_setzero_ps(); + return _mm256_sub_ps(zero, values); + } + Vectorized> round() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ); + } + Vectorized> operator!=(const Vectorized>& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ); + } + Vectorized> operator<(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, const Vectorized> &b) { + return _mm256_add_ps(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, const Vectorized> &b) { + return _mm256_sub_ps(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm256_mul_ps(a, b); //ac bd + + auto d_c = _mm256_permute_ps(b, 0xB1); //d c + d_c = _mm256_xor_ps(sign_mask, d_c); //d -c + auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc + + auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc + ret = _mm256_permute_ps(ret, 0xD8); + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + auto mask = _mm256_set1_ps(-0.f); + auto fabs_cd = _mm256_andnot_ps(mask, b); // |c| |d| + auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1); // |d| |c| + auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc + auto a2 = _mm256_mul_ps(a, scale); // a/sc b/sc + auto b2 = _mm256_mul_ps(b, scale); // c/sc d/sc + auto acbd2 = _mm256_mul_ps(a2, b2); + + const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0); + auto dc2 = _mm256_permute_ps(b2, 0xB1); // d/sc c/sc + dc2 = _mm256_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc + auto adbc2 = _mm256_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2 + auto res2 = _mm256_hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2 + res2 = _mm256_permute_ps(res2, 0xD8); + + // get the denominator + auto denom2 = Vectorized>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + res2 = _mm256_div_ps(res2, denom2); + return res2; +} + +// reciprocal. Implement this here so we can use multiplication. 
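+// Note the exact _mm256_div_ps here: the approximate _mm256_rcp_ps (about
+// 12 bits of precision) is used above only for the range scale in operator/,
+// where the same scale multiplies numerator and denominator and cancels.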
+inline Vectorized> Vectorized>::reciprocal() const { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto c_d = _mm256_xor_ps(sign_mask, values); //c -d + return _mm256_div_ps(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b + auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm256_blendv_ps(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_ps(max, isnan); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm256_blendv_ps(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, const Vectorized>& b) { + return _mm256_and_ps(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, const Vectorized>& b) { + return _mm256_or_ps(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, const Vectorized>& b) { + return _mm256_xor_ps(a, b); +} + +inline Vectorized> Vectorized>::eq( + const Vectorized>& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & Vectorized>(_mm256_set1_ps(1.0f)); +} + +inline Vectorized> Vectorized>::ne( + const Vectorized>& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & Vectorized>(_mm256_set1_ps(1.0f)); +} + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_convert.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_convert.h new file mode 100644 index 0000000000000000000000000000000000000000..b0f109fc87502636d8e2324026e3d8da23df3cfe --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_convert.h @@ -0,0 +1,308 @@ +#pragma once + +#include +#include +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + __m256 value; + cvtbf16_fp32(_mm256_castsi256_si128(src[0]), value); + result[0] = value; + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply(const 
VectorizedN& src) { + VectorizedN result; + __m256 value; + cvtfp16_fp32(_mm256_castsi256_si128(src[0]), value); + result[0] = value; + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + result[0] = _mm256_castsi128_si256(cvtfp32_bf16(src[0])); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + result[0] = convert_float_bfloat16(src[0], src[1]); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + std::tie(result[0], result[1]) = convert_bfloat16_float(src[0]); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN result; + result[0] = _mm256_castsi128_si256(cvtfp32_fp16(src[0])); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN result; + result[0] = convert_float_half(src[0], src[1]); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN result; + std::tie(result[0], result[1]) = convert_half_float(src[0]); + return result; + } +}; + +template <> +inline Vectorized convert_to_fp_of_same_size( + const Vectorized& src); + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto low_double = at::vec::convert_to_fp_of_same_size(src[0]); + auto low = _mm256_cvtpd_ps(low_double); + auto high_double = at::vec::convert_to_fp_of_same_size(src[1]); + auto high = _mm256_cvtpd_ps(high_double); + return Vectorized( + _mm256_insertf128_ps(_mm256_castps128_ps256(low), high, 1)); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + // Scalarization is the most reliable way of converting fp to int64 on AVX2. 
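+ // (AVX2 has no packed float-to-int64 conversion; that only arrives with
+ // AVX512DQ, e.g. _mm512_cvttps_epi64.)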
+ // Check: https://stackoverflow.com/questions/41144668 + float buffer[8]; + src.store(buffer); + at::vec::VectorizedN result; + result[0] = Vectorized( + static_cast(buffer[0]), + static_cast(buffer[1]), + static_cast(buffer[2]), + static_cast(buffer[3])); + result[1] = Vectorized( + static_cast(buffer[4]), + static_cast(buffer[5]), + static_cast(buffer[6]), + static_cast(buffer[7])); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto low = _mm256_shuffle_epi32(src[0], _MM_SHUFFLE(2, 0, 2, 0)); + auto high = _mm256_shuffle_epi32(src[1], _MM_SHUFFLE(2, 0, 2, 0)); + auto low_perm = _mm256_permute4x64_epi64(low, _MM_SHUFFLE(3, 1, 2, 0)); + auto high_perm = _mm256_permute4x64_epi64(high, _MM_SHUFFLE(3, 1, 2, 0)); + return Vectorized(_mm256_blend_epi32(low_perm, high_perm, 0xF0)); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + at::vec::VectorizedN result; + result[0] = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(src[0])); + result[1] = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(src[0], 1)); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto src128 = _mm256_castsi256_si128(src[0]); + return Vectorized(_mm256_cvtepi8_epi32(src128)); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto src128 = _mm256_castsi256_si128(src[0]); + return Vectorized(_mm256_cvtepu8_epi32(src128)); + } +}; + + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + return Vectorized(_mm256_cvttps_epi32(src[0])); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + return Vectorized(_mm256_cvtepi32_ps(src[0])); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto src128 = _mm256_castsi256_si128(src[0]); + return Vectorized(_mm256_cvtepu8_epi16(src128)); + } +}; + +template +struct VecConvert< + dst_t, + 1, + src_t, + 1, + typename std::enable_if_t< + (is_reduced_floating_point_v && is_8bit_integer_v) || + (is_reduced_floating_point_v && is_8bit_integer_v), + void>> { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN tmp_fp32 = VecConvert::apply(src); + return VecConvert::apply(tmp_fp32); + } +}; + +template +struct VecConvert< + dst_t, + 1, + float, + 1, + typename std::enable_if_t, + void>> { + static inline VectorizedN apply(const VectorizedN& src) { + return convert_float_to_int8(src[0]); + } +}; + + +template +struct VecConvert< + dst_t, + 1, + int64_t, + 2, + typename std::enable_if< + std::is_same_v || + std::is_same_v>::type> { + static inline VectorizedN apply( + const VectorizedN& src) { + return VecConvert::apply( + VecConvert::apply(src)); + } +}; + +#endif /* defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) */ + + +#if (defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)) || defined(CPU_CAPABILITY_NEON) +template +struct VecConvert< + float, + 1, + src_t, + 1, + typename std::enable_if_t, + void>> { + static inline VectorizedN apply(const VectorizedN& src) { + return convert_int8_to_float(src[0]); + } +}; +#endif + +#if defined(CPU_CAPABILITY_NEON) +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + uint16x8_t u16_8 = vld1q_u16(reinterpret_cast(&src[0])); + int32x4_t shift = 
vdupq_n_s32(16); + auto u16_low1 = vget_low_u16(u16_8); + auto u16_high1 = vget_high_u16(u16_8); + float32x4_t f32x4_0 = vreinterpretq_f32_u32(vshlq_u32(vmovl_u16(u16_low1), shift)); + float32x4_t f32x4_1 = vreinterpretq_f32_u32(vshlq_u32(vmovl_u16(u16_high1), shift)); + result[0] = {f32x4_0, f32x4_1}; + return result; + } +}; +#endif + +template +struct VecConvert< + float, + 1, + src_t, + 1, + typename std::enable_if_t, void>> { + static inline VectorizedN apply(const VectorizedN& src) { + auto [res_vec1, res_vec2] = convert_to_float(src[0]); + return res_vec1; + } +}; + +template +struct VecConvert< + dst_t, + 1, + float, + 1, + typename std::enable_if_t, void>> { + static inline VectorizedN apply(const VectorizedN& src) { + return convert_from_float(src[0], src[0]); + } +}; + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h new file mode 100644 index 0000000000000000000000000000000000000000..168fe4ed7f9693fcf31b8b4c94cbbf7c4ce3fe40 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h @@ -0,0 +1,447 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + + +#if defined(CPU_CAPABILITY_AVX2) + +template <> class Vectorized { +private: + __m256d values; +public: + using value_type = double; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m256d v) : values(v) {} + Vectorized(double val) { + values = _mm256_set1_pd(val); + } + Vectorized(double val1, double val2, double val3, double val4) { + values = _mm256_setr_pd(val1, val2, val3, val4); + } + operator __m256d() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm256_blend_pd(a.values, b.values, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_pd(a.values, b.values, mask.values); + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_pd(reinterpret_cast(ptr)); + + + __at_align__ double tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
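+ // (The zero-fill matters: the unused tail lanes still flow through later
+ // arithmetic, and garbage bits there can introduce spurious NaN/Inf.)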
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(double)); + return _mm256_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[size()]; + _mm256_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(double)); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ); + return _mm256_movemask_pd(cmp); + } + Vectorized isnan() const { + return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q); + } + bool has_inf_nan() const { + __m256d self_sub = _mm256_sub_pd(values, values); + return (_mm256_movemask_epi8(_mm256_castpd_si256(self_sub)) & 0x77777777) != 0; + } + Vectorized map(double (*const f)(double)) const { + __at_align__ double tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm256_set1_pd(-0.f); + return _mm256_andnot_pd(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm256_set1_pd(0.f); + const auto nan_vec = _mm256_set1_pd(NAN); + const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_pd(c10::pi); + + const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask); + angle = _mm256_blendv_pd(angle, nan_vec, nan_mask); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_pd(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosd4_u10(values)); + } + Vectorized acosh() const { + return Vectorized(Sleef_acoshd4_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asind4_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atand4_u10(values)); + } + Vectorized atanh() const { + return Vectorized(Sleef_atanhd4_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2d4_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignd4(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erfd4_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcd4_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expd4_u10(values)); + } + Vectorized exp2() const { + return Vectorized(Sleef_exp2d4_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1d4_u10(values)); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodd4(values, q)); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotd4_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return 
map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return Vectorized(Sleef_logd4_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2d4_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10d4_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pd4_u10(values)); + } + Vectorized sin() const { + return Vectorized(Sleef_sind4_u10(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhd4_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosd4_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshd4_u10(values)); + } + Vectorized ceil() const { + return _mm256_ceil_pd(values); + } + Vectorized floor() const { + return _mm256_floor_pd(values); + } + Vectorized frac() const; + Vectorized neg() const { + return _mm256_xor_pd(_mm256_set1_pd(-0.), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterd4(values, b)); + } + Vectorized round() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tand4_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhd4_u10(values)); + } + Vectorized trunc() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammad4_u10(values)); + } + Vectorized sqrt() const { + return _mm256_sqrt_pd(values); + } + Vectorized reciprocal() const { + return _mm256_div_pd(_mm256_set1_pd(1), values); + } + Vectorized rsqrt() const { + return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powd4_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ); + } + + Vectorized operator!=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ); + } + + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ); + } + + Vectorized operator<=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ); + } + + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ); + } + + Vectorized operator>=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_pd(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_pd(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mul_pd(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm256_div_pd(a, b); +} + +// frac. Implement this here so we can use subtraction. +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + Vectorized max = _mm256_max_pd(a, b); + Vectorized isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + return _mm256_or_pd(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + Vectorized min = _mm256_min_pd(a, b); + Vectorized isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
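+ // (An all-ones double has every exponent bit set and a nonzero mantissa,
+ // which IEEE 754 classifies as a NaN, so OR-ing the mask in forces the
+ // NaN lanes through.)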
+ return _mm256_or_pd(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm256_min_pd(max, _mm256_max_pd(min, a)); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm256_max_pd(min, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm256_min_pd(max, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_pd(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_pd(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_pd(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0); +} + +template <> +inline void convert(const double* src, double* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i)); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +#ifdef CPU_CAPABILITY_AVX2 +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmadd_pd(a, b, c); +} + +template <> +Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmsub_pd(a, b, c); +} +#endif + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h new file mode 100644 index 0000000000000000000000000000000000000000..dab1790b26ab010e04c444df0aebaef27caa4d4b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h @@ -0,0 +1,656 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) + +template <> class Vectorized { +private: + __m256 values; +public: + using value_type = float; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m256 v) : values(v) {} + Vectorized(float val) { + values = _mm256_set1_ps(val); + } + Vectorized(float val1, float val2, float val3, float val4, + float val5, float val6, float val7, float val8) { + values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8); + } + operator __m256() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm256_blend_ps(a.values, b.values, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_ps(a.values, b.values, mask.values); + } + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_ps(reinterpret_cast(ptr)); + __at_align__ float tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
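+ // (A masked load such as _mm256_maskload_ps could avoid the stack copy,
+ // but the memcpy path is correct for any count and keeps tail lanes zero.)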
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(float)); + return _mm256_loadu_ps(tmp_values); + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + _mm256_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[size()]; + _mm256_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(float)); + } + } + const float& operator[](int idx) const = delete; + float& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ); + return _mm256_movemask_ps(cmp); + } + Vectorized isnan() const { + return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); + } + + bool has_inf_nan() const { + __m256 self_sub = _mm256_sub_ps(values, values); + return (_mm256_movemask_epi8(_mm256_castps_si256(self_sub)) & 0x77777777) != 0; + } + + Vectorized map(float (*const f)(float)) const { + __at_align__ float tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm256_set1_ps(-0.f); + return _mm256_andnot_ps(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm256_set1_ps(0.f); + const auto nan_vec = _mm256_set1_ps(NAN); + const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_ps(c10::pi); + + const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask); + angle = _mm256_blendv_ps(angle, nan_vec, nan_mask); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_ps(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosf8_u10(values)); + } + Vectorized acosh() const { + return Vectorized(Sleef_acoshf8_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asinf8_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atanf8_u10(values)); + } + Vectorized atanh() const { + return Vectorized(Sleef_atanhf8_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2f8_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignf8(values, sign)); + } + Vectorized erf() const { + // constants + const auto neg_zero_vec = _mm256_set1_ps(-0.f); + const auto one_vec = _mm256_set1_ps(1.0f); + const auto p = _mm256_set1_ps(0.3275911f); + const auto p1 = _mm256_set1_ps(0.254829592f); + const auto p2 = _mm256_set1_ps(-0.284496736f); + const auto p3 = _mm256_set1_ps(1.421413741f); + const auto p4 = _mm256_set1_ps(-1.453152027f); + const auto p5 = _mm256_set1_ps(1.061405429f); + // sign(x) + auto sign_mask = _mm256_and_ps(neg_zero_vec, values); + auto abs_vec = _mm256_xor_ps(sign_mask, values); + // t = 1 / (p * abs(x) + 1) + auto tmp0 = _mm256_fmadd_ps(p, abs_vec, one_vec); + auto t = _mm256_div_ps(one_vec, tmp0); + // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1 + auto tmp1 = _mm256_fmadd_ps(p5, t, p4); + auto tmp2 = _mm256_fmadd_ps(tmp1, t, p3); + auto tmp3 = _mm256_fmadd_ps(tmp2, t, p2); + auto r = 
_mm256_fmadd_ps(tmp3, t, p1); + // - exp(- x * x) + auto pow_2 = _mm256_mul_ps(values, values); + auto neg_pow_2 = _mm256_xor_ps(neg_zero_vec, pow_2); + // auto tmp4 = exp(neg_pow_2); + auto tmp4 = Vectorized(Sleef_expf8_u10(neg_pow_2)); + auto tmp5 = _mm256_xor_ps(neg_zero_vec, tmp4); + // erf(x) = sign(x) * (1 - r * t * exp(- x * x)) + auto tmp6 = _mm256_mul_ps(tmp5, t); + auto tmp7 = _mm256_fmadd_ps(tmp6, r, one_vec); + return _mm256_xor_ps(sign_mask, tmp7); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcf8_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expf8_u10(values)); + } + Vectorized exp2() const { + return Vectorized(Sleef_exp2f8_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1f8_u10(values)); + } + Vectorized exp_u20() const { + // A faster version of exp with ULP=20 + static __m256 vec_factorial_1 = + _mm256_set1_ps(0.999999701f); // 1/factorial(1) + static __m256 vec_factorial_2 = + _mm256_set1_ps(0.499991506f); // 1/factorial(2) + static __m256 vec_factorial_3 = + _mm256_set1_ps(0.166676521f); // 1/factorial(3) + static __m256 vec_factorial_4 = + _mm256_set1_ps(0.0418978221f); // 1/factorial(4) + static __m256 vec_factorial_5 = + _mm256_set1_ps(0.00828929059f); // 1/factorial(5) + static __m256 vec_exp_log2ef = + _mm256_castsi256_ps(_mm256_set1_epi32(0x3fb8aa3b)); // log2(e) + static __m256 vec_half = _mm256_set1_ps(0.5f); + static __m256 vec_one = _mm256_set1_ps(1.f); + static __m256 vec_zero = _mm256_set1_ps(0.f); + static __m256 vec_two = _mm256_set1_ps(2.f); + static __m256 vec_ln2f = _mm256_castsi256_ps(_mm256_set1_epi32(0x3f317218)); // ln(2) + static __m256 vec_ln_flt_min = _mm256_castsi256_ps(_mm256_set1_epi32(0xc2aeac50)); + static __m256 vec_ln_flt_max = _mm256_castsi256_ps(_mm256_set1_epi32(0x42b17218)); + static __m256i vec_127 = _mm256_set1_epi32(0x0000007f); + static int n_mantissa_bits = 23; + + // exp(x) = + // = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem + // = 2^n * exp(r) // simplify the exp(n*ln(2)) expression + + auto less_ln_flt_min_mask = + _mm256_cmp_ps(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/); + auto vec_src = _mm256_min_ps(values, vec_ln_flt_max); + vec_src = _mm256_max_ps(vec_src, vec_ln_flt_min); + + // fx = floorf(x * log2ef + 0.5) + auto vec_fx = _mm256_fmadd_ps(vec_src, vec_exp_log2ef, vec_half); + vec_fx = _mm256_floor_ps(vec_fx); + + // x = x - fx * ln2 + auto vec_exp_poly = _mm256_fnmadd_ps(vec_fx, vec_ln2f, vec_src); + + // compute polynomial + auto vec_res = + _mm256_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4); + vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3); + vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2); + vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1); + vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_one); + + // compute 2^(n-1) + auto vec_exp_number = _mm256_sub_ps(vec_fx, vec_one); + auto vec_exp_number_i = _mm256_cvtps_epi32(vec_exp_number); + auto vec_two_pow_n_i = _mm256_add_epi32(vec_exp_number_i, vec_127); + vec_two_pow_n_i = _mm256_slli_epi32(vec_two_pow_n_i, n_mantissa_bits); + auto vec_two_pow_n = _mm256_castsi256_ps(vec_two_pow_n_i); + vec_two_pow_n = + _mm256_blendv_ps(vec_two_pow_n, vec_zero, less_ln_flt_min_mask); + + // y = y * 2^n + vec_res = _mm256_mul_ps(vec_res, vec_two_pow_n); + vec_res = _mm256_mul_ps(vec_res, vec_two); + return vec_res; + } + Vectorized fmod(const Vectorized& q) const { + 
return Vectorized(Sleef_fmodf8(values, q)); + } + Vectorized log() const { + return Vectorized(Sleef_logf8_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2f8_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10f8_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pf8_u10(values)); + } + Vectorized frac() const; + Vectorized sin() const { + return Vectorized(Sleef_sinf8_u35(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhf8_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosf8_u35(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshf8_u10(values)); + } + Vectorized ceil() const { + return _mm256_ceil_ps(values); + } + Vectorized floor() const { + return _mm256_floor_ps(values); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotf8_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized neg() const { + return _mm256_xor_ps(_mm256_set1_ps(-0.f), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterf8(values, b)); + } + Vectorized round() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tanf8_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhf8_u10(values)); + } + Vectorized trunc() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammaf8_u10(values)); + } + Vectorized sqrt() const { + return _mm256_sqrt_ps(values); + } + Vectorized reciprocal() const { + return _mm256_div_ps(_mm256_set1_ps(1), values); + } + Vectorized rsqrt() const { + return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powf8_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ); + } + + Vectorized operator!=(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ); + } + + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ); + } + + Vectorized operator<=(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ); + } + + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ); + } + + Vectorized operator>=(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_ps(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_ps(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mul_ps(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm256_div_ps(a, b); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + Vectorized max = _mm256_max_ps(a, b); + Vectorized isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + return _mm256_or_ps(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + Vectorized min = _mm256_min_ps(a, b); + Vectorized isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
+ return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm256_min_ps(max, _mm256_max_ps(min, a)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm256_min_ps(max, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm256_max_ps(min, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_ps(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_ps(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_ps(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +template <> +inline void convert(const float* src, float* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i)); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = src[i]; + } +} + + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmadd_ps(a, b, c); +} + +template <> +Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmsub_ps(a, b, c); +} + +// Used by Inductor CPP codegen +template<> +inline void transpose_mxn( + const float* src, + int64_t ld_src, + float* dst, + int64_t ld_dst) { + // load from src to registers + // a: a0 a1 a2 a3 a4 a5 a6 a7 + // b: b0 b1 b2 b3 b4 b5 b6 b7 + // c: c0 c1 c2 c3 c4 c5 c6 c7 + // d: d0 d1 d2 d3 d4 d5 d6 d7 + // e: e0 e1 e2 e3 e4 e5 e6 e7 + // f: f0 f1 f2 f3 f4 f5 f6 f7 + // g: g0 g1 g2 g3 g4 g5 g6 g7 + // h: h0 h1 h2 h3 h4 h5 h6 h7 + __m256 a = _mm256_loadu_ps(&src[0 * ld_src]); + __m256 b = _mm256_loadu_ps(&src[1 * ld_src]); + __m256 c = _mm256_loadu_ps(&src[2 * ld_src]); + __m256 d = _mm256_loadu_ps(&src[3 * ld_src]); + __m256 e = _mm256_loadu_ps(&src[4 * ld_src]); + __m256 f = _mm256_loadu_ps(&src[5 * ld_src]); + __m256 g = _mm256_loadu_ps(&src[6 * ld_src]); + __m256 h = _mm256_loadu_ps(&src[7 * ld_src]); + + __m256 ta, tb, tc, td, te, tf, tg, th; + // unpacking and interleaving 32-bit elements + // a0 b0 a1 b1 a4 b4 a5 b5 + // a2 b2 a3 b3 a6 b6 a7 b7 + // c0 d0 c1 d1 ... + // c2 d2 c3 d3 ... + // e0 f0 e1 f1 ... + // e2 f2 e3 f3 ... + // g0 h0 g1 h1 ... + // g2 h2 g3 h3 ... 
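+  // (Added note: this 8x8 transpose proceeds in log2(8) = 3 stages:
+  //  interleave 32-bit lanes, then 64-bit pairs, then swap 128-bit halves.
+  //  Each stage issues 8 shuffles, so the full transpose is 24 shuffles.)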
+  ta = _mm256_unpacklo_ps(a, b);
+  tb = _mm256_unpackhi_ps(a, b);
+  tc = _mm256_unpacklo_ps(c, d);
+  td = _mm256_unpackhi_ps(c, d);
+  te = _mm256_unpacklo_ps(e, f);
+  tf = _mm256_unpackhi_ps(e, f);
+  tg = _mm256_unpacklo_ps(g, h);
+  th = _mm256_unpackhi_ps(g, h);
+
+  // unpacking and interleaving 64-bit elements
+  //  a0 b0 c0 d0 a4 b4 c4 d4
+  //  a1 b1 c1 d1 ...
+  //  a2 b2 c2 d2 ...
+  //  a3 b3 c3 d3 ...
+  //  e0 f0 g0 h0 e4 f4 g4 h4
+  //  e1 f1 g1 h1 ...
+  //  e2 f2 g2 h2 ...
+  //  e3 f3 g3 h3 ...
+  a = _mm256_castpd_ps(
+      _mm256_unpacklo_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
+  b = _mm256_castpd_ps(
+      _mm256_unpackhi_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
+  c = _mm256_castpd_ps(
+      _mm256_unpacklo_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
+  d = _mm256_castpd_ps(
+      _mm256_unpackhi_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
+  e = _mm256_castpd_ps(
+      _mm256_unpacklo_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
+  f = _mm256_castpd_ps(
+      _mm256_unpackhi_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
+  g = _mm256_castpd_ps(
+      _mm256_unpacklo_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
+  h = _mm256_castpd_ps(
+      _mm256_unpackhi_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
+
+  // shuffle 128-bits (composed of 4 32-bit elements)
+  //  a0 b0 c0 d0 e0 f0 g0 h0
+  //  a1 b1 c1 d1 ...
+  //  a2 b2 c2 d2 ...
+  //  a3 b3 c3 d3 ...
+  //  a4 b4 c4 d4 ...
+  //  a5 b5 c5 d5 ...
+  //  a6 b6 c6 d6 ...
+  //  a7 b7 c7 d7 ...
+  ta = _mm256_permute2f128_ps(a, e, 0x20);
+  tb = _mm256_permute2f128_ps(b, f, 0x20);
+  tc = _mm256_permute2f128_ps(c, g, 0x20);
+  td = _mm256_permute2f128_ps(d, h, 0x20);
+  te = _mm256_permute2f128_ps(a, e, 0x31);
+  tf = _mm256_permute2f128_ps(b, f, 0x31);
+  tg = _mm256_permute2f128_ps(c, g, 0x31);
+  th = _mm256_permute2f128_ps(d, h, 0x31);
+
+  // store from registers to dst
+  _mm256_storeu_ps(&dst[0 * ld_dst], ta);
+  _mm256_storeu_ps(&dst[1 * ld_dst], tb);
+  _mm256_storeu_ps(&dst[2 * ld_dst], tc);
+  _mm256_storeu_ps(&dst[3 * ld_dst], td);
+  _mm256_storeu_ps(&dst[4 * ld_dst], te);
+  _mm256_storeu_ps(&dst[5 * ld_dst], tf);
+  _mm256_storeu_ps(&dst[6 * ld_dst], tg);
+  _mm256_storeu_ps(&dst[7 * ld_dst], th);
+}
+
+template<>
+inline void transpose_mxn(
+    const float* src,
+    int64_t ld_src,
+    float* dst,
+    int64_t ld_dst) {
+  transpose_mxn(
+      src, ld_src, dst, ld_dst);
+  transpose_mxn(
+      src + 8, ld_src, dst + 8 * ld_dst, ld_dst);
+  transpose_mxn(
+      src + 8 * ld_src, ld_src, dst + 8, ld_dst);
+  transpose_mxn(
+      src + 8 * ld_src + 8, ld_src, dst + 8 * ld_dst + 8, ld_dst);
+}
+#endif
+
+}} // namespace at::vec::CPU_CAPABILITY
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdf9d66898646069265fedde88735125fbbfbb55
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h
@@ -0,0 +1,909 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include
+#include
+#include
+
+#if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
+#include
+#endif
+
+// Sleef offers vectorized versions of some transcendentals
+// such as sin, cos, tan etc.
+// For now, however, we opt for the STL, since we are not yet
+// building with Sleef for mobile.
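+// (Illustrative sketch, not part of this header: with
+// AT_BUILD_ARM_VEC256_WITH_SLEEF defined, a call such as
+//   Vectorized<float> v(0.5f);
+//   Vectorized<float> s = v.sin();  // two Sleef_sinf4_u10 calls, one per half
+// dispatches to the Sleef kernels; otherwise the same call falls back to a
+// scalar loop over std::sin via map().)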
+
+namespace at::vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+// Right now contains only the aarch64 implementation.
+// aarch32 is not currently supported, for the following two reasons:
+// 1. Due to differences in ISA between aarch32 and aarch64, intrinsics
+// that work for aarch64 don't work for aarch32.
+// 2. Android NDK r21 has problems with compiling aarch32.
+// Clang segfaults.
+// https://github.com/android/ndk/issues/1248
+// https://bugs.llvm.org/show_bug.cgi?id=45824
+// Most likely we will do aarch32 support with inline asm.
+#if defined(__aarch64__)
+
+#ifdef __BIG_ENDIAN__
+#error "Big endian is not supported."
+#endif
+
+#if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
+#define USE_SLEEF(sleef_code, non_sleef_code) sleef_code
+#else
+#define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code
+#endif
+
+template
+struct BlendRegs {
+  static float32x4_t impl(
+    const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
+};
+
+template
+struct BlendRegs{
+  static float32x4_t impl(
+      const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
+    return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
+  }
+};
+
+template
+struct BlendRegs{
+  static float32x4_t impl(
+      const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
+    return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
+  }
+};
+
+template <> class Vectorized {
+private:
+  float32x4x2_t values;
+public:
+  using value_type = float;
+  using size_type = int;
+  static constexpr size_type size() {
+    return 8;
+  }
+  Vectorized() {}
+  Vectorized(float32x4x2_t v) : values(v) {}
+  Vectorized(float val) : values{vdupq_n_f32(val), vdupq_n_f32(val)} {}
+  Vectorized(float val0, float val1, float val2, float val3,
+         float val4, float val5, float val6, float val7) :
+         values{val0, val1, val2, val3, val4, val5, val6, val7} {}
+  Vectorized(float32x4_t val0, float32x4_t val1) : values{val0, val1} {}
+  operator float32x4x2_t() const {
+    return values;
+  }
+  template
+  static Vectorized blend(const Vectorized& a, const Vectorized& b) {
+    Vectorized vec;
+    // 0.
+    vec.values.val[0] =
+      BlendRegs<0, (mask & 0x01)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    vec.values.val[0] =
+      BlendRegs<1, (mask & 0x02)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    vec.values.val[0] =
+      BlendRegs<2, (mask & 0x04)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    vec.values.val[0] =
+      BlendRegs<3, (mask & 0x08)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    // 1.
+    vec.values.val[1] =
+      BlendRegs<0, (mask & 0x10)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] =
+      BlendRegs<1, (mask & 0x20)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] =
+      BlendRegs<2, (mask & 0x40)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] =
+      BlendRegs<3, (mask & 0x80)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    return vec;
+  }
+  static Vectorized blendv(const Vectorized& a, const Vectorized& b,
+                 const Vectorized& mask) {
+    // TODO
+    // NB: This requires that each 32-bit lane of the mask be either
+    // all zeros or all ones.
+    // We perhaps need some kind of an assert?
+    // But that will affect performance.
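+    // (One possible debug-only check, sketched as a comment since the header
+    //  deliberately omits it for speed: store `mask` to a temporary array and
+    //  TORCH_INTERNAL_ASSERT that every 32-bit lane is 0x00000000 or
+    //  0xFFFFFFFF before handing it to vbslq_f32.)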
+ Vectorized vec(mask.values); + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + const Vectorized base_vec(base); + const Vectorized step_vec(step); + const Vectorized step_sizes(0, 1, 2, 3, 4, 5, 6, 7); + return fmadd(step_sizes, step_vec, base_vec); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + { + Vectorized vec; + static uint32x4_t mask_low = {0xFFFFFFFF, 0x0, 0x0, 0x0}; + vec.values.val[0] = vreinterpretq_f32_u32(mask_low); + vec.values.val[1] = a.values.val[1]; + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + return vec; + } + case 2: + { + Vectorized vec; + static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0}; + vec.values.val[0] = vreinterpretq_f32_u32(mask_low); + vec.values.val[1] = a.values.val[1]; + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + return vec; + } + case 3: + { + Vectorized vec; + static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0}; + vec.values.val[0] = vreinterpretq_f32_u32(mask_low); + vec.values.val[1] = a.values.val[1]; + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + return vec; + } + case 4: + return Vectorized(b.values.val[0], a.values.val[1]); + case 5: + { + Vectorized vec; + static uint32x4_t mask_high = {0xFFFFFFFF, 0x0, 0x0, 0x0}; + vec.values.val[0] = b.values.val[0]; + vec.values.val[1] = vreinterpretq_f32_u32(mask_high); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + case 6: + { + Vectorized vec; + static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0}; + vec.values.val[0] = b.values.val[0]; + vec.values.val[1] = vreinterpretq_f32_u32(mask_high); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + case 7: + { + Vectorized vec; + static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0}; + vec.values.val[0] = b.values.val[0]; + vec.values.val[1] = vreinterpretq_f32_u32(mask_high); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) { + return vld1q_f32_x2(reinterpret_cast(ptr)); + } + else if (count == (size() >> 1)) { + Vectorized res; + res.values.val[0] = vld1q_f32(reinterpret_cast(ptr)); + res.values.val[1] = vdupq_n_f32(0.f); + return res; + } + else { + __at_align__ float tmp_values[size()]; + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(float)); + return vld1q_f32_x2(reinterpret_cast(tmp_values)); + } + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + vst1q_f32_x2(reinterpret_cast(ptr), values); + } + else if (count == (size() >> 1)) { + vst1q_f32(reinterpret_cast(ptr), values.val[0]); + } + else { + float 
tmp_values[size()];
+      vst1q_f32_x2(reinterpret_cast(tmp_values), values);
+      std::memcpy(ptr, tmp_values, count * sizeof(float));
+    }
+  }
+  inline const float32x4_t& get_low() const {
+    return values.val[0];
+  }
+  inline float32x4_t& get_low() {
+    return values.val[0];
+  }
+  inline const float32x4_t& get_high() const {
+    return values.val[1];
+  }
+  inline float32x4_t& get_high() {
+    return values.val[1];
+  }
+  // Very slow implementation of indexing.
+  // Only required because vec256_qint refers to this.
+  // Once we specialize that implementation for ARM
+  // this should be removed. TODO (kimishpatel)
+  float operator[](int idx) const {
+    __at_align__ float tmp[size()];
+    store(tmp);
+    return tmp[idx];
+  }
+  float operator[](int idx) {
+    __at_align__ float tmp[size()];
+    store(tmp);
+    return tmp[idx];
+  }
+  // For a boolean vector, checks such as "is any lane set" or
+  // "are all lanes zero" can be done faster in a different way.
+  int zero_mask() const {
+    __at_align__ float tmp[size()];
+    store(tmp);
+    int mask = 0;
+    for (int i = 0; i < size(); ++ i) {
+      if (tmp[i] == 0.f) {
+        mask |= (1 << i);
+      }
+    }
+    return mask;
+  }
+  Vectorized isnan() const {
+    __at_align__ float tmp[size()];
+    __at_align__ float res[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      if (_isnan(tmp[i])) {
+        std::memset(static_cast(&res[i]), 0xFF, sizeof(float));
+      } else {
+        std::memset(static_cast(&res[i]), 0, sizeof(float));
+      }
+    }
+    return loadu(res);
+  };
+  bool has_inf_nan() const {
+    __at_align__ float tmp[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      if(_isnan(tmp[i]) || _isinf(tmp[i])) {
+        return true;
+      }
+    }
+    return false;
+  }
+  Vectorized map(float (*const f)(float)) const {
+    __at_align__ float tmp[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      tmp[i] = f(tmp[i]);
+    }
+    return loadu(tmp);
+  }
+  Vectorized abs() const {
+    return Vectorized(vabsq_f32(values.val[0]), vabsq_f32(values.val[1]));
+  }
+  Vectorized angle() const {
+    auto zero = Vectorized(0);
+    auto pi = Vectorized(c10::pi);
+    auto tmp = blendv(zero, pi, *this < zero);
+    return blendv(tmp, *this, isnan());
+  }
+  Vectorized real() const {
+    return *this;
+  }
+  Vectorized imag() const {
+    return Vectorized(0.f);
+  }
+  Vectorized conj() const {
+    return *this;
+  }
+  Vectorized acos() const {
+    return USE_SLEEF(
+      Vectorized(Sleef_acosf4_u10(values.val[0]), Sleef_acosf4_u10(values.val[1])),
+      map(std::acos)
+    );
+  }
+  Vectorized acosh() const {
+    return USE_SLEEF(
+      Vectorized(Sleef_acoshf4_u10(values.val[0]), Sleef_acoshf4_u10(values.val[1])),
+      map(std::acosh)
+    );
+  }
+  Vectorized asin() const {
+    return USE_SLEEF(
+      Vectorized(Sleef_asinf4_u10(values.val[0]), Sleef_asinf4_u10(values.val[1])),
+      map(std::asin)
+    );
+  }
+  Vectorized atan() const {
+    return USE_SLEEF(
+      Vectorized(Sleef_atanf4_u10(values.val[0]), Sleef_atanf4_u10(values.val[1])),
+      map(std::atan)
+    );
+  }
+  Vectorized atanh() const {
+    return USE_SLEEF(
+      Vectorized(Sleef_atanhf4_u10(values.val[0]), Sleef_atanhf4_u10(values.val[1])),
+      map(std::atanh)
+    );
+  }
+  Vectorized atan2(const Vectorized &exp) const {
+    USE_SLEEF(
+      {
+        return Vectorized(Sleef_atan2f4_u10(values.val[0], exp.values.val[0]),
+                  Sleef_atan2f4_u10(values.val[1], exp.values.val[1]));
+      },
+      {
+        __at_align__ float tmp[size()];
+        __at_align__ float tmp_exp[size()];
+        store(tmp);
+        exp.store(tmp_exp);
+        for (const auto i : c10::irange(size())) {
+          tmp[i] = std::atan2(tmp[i], tmp_exp[i]);
+        }
+        return loadu(tmp);
+      }
+    )
+  }
+  Vectorized copysign(const Vectorized
&sign) const { + USE_SLEEF( + { + return Vectorized(Sleef_copysignf4(values.val[0], sign.values.val[0]), + Sleef_copysignf4(values.val[1], sign.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_sign[size()]; + store(tmp); + sign.store(tmp_sign); + for (size_type i = 0; i < size(); i++) { + tmp[i] = std::copysign(tmp[i], tmp_sign[i]); + } + return loadu(tmp); + } + ) + } + Vectorized erf() const; + Vectorized erfc() const { + return USE_SLEEF( + Vectorized(Sleef_erfcf4_u15(values.val[0]), Sleef_erfcf4_u15(values.val[1])), + map(std::erfc) + ); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return USE_SLEEF( + Vectorized(Sleef_expf4_u10(values.val[0]), Sleef_expf4_u10(values.val[1])), + map(std::exp) + ); + } + Vectorized exp2() const { + return USE_SLEEF( + Vectorized(Sleef_exp2f4_u10(values.val[0]), Sleef_exp2f4_u10(values.val[1])), + map(std::exp2) + ); + } + Vectorized expm1() const { + return USE_SLEEF( + Vectorized(Sleef_expm1f4_u10(values.val[0]), Sleef_expm1f4_u10(values.val[1])), + map(std::expm1) + ); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized& q) const { + USE_SLEEF( + { + return Vectorized(Sleef_fmodf4(values.val[0], q.values.val[0]), + Sleef_fmodf4(values.val[1], q.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_q[size()]; + store(tmp); + q.store(tmp_q); + for (const auto i : c10::irange(size())) { + tmp[i] = std::fmod(tmp[i], tmp_q[i]); + } + return loadu(tmp); + } + ) + } + Vectorized hypot(const Vectorized &b) const { + USE_SLEEF( + { + return Vectorized(Sleef_hypotf4_u05(values.val[0], b.values.val[0]), + Sleef_hypotf4_u05(values.val[1], b.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_b[size()]; + store(tmp); + b.store(tmp_b); + for (const auto i : c10::irange(size())) { + tmp[i] = std::hypot(tmp[i], tmp_b[i]); + } + return loadu(tmp); + } + ) + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return USE_SLEEF( + Vectorized(Sleef_logf4_u10(values.val[0]), Sleef_logf4_u10(values.val[1])), + map(std::log) + ); + } + Vectorized log10() const { + return USE_SLEEF( + Vectorized(Sleef_log10f4_u10(values.val[0]), Sleef_log10f4_u10(values.val[1])), + map(std::log10) + ); + } + Vectorized log1p() const { + return USE_SLEEF( + Vectorized(Sleef_log1pf4_u10(values.val[0]), Sleef_log1pf4_u10(values.val[1])), + map(std::log1p) + ); + } + Vectorized log2() const { + return USE_SLEEF( + Vectorized(Sleef_log2f4_u10(values.val[0]), Sleef_log2f4_u10(values.val[1])), + map(std::log2) + ); + } + Vectorized nextafter(const Vectorized &b) const { + USE_SLEEF( + { + return Vectorized(Sleef_nextafterf4(values.val[0], b.values.val[0]), + Sleef_nextafterf4(values.val[1], b.values.val[1])); + }, + { + __at_align__ float 
tmp[size()]; + __at_align__ float tmp_b[size()]; + store(tmp); + b.store(tmp_b); + for (const auto i : c10::irange(size())) { + tmp[i] = std::nextafter(tmp[i], tmp_b[i]); + } + return loadu(tmp); + } + ) + } + Vectorized frac() const; + Vectorized sin() const { + return USE_SLEEF( + Vectorized(Sleef_sinf4_u10(values.val[0]), Sleef_sinf4_u10(values.val[1])), + map(std::sin) + ); + } + Vectorized sinh() const { + return USE_SLEEF( + Vectorized(Sleef_sinhf4_u10(values.val[0]), Sleef_sinhf4_u10(values.val[1])), + map(std::sinh) + ); + } + Vectorized cos() const { + return USE_SLEEF( + Vectorized(Sleef_cosf4_u10(values.val[0]), Sleef_cosf4_u10(values.val[1])), + map(std::cos) + ); + } + Vectorized cosh() const { + return USE_SLEEF( + Vectorized(Sleef_coshf4_u10(values.val[0]), Sleef_coshf4_u10(values.val[1])), + map(std::cosh) + ); + } + Vectorized ceil() const { + return map(at::native::ceil_impl); + } + Vectorized floor() const { + return map(at::native::floor_impl); + } + Vectorized neg() const { + return Vectorized( + vnegq_f32(values.val[0]), + vnegq_f32(values.val[1])); + } + Vectorized round() const { + // We do not use std::round because we would like to round midway numbers to the nearest even integer. + return map(at::native::round_impl); + } + Vectorized tan() const { + return USE_SLEEF( + Vectorized(Sleef_tanf4_u10(values.val[0]), Sleef_tanf4_u10(values.val[1])), + map(std::tan) + ); + } + Vectorized tanh() const { + return USE_SLEEF( + Vectorized(Sleef_tanhf4_u10(values.val[0]), Sleef_tanhf4_u10(values.val[1])), + map(std::tanh) + ); + } + Vectorized trunc() const { + float32x4_t r0 = vrndq_f32(values.val[0]); + float32x4_t r1 = vrndq_f32(values.val[1]); + return Vectorized(r0, r1); + } + Vectorized lgamma() const { + return USE_SLEEF( + Vectorized(Sleef_lgammaf4_u10(values.val[0]), Sleef_lgammaf4_u10(values.val[1])), + map(std::lgamma) + ); + } + Vectorized sqrt() const { + return Vectorized( + vsqrtq_f32(values.val[0]), + vsqrtq_f32(values.val[1])); + } + Vectorized reciprocal() const { + auto r0 = vdivq_f32(vdupq_n_f32(1.0f), values.val[0]); + auto r1 = vdivq_f32(vdupq_n_f32(1.0f), values.val[1]); + return Vectorized(r0, r1); + } + Vectorized rsqrt() const { + return this->sqrt().reciprocal(); + } + Vectorized pow(const Vectorized &exp) const { + USE_SLEEF( + { + return Vectorized(Sleef_powf4_u10(values.val[0], exp.values.val[0]), + Sleef_powf4_u10(values.val[1], exp.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_exp[size()]; + store(tmp); + exp.store(tmp_exp); + for (const auto i : c10::irange(size())) { + tmp[i] = std::pow(tmp[i], tmp_exp[i]); + } + return loadu(tmp); + } + ) + } + Vectorized operator==(const Vectorized& other) const { + float32x4_t r0 = + vreinterpretq_f32_u32(vceqq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vceqq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator!=(const Vectorized& other) const { + float32x4_t r0 = vreinterpretq_f32_u32( + vmvnq_u32(vceqq_f32(values.val[0], other.values.val[0]))); + float32x4_t r1 = vreinterpretq_f32_u32( + vmvnq_u32(vceqq_f32(values.val[1], other.values.val[1]))); + return Vectorized(r0, r1); + } + + Vectorized operator<(const Vectorized& other) const { + float32x4_t r0 = + vreinterpretq_f32_u32(vcltq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vcltq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator<=(const 
Vectorized& other) const {
+    float32x4_t r0 =
+      vreinterpretq_f32_u32(vcleq_f32(values.val[0], other.values.val[0]));
+    float32x4_t r1 =
+      vreinterpretq_f32_u32(vcleq_f32(values.val[1], other.values.val[1]));
+    return Vectorized(r0, r1);
+  }
+
+  Vectorized operator>(const Vectorized& other) const {
+    float32x4_t r0 =
+      vreinterpretq_f32_u32(vcgtq_f32(values.val[0], other.values.val[0]));
+    float32x4_t r1 =
+      vreinterpretq_f32_u32(vcgtq_f32(values.val[1], other.values.val[1]));
+    return Vectorized(r0, r1);
+  }
+
+  Vectorized operator>=(const Vectorized& other) const {
+    float32x4_t r0 =
+      vreinterpretq_f32_u32(vcgeq_f32(values.val[0], other.values.val[0]));
+    float32x4_t r1 =
+      vreinterpretq_f32_u32(vcgeq_f32(values.val[1], other.values.val[1]));
+    return Vectorized(r0, r1);
+  }
+
+  Vectorized eq(const Vectorized& other) const;
+  Vectorized ne(const Vectorized& other) const;
+  Vectorized gt(const Vectorized& other) const;
+  Vectorized ge(const Vectorized& other) const;
+  Vectorized lt(const Vectorized& other) const;
+  Vectorized le(const Vectorized& other) const;
+};
+
+template <>
+Vectorized inline operator+(const Vectorized& a, const Vectorized& b) {
+  float32x4_t r0 = vaddq_f32(a.get_low(), b.get_low());
+  float32x4_t r1 = vaddq_f32(a.get_high(), b.get_high());
+  return Vectorized(r0, r1);
+}
+
+template <>
+Vectorized inline operator-(const Vectorized& a, const Vectorized& b) {
+  float32x4_t r0 = vsubq_f32(a.get_low(), b.get_low());
+  float32x4_t r1 = vsubq_f32(a.get_high(), b.get_high());
+  return Vectorized(r0, r1);
+}
+
+template <>
+Vectorized inline operator*(const Vectorized& a, const Vectorized& b) {
+  float32x4_t r0 = vmulq_f32(a.get_low(), b.get_low());
+  float32x4_t r1 = vmulq_f32(a.get_high(), b.get_high());
+  return Vectorized(r0, r1);
+}
+
+template <>
+Vectorized inline operator/(const Vectorized& a, const Vectorized& b) {
+  float32x4_t r0 = vdivq_f32(a.get_low(), b.get_low());
+  float32x4_t r1 = vdivq_f32(a.get_high(), b.get_high());
+  return Vectorized(r0, r1);
+}
+
+// frac. Implement this here so we can use subtraction
+inline Vectorized Vectorized::frac() const {
+  return *this - this->trunc();
+}
+
+// Added Sleef implementation for maximum.
+// (vmaxq_f32 propagates NaN, so the fallback path keeps NaN behavior.)
+template <>
+Vectorized inline maximum(const Vectorized& a, const Vectorized& b) {
+  if (!a.has_inf_nan() && !b.has_inf_nan()) {
+    return USE_SLEEF(
+      Vectorized(Sleef_fmaxf4(a.get_low(), b.get_low()), Sleef_fmaxf4(a.get_high(), b.get_high())),
+      Vectorized(vmaxq_f32(a.get_low(), b.get_low()), vmaxq_f32(a.get_high(), b.get_high())));
+  } else {
+    return Vectorized(vmaxq_f32(a.get_low(), b.get_low()), vmaxq_f32(a.get_high(), b.get_high()));
+  }
+}
+
+// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
+// either input is a NaN.
+template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vminq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vminq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return minimum(max, maximum(min, a)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return minimum(max, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return maximum(min, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vreinterpretq_f32_u32(vandq_u32( + vreinterpretq_u32_f32(a.get_low()), + vreinterpretq_u32_f32(b.get_low()))); + float32x4_t r1 = vreinterpretq_f32_u32(vandq_u32( + vreinterpretq_u32_f32(a.get_high()), + vreinterpretq_u32_f32(b.get_high()))); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vreinterpretq_f32_u32(vorrq_u32( + vreinterpretq_u32_f32(a.get_low()), + vreinterpretq_u32_f32(b.get_low()))); + float32x4_t r1 = vreinterpretq_f32_u32(vorrq_u32( + vreinterpretq_u32_f32(a.get_high()), + vreinterpretq_u32_f32(b.get_high()))); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vreinterpretq_f32_u32(veorq_u32( + vreinterpretq_u32_f32(a.get_low()), + vreinterpretq_u32_f32(b.get_low()))); + float32x4_t r1 = vreinterpretq_f32_u32(veorq_u32( + vreinterpretq_u32_f32(a.get_high()), + vreinterpretq_u32_f32(b.get_high()))); + return Vectorized(r0, r1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +template <> +inline void convert(const float* src, int32_t* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i))); + vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4))); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int32_t* src, float* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i))); + vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4))); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + float32x4_t r0 = vfmaq_f32(c.get_low(), a.get_low(), b.get_low()); + float32x4_t r1 = 
vfmaq_f32(c.get_high(), a.get_high(), b.get_high());
+  return Vectorized(r0, r1);
+}
+
+template <>
+Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) {
+  float32x4_t r0 = vfmsq_f32(c.get_low(), a.get_low(), b.get_low());
+  float32x4_t r1 = vfmsq_f32(c.get_high(), a.get_high(), b.get_high());
+  return Vectorized(r0, r1);
+}
+
+inline Vectorized Vectorized::erf() const {
+  // constants
+  const Vectorized neg_zero_vec(-0.f);
+  const Vectorized one_vec(1.0f);
+  const Vectorized p(0.3275911f);
+  const Vectorized p1(0.254829592f);
+  const Vectorized p2(-0.284496736f);
+  const Vectorized p3(1.421413741f);
+  const Vectorized p4(-1.453152027f);
+  const Vectorized p5(1.061405429f);
+  // sign(x)
+  auto sign_mask = neg_zero_vec & *this;
+  auto abs_vec = this->abs();
+  // t = 1 / (p * abs(x) + 1)
+  auto tmp0 = fmadd(p, abs_vec, one_vec);
+  auto t = one_vec / tmp0;
+  // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
+  auto tmp1 = fmadd(p5, t, p4);
+  auto tmp2 = fmadd(tmp1, t, p3);
+  auto tmp3 = fmadd(tmp2, t, p2);
+  auto r = fmadd(tmp3, t, p1);
+  // - exp(- x * x)
+  auto pow_2 = (*this) * (*this);
+  auto neg_pow_2 = pow_2 ^ neg_zero_vec;
+  auto tmp4 = neg_pow_2.map(std::exp); // This can be swapped for a faster implementation of exp.
+  auto tmp5 = tmp4 ^ neg_zero_vec;
+  // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
+  auto tmp6 = t * tmp5;
+  auto tmp7 = fmadd(tmp6, r, one_vec);
+  return tmp7 ^ sign_mask;
+}
+#endif /* defined(__aarch64__) */
+
+}} // namespace at::vec::CPU_CAPABILITY
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b51972a029b445cfd596a91d943ef208632edea
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h
@@ -0,0 +1,826 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include
+#include
+#include
+#include
+#include
+
+namespace at::vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+// Right now contains only the aarch64 implementation.
+// aarch32 is not currently supported, for the following two reasons:
+// 1. Due to differences in ISA between aarch32 and aarch64, intrinsics
+// that work for aarch64 don't work for aarch32.
+// 2. Android NDK r21 has problems with compiling aarch32.
+// Clang segfaults.
+// https://github.com/android/ndk/issues/1248
+// https://bugs.llvm.org/show_bug.cgi?id=45824
+// Most likely we will do aarch32 support with inline asm.
+#if !defined(C10_MOBILE) && defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+#ifdef __BIG_ENDIAN__
+#error "Big endian is not supported."
+#endif + +template +struct BlendHalfRegs { + static float16x8_t impl( + const float16x8_t& a, + const float16x8_t& b, + float16x8_t& res); +}; + +template +struct BlendHalfRegs { + static float16x8_t impl( + const float16x8_t& a, + const float16x8_t& b, + float16x8_t& res) { + return vsetq_lane_f16(vgetq_lane_f16(b, index), res, index); + } +}; + +template +struct BlendHalfRegs { + static float16x8_t impl( + const float16x8_t& a, + const float16x8_t& b, + float16x8_t& res) { + return vsetq_lane_f16(vgetq_lane_f16(a, index), res, index); + } +}; + +// On ARM, Half type supports float16_t->Half constructor and Half->float16_t +// conversion +template <> +class Vectorized { + private: + float16x8x2_t values; + + public: + // value_type should be c10::Half to fit interface with vec_base.h + using value_type = c10::Half; + using size_type = int; + static constexpr size_type size() { + static_assert(sizeof(float16x8x2_t) == 16 * sizeof(value_type)); + return 16; + } + + private: + // We use these private map functions to implement various methods + Vectorized map2( + const Vectorized& second, + c10::Half (*const f)(c10::Half, c10::Half)) const { + __at_align__ c10::Half tmp_first[size()]; + __at_align__ c10::Half tmp_second[size()]; + store(tmp_first); // store this to tmp_first + second.store(tmp_second); + for (const auto i : c10::irange(size())) { + tmp_first[i] = f(tmp_first[i], tmp_second[i]); + } + return loadu(tmp_first); + } + + Vectorized map_with_vec_float_method( + Vectorized (Vectorized::*m)() const) const { + // Convert low float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values.val[0])); + float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values.val[0])); + Vectorized mv0 = (Vectorized(v00, v01).*m)(); + float16x4_t r00 = vcvt_f16_f32(mv0.get_low()); + float16x4_t r01 = vcvt_f16_f32(mv0.get_high()); + + // Convert high float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v10 = vcvt_f32_f16(vget_low_f16(values.val[1])); + float32x4_t v11 = vcvt_f32_f16(vget_high_f16(values.val[1])); + Vectorized mv1 = (Vectorized(v10, v11).*m)(); + float16x4_t r10 = vcvt_f16_f32(mv1.get_low()); + float16x4_t r11 = vcvt_f16_f32(mv1.get_high()); + + // Pack result into Vectorized + return Vectorized( + vcombine_f16(r00, r01), vcombine_f16(r10, r11)); + } + + Vectorized map2_with_vec_float_method( + const Vectorized& second, + Vectorized (Vectorized::*m)(const Vectorized&) + const) const { + // Convert low float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values.val[0])); + float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values.val[0])); + float32x4_t second_v00 = vcvt_f32_f16(vget_low_f16(second.get_low())); + float32x4_t second_v01 = vcvt_f32_f16(vget_high_f16(second.get_low())); + Vectorized mv0 = (Vectorized(v00, v01).*m)( + Vectorized(second_v00, second_v01)); + float16x4_t r00 = vcvt_f16_f32(mv0.get_low()); + float16x4_t r01 = vcvt_f16_f32(mv0.get_high()); + + // Convert high float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v10 = vcvt_f32_f16(vget_low_f16(values.val[1])); + float32x4_t v11 = vcvt_f32_f16(vget_high_f16(values.val[1])); + float32x4_t second_v10 = vcvt_f32_f16(vget_low_f16(second.get_high())); + float32x4_t second_v11 = vcvt_f32_f16(vget_high_f16(second.get_high())); + Vectorized mv1 = (Vectorized(v10, v11).*m)( + Vectorized(second_v10, second_v11)); + float16x4_t r10 = 
vcvt_f16_f32(mv1.get_low()); + float16x4_t r11 = vcvt_f16_f32(mv1.get_high()); + + // Pack result into Vectorized + return Vectorized( + vcombine_f16(r00, r01), vcombine_f16(r10, r11)); + } + + public: + // constructor + Vectorized() {} + Vectorized(float16x8x2_t v) : values(v) {} + + // A ctor that accepts c10::Half is needed to fit interface with vec_base.h + // A second constructor that takes float16_t is also included + Vectorized(c10::Half val) + : values{vdupq_n_f16((float16_t)val), vdupq_n_f16((float16_t)val)} { + } + Vectorized(float16_t val) : values{vdupq_n_f16(val), vdupq_n_f16(val)} {} + Vectorized( + float16_t val0, + float16_t val1, + float16_t val2, + float16_t val3, + float16_t val4, + float16_t val5, + float16_t val6, + float16_t val7, + float16_t val8, + float16_t val9, + float16_t val10, + float16_t val11, + float16_t val12, + float16_t val13, + float16_t val14, + float16_t val15) + : values{ + val0, + val1, + val2, + val3, + val4, + val5, + val6, + val7, + val8, + val9, + val10, + val11, + val12, + val13, + val14, + val15} {} + Vectorized(float16x8_t val0, float16x8_t val1) : values{val0, val1} {} + operator float16x8x2_t() const { + return values; + } + template + static Vectorized blend( + const Vectorized& a, + const Vectorized& b) { + Vectorized vec; + // 0. + vec.values.val[0] = BlendHalfRegs<0, (mask & 0x01) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<1, (mask & 0x02) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<2, (mask & 0x04) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<3, (mask & 0x08) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + + vec.values.val[0] = BlendHalfRegs<4, (mask & 0x10) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<5, (mask & 0x20) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<6, (mask & 0x40) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<7, (mask & 0x80) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + + // 1. 
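+    // (Added note: the high float16x8_t holds lanes 8-15, so its blend
+    //  decisions come from mask bits 8-15, i.e. 0x100 through 0x8000,
+    //  mirroring bits 0x01 through 0x80 used for lanes 0-7 above.)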
+    vec.values.val[1] = BlendHalfRegs<0, (mask & 0x100) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] = BlendHalfRegs<1, (mask & 0x200) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] = BlendHalfRegs<2, (mask & 0x400) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] = BlendHalfRegs<3, (mask & 0x800) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+
+    vec.values.val[1] = BlendHalfRegs<4, (mask & 0x1000) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] = BlendHalfRegs<5, (mask & 0x2000) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] = BlendHalfRegs<6, (mask & 0x4000) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] = BlendHalfRegs<7, (mask & 0x8000) != 0>::impl(
+        a.values.val[1], b.values.val[1], vec.values.val[1]);
+
+    return vec;
+  }
+  static Vectorized blendv(
+      const Vectorized& a,
+      const Vectorized& b,
+      const Vectorized& mask) {
+    // Note: using blendv is very awkward because 0xFFFF is one of many NaNs
+    // in FP16. It's unfortunate that the mask has type Half (required from
+    // vec_base).
+
+    // TODO
+    // NB: This requires that each 16-bit lane of the mask be either
+    // all zeros or all ones.
+    // We perhaps need some kind of an assert?
+    // But that will affect performance.
+    Vectorized vec(mask.values);
+    vec.values.val[0] = vbslq_f16(
+        vreinterpretq_u16_f16(vec.values.val[0]),
+        b.values.val[0],
+        a.values.val[0]);
+    vec.values.val[1] = vbslq_f16(
+        vreinterpretq_u16_f16(vec.values.val[1]),
+        b.values.val[1],
+        a.values.val[1]);
+    return vec;
+  }
+  template
+  static Vectorized arange(
+      c10::Half base = 0.0,
+      step_t step = static_cast(1)) {
+    const Vectorized base_vec(base);
+    const Vectorized step_vec(step);
+    const Vectorized step_sizes(
+        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    return fmadd(step_sizes, step_vec, base_vec);
+  }
+  static Vectorized set(
+      const Vectorized& a,
+      const Vectorized& b,
+      int64_t count = size()) {
+    uint16_t pre_mask[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+    for (int i = 0; i < count; i++) {
+      pre_mask[i] = 0xFFFF;
+    }
+    uint16x8x2_t mask = vld1q_u16_x2(pre_mask);
+
+    // Using blendv is awkward because 0xFFFF is one of many NaNs in FP16,
+    // so we directly use vbslq_f16 instead.
+    Vectorized vec(
+        vbslq_f16(
+            // Low bits
+            mask.val[0],
+            b.values.val[0],
+            a.values.val[0]),
+        // High bits
+        vbslq_f16(mask.val[1], b.values.val[1], a.values.val[1]));
+
+    return vec;
+  }
+  static Vectorized loadu(const void* ptr, int64_t count = size()) {
+    if (count == size()) {
+      return vld1q_f16_x2(reinterpret_cast(ptr));
+    } else if (count == (size() >> 1)) {
+      Vectorized res;
+      res.values.val[0] = vld1q_f16(reinterpret_cast(ptr));
+      std::memset(&res.values.val[1], 0, sizeof(res.values.val[1]));
+      return res;
+    }
+    __at_align__ float16_t tmp_values[size()];
+    for (const auto i : c10::irange(size())) {
+      tmp_values[i] = 0;
+    }
+    std::memcpy(
+        tmp_values,
+        reinterpret_cast(ptr),
+        count * sizeof(float16_t));
+    return vld1q_f16_x2(reinterpret_cast(tmp_values));
+  }
+  void store(void* ptr, int64_t count = size()) const {
+    if (count == size()) {
+      vst1q_f16_x2(reinterpret_cast(ptr), values);
+      return;
+    } else if (count == (size() >> 1)) {
+      vst1q_f16(reinterpret_cast(ptr), values.val[0]);
+    } else {
+      float16_t tmp_values[size()];
+
vst1q_f16_x2(reinterpret_cast(tmp_values), values);
+      std::memcpy(ptr, tmp_values, count * sizeof(float16_t));
+    }
+  }
+  inline const float16x8_t& get_low() const {
+    return values.val[0];
+  }
+  inline float16x8_t& get_low() {
+    return values.val[0];
+  }
+  inline const float16x8_t& get_high() const {
+    return values.val[1];
+  }
+  inline float16x8_t& get_high() {
+    return values.val[1];
+  }
+  // Very slow implementation of indexing.
+  // Only required because vec256_qint refers to this.
+  // Once we specialize that implementation for ARM
+  // this should be removed. TODO (kimishpatel)
+  c10::Half operator[](int idx) const {
+    __at_align__ c10::Half tmp[size()];
+    store(tmp);
+    return tmp[idx];
+  }
+  c10::Half operator[](int idx) {
+    __at_align__ c10::Half tmp[size()];
+    store(tmp);
+    return tmp[idx];
+  }
+  // For a boolean vector, checks such as "is any lane set" or
+  // "are all lanes zero" can be done faster in a different way.
+  int zero_mask() const {
+    __at_align__ c10::Half tmp[size()];
+    store(tmp);
+    int mask = 0;
+    for (int i = 0; i < size(); ++i) {
+      if (tmp[i] == 0) {
+        mask |= (1 << i);
+      }
+    }
+    return mask;
+  }
+  Vectorized isnan() const {
+    __at_align__ c10::Half tmp[size()];
+    __at_align__ c10::Half res[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      if (_isnan(tmp[i])) {
+        std::memset(static_cast(&res[i]), 0xFF, sizeof(c10::Half));
+      } else {
+        std::memset(static_cast(&res[i]), 0, sizeof(c10::Half));
+      }
+    }
+    return loadu(res);
+  };
+  bool has_inf_nan() const {
+    __at_align__ c10::Half tmp[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      if (_isnan(tmp[i]) || _isinf(tmp[i])) {
+        return true;
+      }
+    }
+    return false;
+  }
+  Vectorized map(c10::Half (*const f)(c10::Half)) const {
+    __at_align__ c10::Half tmp[size()];
+    store(tmp);
+    for (const auto i : c10::irange(size())) {
+      tmp[i] = f(tmp[i]);
+    }
+    return loadu(tmp);
+  }
+  Vectorized abs() const {
+    return Vectorized(
+        vabsq_f16(values.val[0]), vabsq_f16(values.val[1]));
+  }
+  Vectorized angle() const {
+    auto zero = Vectorized(0);
+    auto pi = Vectorized(c10::pi);
+    auto tmp = blendv(zero, pi, *this < zero);
+    return blendv(tmp, *this, isnan());
+  }
+  Vectorized real() const {
+    return *this;
+  }
+  Vectorized imag() const {
+    return Vectorized(0);
+  }
+  Vectorized conj() const {
+    return *this;
+  }
+
+  // Sleef does not support FP16, so many math functions are applied by
+  // converting to FP32, applying the math function, and then converting back
+  // to FP16.
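+  // (For example, acos() below widens each float16x8_t into two float32x4_t
+  //  with vcvt_f32_f16, applies Vectorized<float>::acos, and narrows the
+  //  results back with vcvt_f16_f32 via map_with_vec_float_method.)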
+ Vectorized acos() const { + return map_with_vec_float_method(&Vectorized::acos); + } + Vectorized acosh() const { + return map_with_vec_float_method(&Vectorized::acosh); + } + Vectorized asin() const { + return map_with_vec_float_method(&Vectorized::asin); + } + Vectorized atan() const { + return map_with_vec_float_method(&Vectorized::atan); + } + Vectorized atanh() const { + return map_with_vec_float_method(&Vectorized::atanh); + } + Vectorized atan2(const Vectorized& exp) const { + return map2_with_vec_float_method(exp, &Vectorized::atan2); + } + Vectorized copysign(const Vectorized& sign) const { + return map2_with_vec_float_method(sign, &Vectorized::copysign); + } + Vectorized erf() const { + return map_with_vec_float_method(&Vectorized::erf); + } + Vectorized erfc() const { + return map_with_vec_float_method(&Vectorized::erfc); + } + Vectorized erfinv() const { + return map_with_vec_float_method(&Vectorized::erfinv); + } + Vectorized exp() const { + return map_with_vec_float_method(&Vectorized::exp); + } + Vectorized exp2() const { + return map_with_vec_float_method(&Vectorized::exp2); + } + Vectorized expm1() const { + return map_with_vec_float_method(&Vectorized::expm1); + } + Vectorized exp_u20() const { + return map_with_vec_float_method(&Vectorized::exp_u20); + } + Vectorized fmod(const Vectorized& q) const { + // This function is questionable with a conversion, so we use map2 + return map2(q, std::fmod); + } + Vectorized hypot(const Vectorized& b) const { + return map2_with_vec_float_method(b, &Vectorized::hypot); + } + Vectorized i0() const { + return map_with_vec_float_method(&Vectorized::i0); + } + Vectorized i0e() const { + return map_with_vec_float_method(&Vectorized::i0e); + } + Vectorized digamma() const { + return map_with_vec_float_method(&Vectorized::digamma); + } + Vectorized igamma(const Vectorized& x) const { + return map2_with_vec_float_method(x, &Vectorized::igamma); + } + Vectorized igammac(const Vectorized& x) const { + return map2_with_vec_float_method(x, &Vectorized::igammac); + } + Vectorized log() const { + return map_with_vec_float_method(&Vectorized::log); + } + Vectorized log10() const { + return map_with_vec_float_method(&Vectorized::log10); + } + Vectorized log1p() const { + return map_with_vec_float_method(&Vectorized::log1p); + } + Vectorized log2() const { + return map_with_vec_float_method(&Vectorized::log2); + } + Vectorized nextafter(const Vectorized& b) const { + // This function does not make sense with conversion, so we use map2 + return map2(b, std::nextafter); + } + Vectorized frac() const; + Vectorized sin() const { + return map_with_vec_float_method(&Vectorized::sin); + } + Vectorized sinh() const { + return map_with_vec_float_method(&Vectorized::sinh); + } + Vectorized cos() const { + return map_with_vec_float_method(&Vectorized::cos); + } + Vectorized cosh() const { + return map_with_vec_float_method(&Vectorized::cosh); + } + Vectorized ceil() const { + // This function is questionable with a conversion, so we use map + return map(at::native::ceil_impl); + } + Vectorized floor() const { + // This function is questionable with a conversion, so we use map + return map(at::native::floor_impl); + } + Vectorized neg() const { + return Vectorized( + vnegq_f16(values.val[0]), vnegq_f16(values.val[1])); + } + inline Vectorized round() const { + // This function is questionable with a conversion, so we use map + return map(at::native::round_impl); + } + inline Vectorized tan() const { + return map_with_vec_float_method(&Vectorized::tan); + } + 
inline Vectorized tanh() const { + return map_with_vec_float_method(&Vectorized::tanh); + } + Vectorized trunc() const { + float16x8_t r0 = vrndq_f16(values.val[0]); + float16x8_t r1 = vrndq_f16(values.val[1]); + return Vectorized(r0, r1); + } + Vectorized lgamma() const { + return map_with_vec_float_method(&Vectorized::lgamma); + } + Vectorized sqrt() const { + return Vectorized( + vsqrtq_f16(values.val[0]), vsqrtq_f16(values.val[1])); + } + Vectorized reciprocal() const { + auto ones = vdupq_n_f16(1.0f); + auto r0 = vdivq_f16(ones, values.val[0]); + auto r1 = vdivq_f16(ones, values.val[1]); + return Vectorized(r0, r1); + } + Vectorized rsqrt() const { + return this->sqrt().reciprocal(); + } + Vectorized pow(const Vectorized& exp) const { + return map2_with_vec_float_method(exp, &Vectorized::pow); + } + Vectorized operator==(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vceqq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vceqq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator!=(const Vectorized& other) const { + float16x8_t r0 = vreinterpretq_f16_u16( + vmvnq_u16(vceqq_f16(values.val[0], other.values.val[0]))); + float16x8_t r1 = vreinterpretq_f16_u16( + vmvnq_u16(vceqq_f16(values.val[1], other.values.val[1]))); + return Vectorized(r0, r1); + } + + Vectorized operator<(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcltq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcltq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator<=(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcleq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcleq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator>(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcgtq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcgtq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator>=(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcgeq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcgeq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; // Vectorized + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vaddq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vaddq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator-( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vsubq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vsubq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vmulq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vmulq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator/( + 
const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b) {
+  float16x8_t r0 = vdivq_f16(a.get_low(), b.get_low());
+  float16x8_t r1 = vdivq_f16(a.get_high(), b.get_high());
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+// frac. Implemented here so we can use subtraction.
+inline Vectorized<c10::Half> Vectorized<c10::Half>::frac() const {
+  return *this - this->trunc();
+}
+
+// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<c10::Half> inline maximum(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b) {
+  float16x8_t r0 = vmaxq_f16(a.get_low(), b.get_low());
+  float16x8_t r1 = vmaxq_f16(a.get_high(), b.get_high());
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<c10::Half> inline minimum(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b) {
+  float16x8_t r0 = vminq_f16(a.get_low(), b.get_low());
+  float16x8_t r1 = vminq_f16(a.get_high(), b.get_high());
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+template <>
+Vectorized<c10::Half> inline clamp(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& min,
+    const Vectorized<c10::Half>& max) {
+  return minimum(max, maximum(min, a));
+}
+
+template <>
+Vectorized<c10::Half> inline clamp_max(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& max) {
+  return minimum(max, a);
+}
+
+template <>
+Vectorized<c10::Half> inline clamp_min(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& min) {
+  return maximum(min, a);
+}
+
+template <>
+Vectorized<c10::Half> inline operator&(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b) {
+  float16x8_t r0 = vreinterpretq_f16_u16(vandq_u16(
+      vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low())));
+  float16x8_t r1 = vreinterpretq_f16_u16(vandq_u16(
+      vreinterpretq_u16_f16(a.get_high()),
+      vreinterpretq_u16_f16(b.get_high())));
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+template <>
+Vectorized<c10::Half> inline operator|(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b) {
+  float16x8_t r0 = vreinterpretq_f16_u16(vorrq_u16(
+      vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low())));
+  float16x8_t r1 = vreinterpretq_f16_u16(vorrq_u16(
+      vreinterpretq_u16_f16(a.get_high()),
+      vreinterpretq_u16_f16(b.get_high())));
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+template <>
+Vectorized<c10::Half> inline operator^(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b) {
+  float16x8_t r0 = vreinterpretq_f16_u16(veorq_u16(
+      vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low())));
+  float16x8_t r1 = vreinterpretq_f16_u16(veorq_u16(
+      vreinterpretq_u16_f16(a.get_high()),
+      vreinterpretq_u16_f16(b.get_high())));
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+inline Vectorized<c10::Half> Vectorized<c10::Half>::eq(
+    const Vectorized<c10::Half>& other) const {
+  return (*this == other) & Vectorized<c10::Half>(1);
+}
+
+inline Vectorized<c10::Half> Vectorized<c10::Half>::ne(
+    const Vectorized<c10::Half>& other) const {
+  return (*this != other) & Vectorized<c10::Half>(1);
+}
+
+inline Vectorized<c10::Half> Vectorized<c10::Half>::gt(
+    const Vectorized<c10::Half>& other) const {
+  return (*this > other) & Vectorized<c10::Half>(1);
+}
+
+inline Vectorized<c10::Half> Vectorized<c10::Half>::ge(
+    const Vectorized<c10::Half>& other) const {
+  return (*this >= other) & Vectorized<c10::Half>(1);
+}
+
+inline Vectorized<c10::Half> Vectorized<c10::Half>::lt(
+    const Vectorized<c10::Half>& other) const {
+  return (*this < other) & Vectorized<c10::Half>(1);
+}
+
+inline Vectorized<c10::Half> Vectorized<c10::Half>::le(
+    const Vectorized<c10::Half>& other) const {
+  return (*this <= other) & Vectorized<c10::Half>(1);
+}
+
+template <>
+inline void convert(const float16_t* src, int16_t* dst, int64_t n) {
+  int64_t i;
+#ifndef __msvc_cl__
+#pragma unroll
+#endif
+  for (i = 0; i <= (n - Vectorized<c10::Half>::size());
+       i += Vectorized<c10::Half>::size()) {
+    vst1q_s16(dst + i, vcvtq_s16_f16(vld1q_f16(src + i)));
+    vst1q_s16(dst + i + 8, vcvtq_s16_f16(vld1q_f16(src + i + 8)));
+  }
+#ifndef __msvc_cl__
+#pragma unroll
+#endif
+  for (; i < n; i++) {
+    dst[i] = static_cast<int16_t>(src[i]);
+  }
+}
+
+template <>
+inline void convert(const int16_t* src, float16_t* dst, int64_t n) {
+  int64_t i;
+#ifndef __msvc_cl__
+#pragma unroll
+#endif
+  for (i = 0; i <= (n - Vectorized<c10::Half>::size());
+       i += Vectorized<c10::Half>::size()) {
+    vst1q_f16(dst + i, vcvtq_f16_s16(vld1q_s16(src + i)));
+    vst1q_f16(dst + i + 8, vcvtq_f16_s16(vld1q_s16(src + i + 8)));
+  }
+#ifndef __msvc_cl__
+#pragma unroll
+#endif
+  for (; i < n; i++) {
+    dst[i] = static_cast<float16_t>(src[i]);
+  }
+}
+
+template <>
+Vectorized<c10::Half> inline fmadd(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b,
+    const Vectorized<c10::Half>& c) {
+  float16x8_t r0 = vfmaq_f16(c.get_low(), a.get_low(), b.get_low());
+  float16x8_t r1 = vfmaq_f16(c.get_high(), a.get_high(), b.get_high());
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+template <>
+Vectorized<c10::Half> inline fmsub(
+    const Vectorized<c10::Half>& a,
+    const Vectorized<c10::Half>& b,
+    const Vectorized<c10::Half>& c) {
+  float16x8_t r0 = vfmsq_f16(c.get_low(), a.get_low(), b.get_low());
+  float16x8_t r1 = vfmsq_f16(c.get_high(), a.get_high(), b.get_high());
+  return Vectorized<c10::Half>(r0, r1);
+}
+
+#endif /* defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(C10_MOBILE) */
+
+} // namespace CPU_CAPABILITY
+} // namespace at::vec
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h
new file mode 100644
index 0000000000000000000000000000000000000000..6263efd2039ce650c5dd480257e5757aff20540f
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h
@@ -0,0 +1,1586 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/irange.h>
+
+namespace at::vec {
+inline namespace CPU_CAPABILITY {
+
+#ifdef CPU_CAPABILITY_AVX2
+
+struct Vectorizedi {
+protected:
+  __m256i values;
+
+  static inline __m256i invert(const __m256i& v) {
+    const auto ones = _mm256_set1_epi64x(-1);
+    return _mm256_xor_si256(ones, v);
+  }
+public:
+  Vectorizedi() {}
+  Vectorizedi(__m256i v) : values(v) {}
+  operator __m256i() const {
+    return values;
+  }
+};
+
+#else
+
+struct Vectorizedi {};  // dummy definition to make Vectorizedi always defined
+
+#endif // CPU_CAPABILITY_AVX2
+
+#ifdef CPU_CAPABILITY_AVX2
+
+template <>
+class Vectorized<int64_t> : public Vectorizedi {
+private:
+  static const Vectorized<int64_t> ones;
+public:
+  using value_type = int64_t;
+  using size_type = int;
+  static constexpr size_type size() {
+    return 4;
+  }
+  using Vectorizedi::Vectorizedi;
+  Vectorized() {}
+  Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); }
+  Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) {
+    values = _mm256_setr_epi64x(val1, val2, val3, val4);
+  }
+  template <int64_t mask>
+  static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
+    __at_align__ int64_t tmp_values[size()];
+    a.store(tmp_values);
+    if (mask & 0x01)
+      tmp_values[0] = _mm256_extract_epi64(b.values, 0);
+    if (mask & 0x02)
+      tmp_values[1] = _mm256_extract_epi64(b.values, 1);
+    if (mask & 0x04)
+      tmp_values[2] = _mm256_extract_epi64(b.values, 2);
+    if (mask & 0x08)
+      tmp_values[3] = _mm256_extract_epi64(b.values, 3);
+    return loadu(tmp_values);
+  }
+  static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
+                                    const Vectorized<int64_t>& mask) {
+    return _mm256_blendv_epi8(a.values, b.values, mask.values);
+  }
+  template <typename step_t>
+  static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
+    return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
+  }
+  static Vectorized<int64_t>
+  set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
+    switch (count) {
+      case 0:
+        return a;
+      case 1:
+        return blend<1>(a, b);
+      case 2:
+        return blend<3>(a, b);
+      case 3:
+        return blend<7>(a, b);
+    }
+    return b;
+  }
+  static Vectorized<int64_t> loadu(const void* ptr) {
+    return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
+  }
+  static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
+    __at_align__ int64_t tmp_values[size()];
+    // Ensure uninitialized memory does not change the output value. See
+    // https://github.com/pytorch/pytorch/issues/32502 for more details. We do
+    // not initialize arrays to zero using "={0}" because gcc would compile it
+    // to two instructions, while a loop would be compiled to one instruction.
+    for (const auto i : c10::irange(size())) {
+      tmp_values[i] = 0;
+    }
+    std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
+    return loadu(tmp_values);
+  }
+  void store(void* ptr, int count = size()) const {
+    if (count == size()) {
+      // ptr need not be aligned here.
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int64_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int64_t)); + } + } + const int64_t& operator[](int idx) const = delete; + int64_t& operator[](int idx) = delete; + Vectorized abs() const { + auto zero = _mm256_set1_epi64x(0); + auto is_larger = _mm256_cmpgt_epi64(zero, values); + auto inverse = _mm256_xor_si256(values, is_larger); + return _mm256_sub_epi64(inverse, is_larger); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi64x(0); + } + Vectorized conj() const { + return *this; + } + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi64(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi64(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi64(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi64(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi64(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi64(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int32_t; + static constexpr int size() { + return 8; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int32_t v) { values = _mm256_set1_epi32(v); } + Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4, + int32_t val5, int32_t val6, int32_t val7, int32_t val8) { + values = _mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm256_blend_epi32(a, b, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int32_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int32_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static 
Vectorized loadu(const void* ptr, int32_t count) { + __at_align__ int32_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int32_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int32_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int32_t)); + } + } + const int32_t& operator[](int idx) const = delete; + int32_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi32(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi32(0); + } + Vectorized conj() const { + return *this; + } + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi32(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi32(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi32(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi32(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi32(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi32(other.values, values)); + } + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +inline void convert(const int32_t *src, float *dst, int64_t n) { + int64_t i; + // int32_t and float have same size +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_vec = _mm256_loadu_si256(reinterpret_cast(src + i)); + auto output_vec = _mm256_cvtepi32_ps(input_vec); + _mm256_storeu_ps(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int32_t *src, double *dst, int64_t n) { + int64_t i; + // int32_t has half the size of double +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_128_vec = _mm_loadu_si128(reinterpret_cast(src + i)); + auto output_vec = _mm256_cvtepi32_pd(input_128_vec); + _mm256_storeu_pd(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < 
n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int16_t; + static constexpr int size() { + return 16; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int16_t v) { values = _mm256_set1_epi16(v); } + Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4, + int16_t val5, int16_t val6, int16_t val7, int16_t val8, + int16_t val9, int16_t val10, int16_t val11, int16_t val12, + int16_t val13, int16_t val14, int16_t val15, int16_t val16) { + values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi16(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi16(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi16(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi16(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi16(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi16(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi16(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi16(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi16(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi16(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi16(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi16(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi16(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi16(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi16(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi16(b.values, 15); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int16_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int16_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int16_t count) { + __at_align__ int16_t tmp_values[size()]; + // Ensure 
uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int16_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + const int16_t& operator[](int idx) const = delete; + int16_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi16(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi16(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi16(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi16(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi16(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi16(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi16(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template +class Vectorized8 : public Vectorizedi { + static_assert( + std::is_same_v || std::is_same_v, + "Only int8_t/uint8_t are supported"); +protected: + static const Vectorized ones; +public: + using value_type = T; + static constexpr int size() { + return 32; + } + using Vectorizedi::Vectorizedi; + Vectorized8() {} + Vectorized8(T v) { values = _mm256_set1_epi8(v); } + Vectorized8(T val1, T val2, T val3, T val4, + T val5, T val6, T val7, T val8, + T val9, T val10, T val11, T val12, + T val13, T val14, T val15, T val16, + T val17, T val18, T val19, T val20, + T val21, T val22, T val23, T val24, + T val25, T val26, T val27, T val28, + T val29, T val30, T val31, T val32) { + values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16, + val17, val18, val19, val20, val21, val22, val23, val24, + val25, val26, val27, val28, val29, val30, val31, val32); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ T tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = 
_mm256_extract_epi8(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi8(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi8(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi8(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi8(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi8(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi8(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi8(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi8(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi8(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi8(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi8(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi8(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi8(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi8(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi8(b.values, 15); + if (mask & 0x010000) + tmp_values[16] = _mm256_extract_epi8(b.values, 16); + if (mask & 0x020000) + tmp_values[17] = _mm256_extract_epi8(b.values, 17); + if (mask & 0x040000) + tmp_values[18] = _mm256_extract_epi8(b.values, 18); + if (mask & 0x080000) + tmp_values[19] = _mm256_extract_epi8(b.values, 19); + if (mask & 0x100000) + tmp_values[20] = _mm256_extract_epi8(b.values, 20); + if (mask & 0x200000) + tmp_values[21] = _mm256_extract_epi8(b.values, 21); + if (mask & 0x400000) + tmp_values[22] = _mm256_extract_epi8(b.values, 22); + if (mask & 0x800000) + tmp_values[23] = _mm256_extract_epi8(b.values, 23); + if (mask & 0x1000000) + tmp_values[24] = _mm256_extract_epi8(b.values, 24); + if (mask & 0x2000000) + tmp_values[25] = _mm256_extract_epi8(b.values, 25); + if (mask & 0x4000000) + tmp_values[26] = _mm256_extract_epi8(b.values, 26); + if (mask & 0x8000000) + tmp_values[27] = _mm256_extract_epi8(b.values, 27); + if (mask & 0x10000000) + tmp_values[28] = _mm256_extract_epi8(b.values, 28); + if (mask & 0x20000000) + tmp_values[29] = _mm256_extract_epi8(b.values, 29); + if (mask & 0x40000000) + tmp_values[30] = _mm256_extract_epi8(b.values, 30); + if (mask & 0x80000000) + tmp_values[31] = _mm256_extract_epi8(b.values, 31); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(T base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, T count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<0x1>(a, b); + case 2: + return blend<0x3>(a, b); + case 3: + return blend<0x7>(a, b); + case 4: + return blend<0xF>(a, b); + case 5: + return blend<0x1F>(a, b); + case 6: + 
return blend<0x3F>(a, b);
+      case 7:
+        return blend<0x7F>(a, b);
+      case 8:
+        return blend<0xFF>(a, b);
+      case 9:
+        return blend<0x1FF>(a, b);
+      case 10:
+        return blend<0x3FF>(a, b);
+      case 11:
+        return blend<0x7FF>(a, b);
+      case 12:
+        return blend<0xFFF>(a, b);
+      case 13:
+        return blend<0x1FFF>(a, b);
+      case 14:
+        return blend<0x3FFF>(a, b);
+      case 15:
+        return blend<0x7FFF>(a, b);
+      case 16:
+        return blend<0xFFFF>(a, b);
+      case 17:
+        return blend<0x1FFFF>(a, b);
+      case 18:
+        return blend<0x3FFFF>(a, b);
+      case 19:
+        return blend<0x7FFFF>(a, b);
+      case 20:
+        return blend<0xFFFFF>(a, b);
+      case 21:
+        return blend<0x1FFFFF>(a, b);
+      case 22:
+        return blend<0x3FFFFF>(a, b);
+      case 23:
+        return blend<0x7FFFFF>(a, b);
+      case 24:
+        return blend<0xFFFFFF>(a, b);
+      case 25:
+        return blend<0x1FFFFFF>(a, b);
+      case 26:
+        return blend<0x3FFFFFF>(a, b);
+      case 27:
+        return blend<0x7FFFFFF>(a, b);
+      case 28:
+        return blend<0xFFFFFFF>(a, b);
+      case 29:
+        return blend<0x1FFFFFFF>(a, b);
+      case 30:
+        return blend<0x3FFFFFFF>(a, b);
+      case 31:
+        return blend<0x7FFFFFFF>(a, b);
+    }
+    return b;
+  }
+  static Vectorized<T> loadu(const void* ptr) {
+    return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
+  }
+  static Vectorized<T> loadu_one_fourth(const void* ptr) {
+    // Fast path when loading only 8 elements.
+    // Note: this was not merged into the fast path of loadu(const void* ptr, T count)
+    // because loadu(const void* ptr, T count) requires zero initialization of the
+    // upper 128 bits, whereas with _mm256_castsi128_si256 the upper 128 bits of
+    // the result are undefined.
+    // TODO: we can use _mm256_zextsi128_si256 in the future,
+    // once gcc 9.3 (which doesn't support it) is no longer a concern.
+    __m128i input_128 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
+    return _mm256_castsi128_si256(input_128);
+  }
+  static Vectorized<T> loadu(const void* ptr, T count) {
+    __at_align__ T tmp_values[size()];
+    // Ensure uninitialized memory does not change the output value. See
+    // https://github.com/pytorch/pytorch/issues/32502 for more details. We do
+    // not initialize arrays to zero using "={0}" because gcc would compile it
+    // to two instructions, while a loop would be compiled to one instruction.
+    for (const auto i : c10::irange(size())) {
+      tmp_values[i] = 0;
+    }
+    std::memcpy(tmp_values, ptr, count * sizeof(T));
+    return loadu(tmp_values);
+  }
+  void store(void* ptr, int count = size()) const {
+    if (count == size()) {
+      // ptr need not be aligned here.
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + if (count == 8) { + // Fast path if only store element number of 8 + _mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values)); + } else { + __at_align__ T tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(T)); + } + } + } + const T& operator[](int idx) const = delete; + T& operator[](int idx) = delete; + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi8(0); + } + Vectorized conj() const { + return *this; + } +}; + +template<> +class Vectorized: public Vectorized8 { +public: + using Vectorized8::Vectorized8; + + Vectorized neg() const; + + Vectorized abs() const { + return _mm256_abs_epi8(values); + } + + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi8(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi8(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi8(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi8(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return other < *this; + } + Vectorized operator>=(const Vectorized& other) const { + return other <= *this; + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template<> +class Vectorized: public Vectorized8 { +public: + using Vectorized8::Vectorized8; + + Vectorized neg() const; + + Vectorized abs() const { + return *this; + } + + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi8(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi8(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + __m256i max = _mm256_max_epu8(values, other.values); + return invert(_mm256_cmpeq_epi8(max, values)); + } + Vectorized operator<=(const Vectorized& other) const { + __m256i max = _mm256_max_epu8(values, other.values); + return _mm256_cmpeq_epi8(max, other.values); + } + Vectorized operator>(const Vectorized& other) const { + return other < *this; + } + Vectorized operator>=(const Vectorized& other) const { + return other <= *this; + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi64(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi32(a, b); +} + +template <> +Vectorized inline operator+(const 
Vectorized& a, const Vectorized& b) { + return _mm256_add_epi16(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi8(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi8(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi64(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi32(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi16(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi8(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi8(a, b); +} + +// Negation. Defined here so we can utilize operator- +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +// Emulate operations with no native 64-bit support in avx, +// by extracting each element, performing the operation pointwise, +// then combining the results into a vector. +template +Vectorized inline emulate(const Vectorized& a, const Vectorized& b, const op_t& op) { + int64_t a0 = _mm256_extract_epi64(a, 0); + int64_t a1 = _mm256_extract_epi64(a, 1); + int64_t a2 = _mm256_extract_epi64(a, 2); + int64_t a3 = _mm256_extract_epi64(a, 3); + + int64_t b0 = _mm256_extract_epi64(b, 0); + int64_t b1 = _mm256_extract_epi64(b, 1); + int64_t b2 = _mm256_extract_epi64(b, 2); + int64_t b3 = _mm256_extract_epi64(b, 3); + + int64_t c0 = op(a0, b0); + int64_t c1 = op(a1, b1); + int64_t c2 = op(a2, b2); + int64_t c3 = op(a3, b3); + + return _mm256_set_epi64x(c3, c2, c1, c0); +} + +template +Vectorized inline emulate(const Vectorized& a, const Vectorized& b, const Vectorized& c, const op_t& op) { + int64_t a0 = _mm256_extract_epi64(a, 0); + int64_t a1 = _mm256_extract_epi64(a, 1); + int64_t a2 = _mm256_extract_epi64(a, 2); + int64_t a3 = _mm256_extract_epi64(a, 3); + + int64_t b0 = _mm256_extract_epi64(b, 0); + int64_t b1 = _mm256_extract_epi64(b, 1); + int64_t b2 = _mm256_extract_epi64(b, 2); + int64_t b3 = _mm256_extract_epi64(b, 3); + + int64_t c0 = _mm256_extract_epi64(c, 0); + int64_t c1 = _mm256_extract_epi64(c, 1); + int64_t c2 = _mm256_extract_epi64(c, 2); + int64_t c3 = _mm256_extract_epi64(c, 3); + + int64_t d0 = op(a0, b0, c0); + int64_t d1 = op(a1, b1, c1); + int64_t d2 = op(a2, b2, c2); + int64_t d3 = op(a3, b3, c3); + + return _mm256_set_epi64x(d3, d2, d1, d0); +} + +// AVX2 has no intrinsic for int64_t multiply so it needs to be emulated +// This could be implemented more efficiently using epi32 instructions +// This is also technically avx compatible, but then we'll need AVX +// code for add as well. +// Note: intentionally ignores undefined behavior like (-lowest * -1). 
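+// As an illustration of the emulate() helper above, a hypothetical 64-bit
+// lane-wise "min" (a sketch only, not an operator defined by this header)
+// could be written as:
+//
+//   Vectorized<int64_t> min64 = emulate(a, b,
+//       [](int64_t x, int64_t y) { return x < y ? x : y; });
+//
+// i.e. extract the four lanes, apply the scalar lambda per lane, and repack
+// with _mm256_set_epi64x (whose argument order is the reverse of setr).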
+template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;}); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mullo_epi16(a, b); +} + +template +Vectorized inline int_elementwise_binary_256(const Vectorized& a, const Vectorized& b, Op op) { + T values_a[Vectorized::size()]; + T values_b[Vectorized::size()]; + a.store(values_a); + b.store(values_b); + for (int i = 0; i != Vectorized::size(); i++) { + values_a[i] = op(values_a[i], values_b[i]); + } + return Vectorized::loadu(values_a); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + // We don't have an instruction for multiplying int8_t +#ifndef CPU_CAPABILITY_AVX2 + return int_elementwise_binary_256(a, b, std::multiplies()); +#else + __m256i mask00FF = _mm256_set1_epi16(0x00FF); + __m256i a_lo = _mm256_srai_epi16(_mm256_slli_epi16(a, 8), 8); + __m256i b_lo = _mm256_srai_epi16(_mm256_slli_epi16(b, 8), 8); + __m256i a_hi = _mm256_srai_epi16(a, 8); + __m256i b_hi = _mm256_srai_epi16(b, 8); + __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF); + __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8); + __m256i res = _mm256_or_si256(res_hi, res_lo); + return res; +#endif +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + // We don't have an instruction for multiplying uint8_t +#ifndef CPU_CAPABILITY_AVX2 + return int_elementwise_binary_256(a, b, std::multiplies()); +#else + __m256i mask00FF = _mm256_set1_epi16(0x00FF); + __m256i a_lo = _mm256_and_si256 (a, mask00FF); + __m256i b_lo = _mm256_and_si256 (b, mask00FF); + __m256i a_hi = _mm256_srli_epi16(a, 8); + __m256i b_hi = _mm256_srli_epi16(b, 8); + __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF); + __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8); + __m256i res = _mm256_or_si256(res_hi, res_lo); + return res; +#endif +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);}); +#else + __m256i cmp = _mm256_cmpgt_epi64(a, b); + return _mm256_blendv_epi8(a, b, cmp); +#endif +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi32(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi16(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi8(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epu8(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);}); +#else + __m256i cmp = _mm256_cmpgt_epi64(a, b); + return _mm256_blendv_epi8(b, a, cmp); +#endif +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epi32(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const 
Vectorized& b) { + return _mm256_max_epi16(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epi8(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epu8(a, b); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));}); +#else + return minimum(maximum(a, min_val), max_val); +#endif +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epu8(max_val, _mm256_max_epu8(a, min_val)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);}); +#else + return minimum(max_val, a); +#endif +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi32(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi16(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi8(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epu8(max_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);}); +#else + return maximum(min_val, a); +#endif +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi32(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi16(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi8(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epu8(min_val, a); +} + +template +Vectorized inline convert_to_int32(const T* ptr) { + return Vectorized::loadu(ptr); +} + +template<> +Vectorized inline convert_to_int32(const int8_t* ptr) { + return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast(ptr))); +} + +template<> +Vectorized inline convert_to_int32(const uint8_t* ptr) { + return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast(ptr))); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return 
int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized operator~(const Vectorized& a) { + return _mm256_xor_si256(a, _mm256_set1_epi32(-1)); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) 
const {
+  return (*this > other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
+  return (*this >= other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
+  return (*this < other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
+  return (*this <= other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
+  return (*this == other) & Vectorized<uint8_t>(1);
+}
+
+inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
+  return (*this != other) & Vectorized<uint8_t>(1);
+}
+
+inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
+  return (*this > other) & Vectorized<uint8_t>(1);
+}
+
+inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
+  return (*this >= other) & Vectorized<uint8_t>(1);
+}
+
+inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
+  return (*this < other) & Vectorized<uint8_t>(1);
+}
+
+inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
+  return (*this <= other) & Vectorized<uint8_t>(1);
+}
+
+template <bool left_shift>
+Vectorized<int16_t> inline shift_256_16(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  // No vector instruction for shifting int16_t, so emulating it instead.
+
+  // Control masks for shuffle operation, treating 256 bits as an
+  // array of 16-bit elements, and considering pairs of neighboring
+  // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
+  // M!=N) is set so that shuffle will move element with index M from
+  // input pair into element with index N in output pair, and element
+  // with index M in output pair will be set to all 0s.
+  __m256i ctl_0_1 = _mm256_set_epi8(29, 28, 0x80, 0x80, 25, 24, 0x80, 0x80,
+                                    21, 20, 0x80, 0x80, 17, 16, 0x80, 0x80,
+                                    13, 12, 0x80, 0x80, 9, 8, 0x80, 0x80,
+                                    5, 4, 0x80, 0x80, 1, 0, 0x80, 0x80);
+  __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 31, 30, 0x80, 0x80, 27, 26,
+                                    0x80, 0x80, 23, 22, 0x80, 0x80, 19, 18,
+                                    0x80, 0x80, 15, 14, 0x80, 0x80, 11, 10,
+                                    0x80, 0x80, 7, 6, 0x80, 0x80, 3, 2);
+
+  // Masks for bitwise and operation, treating 256 bits as an array of
+  // 16-bit elements, and considering them in pairs of neighboring
+  // elements. A mask named "keep_M" (M in [0,1]) is set so that
+  // bitwise and will copy element with index M from input pair into
+  // element with the same index in output pair, while the other
+  // element in output pair will be set to all 0s.
+  __m256i keep_0 = _mm256_set1_epi32(0xFFFF);
+  __m256i keep_1 = _mm256_set1_epi32(0xFFFF0000);
+
+  // Take each 16-bit element with idx%2==0 from the input array to be
+  // shifted and extend it to 32 bits so that 0s are added to the
+  // right. Then, perform shifting on this 32-bit number. The upper 16
+  // bits will be the proper result of shifting the original 16-bit
+  // number, so write them to the result array, into the same position
+  // from which the corresponding input element was taken. Also, make
+  // sure that result array elements with idx%2!=0 are set to all 0s.
+  //
+  // Note that the number of bits to shift by is extended to 32 bits by
+  // adding 0s to the left. That means this number is not properly
+  // sign-extended for negative values. However, the number of bits to
+  // shift is treated as an unsigned integer by the respective shift
+  // intrinsics anyway, so with or without proper sign extension a
+  // negative value will be interpreted as a number greater than 32,
+  // and the shifting result will be the same.
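+  //
+  // Worked example for one even lane, assuming a left shift of 0x1234 by 3:
+  // the lane is moved into the upper half of a 32-bit slot (0x12340000),
+  // shifted as a 32-bit value to 0x91A00000, and the upper 16 bits (0x91A0)
+  // are written back, which equals (0x1234 << 3) truncated to 16 bits.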
+  __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_1);
+  __m256i b0 = _mm256_and_si256(b, keep_0);
+  __m256i c0;
+  if (left_shift)
+    c0 = _mm256_sllv_epi32(a0, b0);
+  else
+    c0 = _mm256_srav_epi32(a0, b0);
+  c0 = _mm256_shuffle_epi8(c0, ctl_1_0);
+
+  // Perform shifting the same way for input array elements with
+  // idx%2==1.
+  __m256i a1 = _mm256_and_si256(a, keep_1);
+  __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
+  __m256i c1;
+  if (left_shift)
+    c1 = _mm256_sllv_epi32(a1, b1);
+  else
+    c1 = _mm256_srav_epi32(a1, b1);
+  c1 = _mm256_and_si256(c1, keep_1);
+
+  // Merge partial results into the final result.
+  __m256i c = _mm256_or_si256(c0, c1);
+
+  return c;
+}
+
+template <bool left_shift, typename T,
+          typename std::enable_if_t<std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>, int> = 0>
+Vectorized<T> inline shift_256_8(const Vectorized<T>& a, const Vectorized<T>& b) {
+  // No vector instruction for shifting int8_t/uint8_t, so emulating
+  // it instead.
+
+  // Control masks for shuffle operation, treating 256 bits as an
+  // array of 8-bit elements, and considering quadruples of
+  // neighboring elements. Specifically, a mask named "ctl_M_N" (M,N
+  // in [0,1,2,3], and M!=N) is set so that shuffle will move element
+  // with index M from input quadruple into element with index N in
+  // output quadruple, and other elements in output quadruple will be
+  // set to all 0s.
+  __m256i ctl_0_3 = _mm256_set_epi8(28, 0x80, 0x80, 0x80, 24, 0x80, 0x80, 0x80,
+                                    20, 0x80, 0x80, 0x80, 16, 0x80, 0x80, 0x80,
+                                    12, 0x80, 0x80, 0x80, 8, 0x80, 0x80, 0x80,
+                                    4, 0x80, 0x80, 0x80, 0, 0x80, 0x80, 0x80);
+  __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 29, 0x80, 0x80, 0x80, 25,
+                                    0x80, 0x80, 0x80, 21, 0x80, 0x80, 0x80, 17,
+                                    0x80, 0x80, 0x80, 13, 0x80, 0x80, 0x80, 9,
+                                    0x80, 0x80, 0x80, 5, 0x80, 0x80, 0x80, 1);
+  __m256i ctl_1_3 = _mm256_set_epi8(29, 0x80, 0x80, 0x80, 25, 0x80, 0x80, 0x80,
+                                    21, 0x80, 0x80, 0x80, 17, 0x80, 0x80, 0x80,
+                                    13, 0x80, 0x80, 0x80, 9, 0x80, 0x80, 0x80,
+                                    5, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80);
+  __m256i ctl_2_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 30, 0x80, 0x80, 0x80, 26,
+                                    0x80, 0x80, 0x80, 22, 0x80, 0x80, 0x80, 18,
+                                    0x80, 0x80, 0x80, 14, 0x80, 0x80, 0x80, 10,
+                                    0x80, 0x80, 0x80, 6, 0x80, 0x80, 0x80, 2);
+  __m256i ctl_2_3 = _mm256_set_epi8(30, 0x80, 0x80, 0x80, 26, 0x80, 0x80, 0x80,
+                                    22, 0x80, 0x80, 0x80, 18, 0x80, 0x80, 0x80,
+                                    14, 0x80, 0x80, 0x80, 10, 0x80, 0x80, 0x80,
+                                    6, 0x80, 0x80, 0x80, 2, 0x80, 0x80, 0x80);
+  __m256i ctl_3_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 31, 0x80, 0x80, 0x80, 27,
+                                    0x80, 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19,
+                                    0x80, 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11,
+                                    0x80, 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3);
+  __m256i ctl_3_1 = _mm256_set_epi8(0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, 0x80,
+                                    0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80,
+                                    0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80,
+                                    0x80, 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80);
+  __m256i ctl_3_2 = _mm256_set_epi8(0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, 0x80,
+                                    0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, 0x80,
+                                    0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, 0x80,
+                                    0x80, 7, 0x80, 0x80, 0x80, 3, 0x80, 0x80);
+
+  // Masks for bitwise and operation, treating 256 bits as an array of
+  // 8-bit elements, and considering them in quadruples of neighboring
+  // elements. A mask named "keep_M" (M in [0,1,2,3]) is set so that
+  // bitwise and will copy element with index M from input quadruple
+  // into element with the same index in output quadruple, while the
+  // other elements in output quadruple will be set to all 0s.
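+  // (Only keep_0 and keep_3 are needed below: lanes with idx%4==1 and
+  // idx%4==2 are repositioned purely with shuffles, and a 0x80 control byte
+  // already zeroes the non-selected bytes of the shuffle result.)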
+  __m256i keep_0 = _mm256_set1_epi32(0xFF);
+  __m256i keep_3 = _mm256_set1_epi32(0xFF000000);
+
+  // Take each 8-bit element with idx%4==0 from the input array to be
+  // shifted and extend it to 32 bits so that 0s are added to the
+  // right. Then, perform shifting on this 32-bit number. The upper 8
+  // bits will be the proper result of shifting the original 8-bit
+  // number, so write them to the result array, into the same position
+  // from which the corresponding input element was taken. Also, make
+  // sure that result array elements with idx%4!=0 are set to all 0s.
+  //
+  // Note that the number of bits to shift by is extended to 32 bits by
+  // adding 0s to the left. That means this number is not properly
+  // sign-extended for negative values. However, the number of bits to
+  // shift is treated as an unsigned integer by the respective shift
+  // intrinsics anyway, so with or without proper sign extension a
+  // negative value will be interpreted as a number greater than 32,
+  // and the shifting result will be the same.
+  __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_3);
+  __m256i b0 = _mm256_and_si256(b, keep_0);
+  __m256i c0;
+  if (left_shift)
+    c0 = _mm256_sllv_epi32(a0, b0);
+  else
+    if constexpr (std::is_same_v<T, int8_t>)
+      c0 = _mm256_srav_epi32(a0, b0);
+    else
+      c0 = _mm256_srlv_epi32(a0, b0);
+  c0 = _mm256_shuffle_epi8(c0, ctl_3_0);
+
+  // Perform shifting the same way for input array elements with
+  // idx%4==1.
+  __m256i a1 = _mm256_shuffle_epi8(a, ctl_1_3);
+  __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
+  __m256i c1;
+  if (left_shift)
+    c1 = _mm256_sllv_epi32(a1, b1);
+  else
+    if constexpr (std::is_same_v<T, int8_t>)
+      c1 = _mm256_srav_epi32(a1, b1);
+    else
+      c1 = _mm256_srlv_epi32(a1, b1);
+  c1 = _mm256_shuffle_epi8(c1, ctl_3_1);
+
+  // Perform shifting the same way for input array elements with
+  // idx%4==2.
+  __m256i a2 = _mm256_shuffle_epi8(a, ctl_2_3);
+  __m256i b2 = _mm256_shuffle_epi8(b, ctl_2_0);
+  __m256i c2;
+  if (left_shift)
+    c2 = _mm256_sllv_epi32(a2, b2);
+  else
+    if constexpr (std::is_same_v<T, int8_t>)
+      c2 = _mm256_srav_epi32(a2, b2);
+    else
+      c2 = _mm256_srlv_epi32(a2, b2);
+  c2 = _mm256_shuffle_epi8(c2, ctl_3_2);
+
+  // Perform shifting the same way for input array elements with
+  // idx%4==3.
+  __m256i a3 = _mm256_and_si256(a, keep_3);
+  __m256i b3 = _mm256_shuffle_epi8(b, ctl_3_0);
+  __m256i c3;
+  if (left_shift)
+    c3 = _mm256_sllv_epi32(a3, b3);
+  else
+    if constexpr (std::is_same_v<T, int8_t>)
+      c3 = _mm256_srav_epi32(a3, b3);
+    else
+      c3 = _mm256_srlv_epi32(a3, b3);
+  c3 = _mm256_and_si256(c3, keep_3);
+
+  // Merge partial results into the final result.
+  __m256i c01 = _mm256_or_si256(c0, c1);
+  __m256i c23 = _mm256_or_si256(c2, c3);
+  __m256i c = _mm256_or_si256(c01, c23);
+
+  return c;
+}
+
+template <>
+Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return _mm256_sllv_epi64(a, b);
+}
+
+template <>
+Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+  return _mm256_sllv_epi32(a, b);
+}
+
+template <>
+Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return shift_256_16<true>(a, b);
+}
+
+template <>
+Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
+  return shift_256_8<true>(a, b);
+}
+
+template <>
+Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
+  return shift_256_8<true>(a, b);
+}
+
+template <>
+Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  // No vector instruction for right arithmetic shifting int64_t, so emulating
+  // it instead.
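+  //
+  // Per lane, the scalar semantics being emulated below are (a sketch):
+  //
+  //   int64_t shr(int64_t a, int64_t s) {
+  //     if (s < 0 || s > 64) s = 64;        // clamped, see below
+  //     return s == 64 ? (a < 0 ? -1 : 0)   // value shifted out completely
+  //                    : (a >> s);          // arithmetic shift
+  //   }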
+
+  // Clamp the shift values such that shift values < 0 and > 64 are changed to 64,
+  // which results in -1 for negative input and 0 for non-negative input.
+  __m256i zero = _mm256_set1_epi64x(0);
+  __m256i max_shift = _mm256_set1_epi64x(64);
+  __m256i mask = _mm256_or_si256(_mm256_cmpgt_epi64(zero, b), _mm256_cmpgt_epi64(b, max_shift));
+  __m256i shift = _mm256_blendv_epi8(b, max_shift, mask);
+  // Shift the number logically to the right, thus filling the most
+  // significant bits with 0s. Then, replace these bits with the sign
+  // bit.
+  __m256i sign_bits = _mm256_cmpgt_epi64(zero, a);
+  __m256i sign_shift = _mm256_sub_epi64(max_shift, shift);
+  __m256i sign_ext = _mm256_sllv_epi64(sign_bits, sign_shift);
+  __m256i c = _mm256_srlv_epi64(a, shift);
+  c = _mm256_or_si256(c, sign_ext);
+
+  return c;
+}
+
+template <>
+Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+  return _mm256_srav_epi32(a, b);
+}
+
+template <>
+Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return shift_256_16<false>(a, b);
+}
+
+template <>
+Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
+  return shift_256_8<false>(a, b);
+}
+
+template <>
+Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
+  return shift_256_8<false>(a, b);
+}
+
+#endif
+
+}} // namespace at::vec::CPU_CAPABILITY
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h
new file mode 100644
index 0000000000000000000000000000000000000000..3460abe17e159d821d51c421e120117126761434
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h
@@ -0,0 +1,298 @@
+#pragma once
+
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+#include <ATen/cpu/vec/vec_mask.h>
+
+namespace at::vec {
+inline namespace CPU_CAPABILITY {
+
+#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
+
+template <typename T, int dst_n, typename mask_t, int mask_n>
+struct VecMaskLoad<
+    T,
+    dst_n,
+    mask_t,
+    mask_n,
+    typename std::enable_if_t<
+        (mask_n == dst_n * 2 && dst_n >= 1) &&
+            (std::is_same_v<T, float> || std::is_same_v<T, int32_t>),
+        void>> {
+  static inline VectorizedN<T, dst_n> apply(
+      const T* ptr,
+      const VecMask<mask_t, mask_n>& vec_mask) {
+    VectorizedN<mask_t, 2> tmp_vec;
+    VectorizedN<T, dst_n> result;
+    for (int i = 0; i < dst_n; i++) {
+      tmp_vec[0] = vec_mask[2 * i];
+      tmp_vec[1] = vec_mask[2 * i + 1];
+      auto int64_mask = VecMask<mask_t, 2>(tmp_vec).template cast<int64_t, 2>();
+      auto int_mask = int64_mask.template cast<int, 1>()[0];
+      if constexpr (std::is_same_v<T, float>) {
+        result[i] = Vectorized<T>(
+            _mm256_maskload_ps(ptr + i * Vectorized<T>::size(), int_mask));
+      } else {
+        result[i] = Vectorized<T>(
+            _mm256_maskload_epi32(ptr + i * Vectorized<T>::size(), int_mask));
+      }
+    }
+    return result;
+  }
+};
+
+template <typename T, int dst_n, typename mask_t>
+struct VecMaskLoad<
+    T,
+    dst_n,
+    mask_t,
+    dst_n,
+    typename std::enable_if_t<
+        std::is_same_v<T, float> || std::is_same_v<T, int32_t>,
+        void>> {
+  static inline VectorizedN<T, dst_n> apply(
+      const T* ptr,
+      const VecMask<mask_t, dst_n>& vec_mask) {
+    VectorizedN<T, dst_n> result;
+#ifndef _MSC_VER
+#pragma unroll
+#endif
+    for (int i = 0; i < dst_n; i++) {
+      auto tmp_mask = VecMask<mask_t, 1>(vec_mask[i]);
+      auto int_mask = tmp_mask.template cast<int, 1>()[0];
+      if constexpr (std::is_same_v<T, float>) {
+        result[i] = Vectorized<T>(
+            _mm256_maskload_ps(ptr + i * Vectorized<T>::size(), int_mask));
+      } else {
+        result[i] = Vectorized<T>(
+            _mm256_maskload_epi32(ptr + i * Vectorized<T>::size(), int_mask));
+      }
+    }
+    return result;
+  }
+};
+
+template <typename T, typename mask_t>
+struct VecMaskLoad<
+    T,
+    2,
+    mask_t,
+    1,
+    typename std::enable_if_t<
+        std::is_same_v<T, double> || std::is_same_v<T, int64_t>>> {
+  static inline VectorizedN<T, 2> apply(
const T* ptr, + const VecMask& vec_mask) { + auto int64_mask = vec_mask.template cast(); + auto result = at::vec::VectorizedN(); + if constexpr (std::is_same_v) { + result[0] = _mm256_maskload_pd(ptr, int64_mask[0]); + result[1] = _mm256_maskload_pd( + ptr + at::vec::Vectorized::size(), int64_mask[1]); + } else { + result[0] = _mm256_maskload_epi64( + reinterpret_cast(ptr), int64_mask[0]); + result[1] = _mm256_maskload_epi64( + reinterpret_cast( + ptr + at::vec::Vectorized::size()), + int64_mask[1]); + } + return result; + } +}; + +// TODO: add specialization of VecMaskLoad for bfloat16/half and int8/uint8 + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result[i] = _mm256_castsi256_ps(vec_mask[i]); + } + return result; + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result[i] = _mm256_castps_si256(vec_mask[i]); + } + return result; + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result[i] = _mm256_castpd_si256(vec_mask[i]); + } + return result; + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result[i] = _mm256_castsi256_pd(vec_mask[i]); + } + return result; + } +}; + +template +struct VecMaskCast< + int64_t, + dst_n, + mask_t, + mask_n, + typename std::enable_if_t< + (dst_n == 2 * mask_n) && + (std::is_same_v || std::is_same_v), + void>> { + static inline VecMask apply( + const VecMask& vec_mask) { + VectorizedN result; + auto int_mask = vec_mask.template cast(); +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < mask_n; ++i) { + auto int64_vec = + convert(VectorizedN(int_mask[i])); + result[2 * i] = int64_vec[0]; + result[2 * i + 1] = int64_vec[1]; + } + return VecMask(result); + } +}; + +template +struct VecMaskCast< + dst_t, + dst_n, + int64_t, + mask_n, + typename std::enable_if_t< + (mask_n == 2 * dst_n) && + (std::is_same_v || std::is_same_v), + void>> { + static inline VecMask apply( + const VecMask& vec_mask) { + VectorizedN result; + VectorizedN int64_vec; + for (int i = 0; i < dst_n; ++i) { + int64_vec[0] = vec_mask[2 * i]; + int64_vec[1] = vec_mask[2 * i + 1]; + result[i] = convert(int64_vec); + } + return VecMask(result).template cast(); + } +}; + +template <> +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + auto int64_mask = VecMaskCast::apply(vec_mask); + return VecMaskCast::apply(int64_mask); + } +}; +template <> +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + auto int64_mask = VecMaskCast::apply(vec_mask); + return VecMaskCast::apply(int64_mask); + } +}; + +template <> +inline bool VecMask::all_zero() const { + return _mm256_testz_si256(mask_[0], mask_[0]); +} + +template <> +inline bool VecMask::is_masked(int i) const { + return _mm256_movemask_ps(_mm256_castsi256_ps(mask_[0])) & (1 << i); +} + +template <> +inline bool VecMask::all_masked() const { + int mask = _mm256_movemask_ps(_mm256_castsi256_ps(mask_[0])); + return mask == 0xff; +} + +template +struct VecMaskCheck { + static inline bool all_zero(const VectorizedN& 
vec_mask) { + bool all_zero = true; + for (int i = 0; i < N; ++i) { + all_zero = all_zero && (_mm256_testz_si256(vec_mask[i], vec_mask[i]) > 0); + if (!all_zero) { + return all_zero; + } + } + return all_zero; + } + + static inline bool is_masked(const VectorizedN& vec_mask, int i) { + for (int j = 0; j < N; ++j) { + if (i < (j + 1) * 4) { + return _mm256_movemask_pd(_mm256_castsi256_pd(vec_mask[j])) & + (1 << (i - j * 4)); + } + } + return false; + } + + static inline bool all_masked(const VectorizedN& vec_mask) { + bool all_masked = true; + for (int i = 0; i < N; ++i) { + all_masked = all_masked && + (_mm256_movemask_pd(_mm256_castsi256_pd(vec_mask[i])) == 0x0f); + if (!all_masked) { + return all_masked; + } + } + return all_masked; + } +}; + +#define VEC_MASK_METHOD_WITH_CAST_TO_INT( \ + T, N, return_type, method, args_def, args) \ + template <> \ + inline return_type VecMask::method args_def const { \ + return cast().method args; \ + } + +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_zero, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_zero, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, is_masked, (int i), (i)) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, is_masked, (int i), (i)) +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_masked, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_masked, (), ()) + +#undef VEC_MASK_DEFINE_METHOD_WITH_CAST_TO_INT + +#endif + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h new file mode 100644 index 0000000000000000000000000000000000000000..8659fbb20f9665dabfb6dd52b224b5f8d60da8ff --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h @@ -0,0 +1,1369 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +// This file defines Vectorized<> for the quantized types. +// +// +// Currently, we simply use these classes as efficient converters between +// the quantized types and Vectorized, usually in bandwidth-bound cases +// where doing the arithmetic in full-precision is acceptable (e.g. +// elementwise operators). +// +// +// Conversions are as follows: +// Vectorized -> 4x Vectorized +// Vectorized -> 4x Vectorized +// Vectorized -> 1x Vectorized +// +// The size of the returned float vector is specified by the special +// constexpr function float_num_vecs. The type of the value returned +// from dequantize (and expected as an argument to quantize) is +// specified by float_vec_return_type. +// +// When writing kernels with these vectors, it is expected that floating- +// point operations will be carried out in a loop over Vectorized::float_num_vecs +// iterations. 
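+//
+// A minimal usage sketch (an illustration added for this document, not part
+// of the upstream header; `src`, `dst` and the scale/zero-point locals
+// `scale_v`, `zp_v`, `premul_v`, `scale`, `zero_point`, `inv_scale` are
+// hypothetical, assuming an AVX2 build):
+//
+//   using qvec = at::vec::Vectorized<c10::qint8>;
+//   auto q = qvec::loadu(src);                         // 32 qint8 lanes
+//   auto f = q.dequantize(scale_v, zp_v, premul_v);    // 4x Vectorized<float>
+//   for (int i = 0; i < qvec::float_num_vecs(); ++i) {
+//     f[i] = f[i] * f[i];                              // float math here
+//   }
+//   qvec::quantize(f, scale, zero_point, inv_scale).store(dst);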
+ +namespace at::vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) + +#ifdef _MSC_VER +__declspec(align(64)) struct Vectorizedqi { + protected: + __m256i vals; +#else +struct Vectorizedqi { + protected: + __m256i vals __attribute__((aligned(64))); +#endif + + public: + Vectorizedqi() {} + Vectorizedqi(__m256i v) : vals(v) {} + operator __m256i() const { + return vals; + } +}; + +template +__m256i pack_saturate_and_clamp( + __m256i first, + __m256i second, + T min_val, + T max_val); + +template <> +inline __m256i pack_saturate_and_clamp( + __m256i /*first*/, + __m256i /*second*/, + int32_t /*min_val*/, + int32_t /*max_val*/) { + // This function is for linkage only, will not be used + AT_ERROR("pack_saturate_and_clamp is not supported"); +} + +template <> +inline __m256i pack_saturate_and_clamp( + __m256i first, + __m256i second, + int8_t min_val, + int8_t max_val) { + __m256i packed_and_sat = _mm256_packs_epi16(first, second); + return _mm256_max_epi8( + _mm256_set1_epi8(min_val), + _mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val))); +} + +template <> +inline __m256i pack_saturate_and_clamp( + __m256i first, + __m256i second, + uint8_t min_val, + uint8_t max_val) { + __m256i packed_and_sat = _mm256_packus_epi16(first, second); + return _mm256_max_epu8( + _mm256_set1_epi8(min_val), + _mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val))); +} + +template +typename std::enable_if_t || std::is_same_v, at::vec::Vectorized> +inline convert_int8_to_float(at::vec::Vectorized src) { + // Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + // Only handle first 8*8 bits + __m128i input_128 = _mm256_castsi256_si128(src); + // Convert from 8*uint8/int8 to 8*int32 + __m256i input_256_int32; + if constexpr (std::is_same_v) + input_256_int32 = _mm256_cvtepu8_epi32(input_128); + else + input_256_int32 = _mm256_cvtepi8_epi32(input_128); + // Convert from 8*int32 to 8*float + return _mm256_cvtepi32_ps(input_256_int32); +} + +template +typename std::enable_if_t || std::is_same_v, at::vec::Vectorized> +inline convert_float_to_int8(at::vec::Vectorized src) { + // Convert from float32 to int32 with truncation + __m256i x_values_int32 = _mm256_cvttps_epi32(src); + + // Convert from int32 to int16 using signed saturation + __m256i xy_packed_v = _mm256_packs_epi32(x_values_int32, x_values_int32); + + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + + // Convert from int16 to uint8/int8 using unsigned saturation + __m256i xyzw_clamped_v = pack_saturate_and_clamp( + xy_packed_v, xy_packed_v, min_val, max_val); + __m256i permute_mask_v = + _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00); + return _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v); +} + +template +__FORCE_INLINE void QuantizeAvx2( + const float* src, + T* dst, + int len, + float inverse_scale, + int64_t zero_point) { + constexpr int VLEN = 8; + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + const __m256i min_v = _mm256_set1_epi32(min_val); + const __m256i max_v = _mm256_set1_epi32(max_val); + // This is the largest int32 value < int32_max exactly representable in float + constexpr int32_t int32_float_max_val = + std::numeric_limits::max() - 127; + int i = 0; + __m256 inverse_scale_v = _mm256_set1_ps(inverse_scale); + // clang-format off + static const __m256i shuffle_mask_v = _mm256_set_epi8( + 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00);
+  // clang-format on
+  __m256i permute_mask_v =
+      _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
+  __m256i permute_mask_l8_v =
+      _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
+  int len_aligned = len / (VLEN * 4) * (VLEN * 4);
+  for (; i < len_aligned; i += 4 * VLEN) {
+    // x
+    __m256 x_vals = _mm256_load_ps(src + i);
+    __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
+    // If the floating point value is greater than int32_max,
+    // _mm256_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to
+    // avoid this.
+    x_transformed_v =
+        _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // y
+    __m256 y_vals = _mm256_load_ps(src + i + VLEN);
+    __m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
+    y_transformed_v =
+        _mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // z
+    __m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
+    __m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
+    z_transformed_v =
+        _mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // w
+    __m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
+    __m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
+    w_transformed_v =
+        _mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
+
+    __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
+    __m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
+    __m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
+    __m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
+
+    // add zero point
+    x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
+    y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
+    z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
+    w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
+
+    __m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
+    __m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
+    __m256i xyzw_clamped_v =
+        pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
+
+    xyzw_clamped_v =
+        _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
+    _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
+  }
+
+  // Additional 8-lane AVX2 version to take advantage when len is smaller,
+  // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
+  for (; i < len / VLEN * VLEN; i += VLEN) {
+    __m256 x_vals = _mm256_load_ps(src + i);
+    __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
+    x_transformed_v =
+        _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
+    x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
+    __m256i x_clipped_v =
+        _mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
+
+    x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
+    x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
+    _mm_storel_epi64(
+        reinterpret_cast<__m128i*>(dst + i),
+        _mm256_castsi256_si128(x_clipped_v));
+  }
+
+  for (; i < len; ++i) {
+    float transformed = src[i] * inverse_scale;
+
+    // Not exactly the same behavior as the vectorized code.
+    // The vectorized code above always rounds to even in halfway cases
+    // (https://software.intel.com/en-us/node/523819), but std::nearbyint
+    // does the same only when the current rounding mode is FE_TONEAREST.
+    // However, in practice, this should not be a problem because most cases
+    // use the default rounding mode FE_TONEAREST.
+    // Note that we cannot implement the same behavior as the vectorized code
+    // using std::round because it does rounding away from zero in halfway
+    // cases.
+    transformed = zero_point + std::nearbyint(transformed);
+    float clipped =
+        std::min(std::max(transformed, float(min_val)), float(max_val));
+    dst[i] = clipped;
+  }
+}
+
+template<>
+struct Vectorized<c10::qint32> : public Vectorizedqi {
+    using size_type = int;
+    static constexpr size_type size() {
+        return 8;
+    }
+
+    static constexpr int float_num_vecs() {
+        return 1;
+    }
+
+    static constexpr int int_num_vecs() {
+        return 1;
+    }
+
+    using float_vec_return_type = std::array<Vectorized<float>, 1>;
+    using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
+    using value_type = c10::qint32::underlying;
+
+ public:
+    using Vectorizedqi::Vectorizedqi;
+    Vectorized() {}
+
+    Vectorized(__m256i vals_) { vals = vals_;}
+
+    // Broadcast constructor
+    Vectorized(const c10::qint32& val) {
+        value_type uw = val.val_;
+        vals = _mm256_set1_epi32(uw);
+    }
+
+    void store(void* ptr, int count = size()) const {
+      if (count != size()) {
+        memcpy(ptr, &vals, count * sizeof(value_type));
+      } else {
+        _mm256_storeu_si256((__m256i*)ptr, vals);
+      }
+    }
+
+    static Vectorized<c10::qint32> loadu(const void* ptr) {
+        return Vectorized<c10::qint32>(ptr);
+    }
+
+    static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
+        __at_align__ value_type tmp_values[size()];
+        // Ensure uninitialized memory does not change the output value.
+        // See https://github.com/pytorch/pytorch/issues/32502 for more
+        // details. We do not initialize arrays to zero using "={0}" because
+        // gcc would compile it to two instructions while a loop would be
+        // compiled to one instruction.
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_zp_premul) const { + __m256 float_vals = _mm256_cvtepi32_ps(vals); + return {vec::fmadd(scale, Vectorized(float_vals), scale_zp_premul)}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m256 float_vals = _mm256_cvtepi32_ps(vals); + return {(Vectorized(float_vals) - zero_point) * scale}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + Vectorized retval; + auto rhs_data = (__m256)rhs[0]; + at::native::quantize_vec( + scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8); + return retval; + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epi32(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epi32(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epi32( + _mm256_max_epi32(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + return {_mm256_sub_epi32(vals, b)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + + __m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v); + __m256i rounded = _mm256_cvtps_epi32(scaled); + return _mm256_add_epi32(rounded, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + return _mm256_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + return _mm256_add_epi32(a, b); +} + +/* + * Convert values from int32 back to int8/uint8 + */ +template +__m256i RequantizeAvx2( + const std::array, 4>& inp, + __m256 multiplier, + __m256i zp) { + static_assert( + std::is_same_v || std::is_same_v, + "Only int8_t/uint8_t are supported"); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + __m256i permute_mask_v = + _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00); + __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier); + __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier); + __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier); + __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier); + + __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v); + __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v); + __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v); + __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v); + + /* Add zero point */ + __m256i x_v = _mm256_add_epi32(x_rounded_v, zp); + __m256i y_v = _mm256_add_epi32(y_rounded_v, zp); + __m256i z_v = 
_mm256_add_epi32(z_rounded_v, zp); + __m256i w_v = _mm256_add_epi32(w_rounded_v, zp); + + /* Pack to int16_t and saturate */ + __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v); + __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v); + + __m256i xyzw_clamped_v = + pack_saturate_and_clamp(xy_packed_v, zw_packed_v, min_val, max_val); + + /* + * xyzw_clamped_v has results in the following layout so we need to + * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 + */ + xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v); + return xyzw_clamped_v; +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 32; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::qint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + + Vectorized() {} + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint8& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi8(uw); + } + + // This is needed because the compiler emits awful code for the default + // constructor for moving the enum + // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy) + C10_CLANG_DIAGNOSTIC_PUSH() + #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy") + C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy") + #endif + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + C10_CLANG_DIAGNOSTIC_POP() + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + private: + __m256i cvtepi8_epi32(__m128i epi8_vals) const { + return _mm256_cvtepi8_epi32(epi8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_neg_zp_premul) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_neg_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_neg_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_neg_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_neg_zp_premul); + return {val0, val1, val2, val3}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = (Vectorized(float_val0) - zero_point) * scale; + auto val1 = (Vectorized(float_val1) - zero_point) * scale; + auto val2 = (Vectorized(float_val2) - zero_point) * scale; + auto val3 = (Vectorized(float_val3) - zero_point) * scale; + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float /*scale*/, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + int8_t quantized_values[32]; + QuantizeAvx2( + rhs_data, quantized_values, 32, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epi8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epi8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epi8( + _mm256_max_epi8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256i int32_val0 = cvtepi8_epi32(int_val0); + __m256i int32_val1 = cvtepi8_epi32(int_val1); + __m256i int32_val2 = cvtepi8_epi32(int_val2); + __m256i int32_val3 = 
cvtepi8_epi32(int_val3); + + __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0)); + __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1)); + __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2)); + __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3)); + + __m256i int32_b0 = cvtepi8_epi32(int_b0); + __m256i int32_b1 = cvtepi8_epi32(int_b1); + __m256i int32_b2 = cvtepi8_epi32(int_b2); + __m256i int32_b3 = cvtepi8_epi32(int_b3); + + __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0); + __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1); + __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2); + __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3); + + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + return RequantizeAvx2(inp, multiplier_v, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 32; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::quint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::quint8& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi8(uw); + } + + // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy) + C10_CLANG_DIAGNOSTIC_PUSH() + #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy") + C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy") + #endif + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + C10_CLANG_DIAGNOSTIC_POP() + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + private: + __m256i cvtepu8_epi32(__m128i epu8_vals) const { + return _mm256_cvtepu8_epi32(epu8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_zp_premul) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_zp_premul); + return {val0, val1, val2, val3}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = (Vectorized(float_val0) - zero_point) * scale; + auto val1 = (Vectorized(float_val1) - zero_point) * scale; + auto val2 = (Vectorized(float_val2) - zero_point) * scale; + auto val3 = (Vectorized(float_val3) - zero_point) * scale; + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float /*scale*/, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + uint8_t quantized_values[32]; + QuantizeAvx2( + rhs_data, quantized_values, 32, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epu8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epu8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epu8( + _mm256_max_epu8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256i int32_val0 = cvtepu8_epi32(int_val0); + __m256i int32_val1 = cvtepu8_epi32(int_val1); + __m256i int32_val2 = cvtepu8_epi32(int_val2); + __m256i int32_val3 = cvtepu8_epi32(int_val3); 
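+    // b undergoes the same zero-extension to int32 below, so the lane-wise
+    // subtraction is carried out in int32 and cannot wrap around.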
+ + __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0)); + __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1)); + __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2)); + __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3)); + + __m256i int32_b0 = cvtepu8_epi32(int_b0); + __m256i int32_b1 = cvtepu8_epi32(int_b1); + __m256i int32_b2 = cvtepu8_epi32(int_b2); + __m256i int32_b3 = cvtepu8_epi32(int_b3); + + __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0); + __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1); + __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2); + __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3); + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + return RequantizeAvx2(inp, multiplier_v, zero_point_v); + } + + private: + + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#else + +// NOTE: These are low-performance implementations that we fall back on +// if we are not building with AVX2. This may not be an issue, because +// currently for quantization we assume the user has at least AVX512 +// installed, so these can simply act as a reference implementation. +// +// If in the future we relax this requirement (AVX2+), we should probably +// revisit these implementations + +template < + typename T, + typename float_vec_return_type_, + typename int_vec_return_type_, + int size_> +struct VectorizedQuantizedConverter { + static constexpr int size() { + return size_; + } + + static constexpr int float_num_vecs() { + return size() / 8; + } + + static constexpr int int_num_vecs() { + return size() / 8; + } + + using float_vec_return_type = float_vec_return_type_; + using int_vec_return_type = int_vec_return_type_; + + using value_type = typename T::underlying; + std::array vals; + + VectorizedQuantizedConverter(T val) { + for (const auto i : c10::irange(size())) { + vals[i] = val.val_; + } + } + + VectorizedQuantizedConverter(const void* ptr) { + memcpy(vals.data(), ptr, sizeof(value_type) * size()); + } + + void store(void* ptr, int count = size()) const { + memcpy(ptr, vals.data(), count * sizeof(value_type)); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized /*scale_zp_premul*/) const { + float_vec_return_type rv; + for (const auto i : c10::irange(float_num_vecs())) { + float tmp_vals[8]; + for (const auto j : c10::irange(8)) { + tmp_vals[j] = at::native::dequantize_val( + scale[j], zero_point[j], T(vals[8 * i + j])); + } + rv[i] = Vectorized(tmp_vals[0], + tmp_vals[1], + tmp_vals[2], + tmp_vals[3], + tmp_vals[4], + tmp_vals[5], + tmp_vals[6], + tmp_vals[7]); + } + return rv; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + Vectorized scale_zp_premul; + return dequantize(scale, zero_point, scale_zp_premul); + } + + protected: + VectorizedQuantizedConverter() {} +}; + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + 
std::array, 1>, + 8>() {} + Vectorized(c10::qint32 val) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint32*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + for (const auto i : c10::irange(size())) { + retval[0].vals[i] = vals[i] - b.vals[i]; + } + return retval; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = + std::nearbyint(static_cast(inp[0].vals[i]) * multiplier) + + zero_point; + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] * b.vals[i]; + } + return retval; +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] + b.vals[i]; + } + return retval; +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>() {} + Vectorized(c10::qint8 val) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + 
std::array, 4>, + 32>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint8*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + std::nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32> { + Vectorized() + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>() {} + Vectorized(c10::quint8 val) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>(val) {} + Vectorized(const void* ptr) + : 
VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::quint8*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + std::nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#endif // if defined(CPU_CAPABILITY_AVX2) + +#if defined(CPU_CAPABILITY_NEON) +template +typename std::enable_if_t, at::vec::Vectorized> +inline convert_int8_to_float(at::vec::Vectorized src) { + // Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + auto s8x8 = vld1_s8(src.operator const int8_t*()); + auto s16x8 = vmovl_s8(s8x8); + + auto s32x4_hi = vmovl_s16(vget_high_s16(s16x8)); + auto s32x4_lo = 
vmovl_s16(vget_low_s16(s16x8)); + + return Vectorized(vcvtq_f32_s32(s32x4_lo), vcvtq_f32_s32(s32x4_hi)); +} + +template +typename std::enable_if_t, at::vec::Vectorized> +inline convert_int8_to_float(at::vec::Vectorized src) { + // Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + auto u8x8 = vld1_u8(src.operator const uint8_t*()); + auto u16x8 = vmovl_u8(u8x8); + auto u32x4_hi = vmovl_u16(vget_high_u16(u16x8)); + auto u32x4_lo = vmovl_u16(vget_low_u16(u16x8)); + + return Vectorized(vcvtq_f32_u32(u32x4_lo), vcvtq_f32_u32(u32x4_hi)); +} + +#endif +}} // namespace at::vec::CPU_CAPABILITY diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..2d8afd9ef29525e1acedd10f35cb3e7c21e646af --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h @@ -0,0 +1,73 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +inline std::tuple, Vectorized> convert_bfloat16_float( + const Vectorized& a) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ BFloat16 arr2[K]; + a.store(arr2); + convert(arr2, arr, K); + return std::make_tuple( + Vectorized::loadu(arr), + Vectorized::loadu(arr + Vectorized::size())); +} + +inline Vectorized convert_float_bfloat16( + const Vectorized& a, + const Vectorized& b) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ BFloat16 arr2[K]; + a.store(arr); + b.store(arr + Vectorized::size()); + convert(arr, arr2, K); + return Vectorized::loadu(arr2); +} + +inline void load_fp32_from_bf16(const c10::BFloat16* data, Vectorized& out) { + __at_align__ float values[Vectorized::size()]; + for (const auto k : c10::irange(Vectorized::size())) { + values[k] = data[k]; + } + out = Vectorized::loadu(values); +} + +inline void load_fp32_from_bf16( + const c10::BFloat16* data, + Vectorized& out1, + Vectorized& out2) { + load_fp32_from_bf16(data, out1); + data += Vectorized::size(); + load_fp32_from_bf16(data, out2); +} + +inline void load_fp32_from_fp16(const c10::Half* data, Vectorized& out) { + __at_align__ float values[Vectorized::size()]; + for (const auto k : c10::irange(Vectorized::size())) { + values[k] = data[k]; + } + out = Vectorized::loadu(values); +} + +inline void load_fp32_from_fp16( + const c10::Half* data, + Vectorized& out1, + Vectorized& out2) { + load_fp32_from_fp16(data, out1); + data += Vectorized::size(); + load_fp32_from_fp16(data, out2); +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..2c74847758d84e866df7e1c3cc802a6e61cea8d1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h @@ -0,0 +1,584 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +using 
ComplexDbl = c10::complex; + +template <> +class Vectorized { + union { + struct { + vfloat64 _vec0; + vfloat64 _vec1; + }; + struct { + vbool64 _vecb0; + vbool64 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = ComplexDbl; + using vec_internal_type = vfloat64; + using vec_internal_mask_type = vbool64; + using size_type = int; + static constexpr size_type size() { + return 2; + } + Vectorized() {} + C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} + + Vectorized(ComplexDbl val) { + double real_value = val.real(); + double imag_value = val.imag(); + _vec0 = vfloat64{real_value, imag_value}; + _vec1 = vfloat64{real_value, imag_value}; + } + Vectorized(ComplexDbl val1, ComplexDbl val2) { + _vec0 = vfloat64{val1.real(), val1.imag()}; + _vec1 = vfloat64{val2.real(), val2.imag()}; + } + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {a._vec0, b._vec1}; + } + + template + static Vectorized C10_ALWAYS_INLINE + el_blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_1st = VsxDblMask1(mask); + const vbool64 mask_2nd = VsxDblMask2(mask); + return { + (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), + (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + static Vectorized blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // convert std::complex index mask to V index mask: xy -> xxyy + auto mask_complex = + Vectorized(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0)); + return { + vec_sel(a._vec0, b._vec0, mask_complex._vecb0), + vec_sel(a._vec1, b._vec1, mask_complex._vecb1)}; + } + + static Vectorized C10_ALWAYS_INLINE elwise_blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + return { + vec_sel(a._vec0, b._vec0, mask._vecb0), + vec_sel(a._vec1, b._vec1, mask._vecb1)}; + } + template + static Vectorized arange( + ComplexDbl base = 0., + step_t step = static_cast(1)) { + return Vectorized(base, base + step); + } + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + } + return b; + } + + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return { + vec_vsx_ld(offset0, reinterpret_cast(tmp_values)), + vec_vsx_ld(offset16, reinterpret_cast(tmp_values))}; + } + void C10_ALWAYS_INLINE 
store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, reinterpret_cast(tmp_values)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(tmp_values)); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + const ComplexDbl& operator[](int idx) const = delete; + ComplexDbl& operator[](int idx) = delete; + + Vectorized map(ComplexDbl (*const f)(ComplexDbl)) const { + __at_align__ ComplexDbl tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + + Vectorized map(ComplexDbl (*const f)(const ComplexDbl&)) const { + __at_align__ ComplexDbl tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + + Vectorized el_swapped() const { + vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2); + vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2); + return {v0, v1}; + } + + Vectorized el_madd( + const Vectorized& multiplier, + const Vectorized& val) const { + return { + vec_madd(_vec0, multiplier._vec0, val._vec0), + vec_madd(_vec1, multiplier._vec1, val._vec1)}; + } + + Vectorized el_mergeo() const { + vfloat64 v0 = vec_splat(_vec0, 1); + vfloat64 v1 = vec_splat(_vec1, 1); + return {v0, v1}; + } + + Vectorized el_mergee() const { + vfloat64 v0 = vec_splat(_vec0, 0); + vfloat64 v1 = vec_splat(_vec1, 0); + return {v0, v1}; + } + + static Vectorized el_mergee( + Vectorized& first, + Vectorized& second) { + return { + vec_mergeh(first._vec0, second._vec0), + vec_mergeh(first._vec1, second._vec1)}; + } + + static Vectorized el_mergeo( + Vectorized& first, + Vectorized& second) { + return { + vec_mergel(first._vec0, second._vec0), + vec_mergel(first._vec1, second._vec1)}; + } + + Vectorized abs_2_() const { + auto a = (*this).elwise_mult(*this); + auto permuted = a.el_swapped(); + a = a + permuted; + return a; + } + + Vectorized abs_() const { + auto vi = el_mergeo(); + auto vr = el_mergee(); + return {Sleef_hypotd2_u05vsx(vr._vec0, vi._vec0), Sleef_hypotd2_u05vsx(vr._vec1, vi._vec1)}; + } + + Vectorized abs() const { + return abs_() & vd_real_mask; + } + + Vectorized angle_() const { + // angle = atan2(b/a) + // auto b_a = _mm256_permute_pd(values, 0x05); // b a + // return Sleef_atan2d4_u10(values, b_a); // 90-angle angle + Vectorized ret; + ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]); + ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]); + return ret; + } + + Vectorized angle() const { + return angle_() & vd_real_mask; + } + + Vectorized real_() const { + return *this & vd_real_mask; + } + Vectorized real() const { + return *this & vd_real_mask; + } + Vectorized imag_() const { + return *this & vd_imag_mask; + } + Vectorized imag() const { + return imag_().el_swapped(); + } + + Vectorized conj_() const { + return *this ^ vd_isign_mask; + } + Vectorized conj() const { + return *this ^ vd_isign_mask; + } + + Vectorized log() const { + // Most trigonomic ops use the log() op to improve complex number + // performance. 
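+    // (For example, log2() and log10() below are computed as log() rescaled
+    // by a constant factor.)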
+ return map(std::log); + } + + Vectorized log2() const { + // log2eB_inv + auto ret = log(); + return ret.elwise_mult(vd_log2e_inv); + } + Vectorized log10() const { + auto ret = log(); + return ret.elwise_mult(vd_log10e_inv); + } + + Vectorized log1p() const { + return map(std::log1p); + } + + Vectorized asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + auto conj = conj_(); + auto b_a = conj.el_swapped(); + auto ab = conj.elwise_mult(b_a); + auto im = ab + ab; + auto val_2 = (*this).elwise_mult(*this); + auto val_2_swapped = val_2.el_swapped(); + auto re = horizontal_sub(val_2, val_2_swapped); + re = Vectorized(vd_one) - re; + auto root = el_blend<0x0A>(re, im).sqrt(); + auto ln = (b_a + root).log(); + return ln.el_swapped().conj(); + } + + Vectorized acos() const { + // acos(x) = pi/2 - asin(x) + return Vectorized(vd_pi_2) - asin(); + } + + Vectorized atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + auto ione = Vectorized(vd_imag_one); + auto sum = ione + *this; + auto sub = ione - *this; + auto ln = (sum / sub).log(); // ln((i + z)/(i - z)) + return ln * vd_imag_half; // i/2*ln() + } + Vectorized atanh() const { + return map(std::atanh); + } + + Vectorized sin() const { + return map(std::sin); + } + Vectorized sinh() const { + return map(std::sinh); + } + Vectorized cos() const { + return map(std::cos); + } + Vectorized cosh() const { + return map(std::cosh); + } + + Vectorized tan() const { + return map(std::tan); + } + Vectorized tanh() const { + return map(std::tanh); + } + Vectorized ceil() const { + return {vec_ceil(_vec0), vec_ceil(_vec1)}; + } + Vectorized floor() const { + return {vec_floor(_vec0), vec_floor(_vec1)}; + } + Vectorized neg() const { + auto z = Vectorized(vd_zero); + return z - *this; + } + Vectorized round() const { + return {vec_rint(_vec0), vec_rint(_vec1)}; + } + + Vectorized trunc() const { + return {vec_trunc(_vec0), vec_trunc(_vec1)}; + } + + Vectorized elwise_sqrt() const { + return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; + } + + Vectorized sqrt() const { + return map(std::sqrt); + } + + Vectorized reciprocal() const { + // re + im*i = (a + bi) / (c + di) + // re = (ac + bd)/abs_2() = c/abs_2() + // im = (bc - ad)/abs_2() = d/abs_2() + auto c_d = *this ^ vd_isign_mask; // c -d + auto abs = abs_2_(); + return c_d.elwise_div(abs); + } + + Vectorized rsqrt() const { + return sqrt().reciprocal(); + } + + static Vectorized horizontal_add( + Vectorized& first, + Vectorized& second) { + // Operates on individual floats, see _mm_hadd_ps + // {f0+f1, s0+s1, f2+f3, s2+s3, ...} + // i.e. 
it sums the re and im of each value and interleaves first and second: + // {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...} + return el_mergee(first, second) + el_mergeo(first, second); + } + + static Vectorized horizontal_sub( + Vectorized& first, + Vectorized& second) { + // we will simulate it differently with 6 instructions total + // lets permute second so that we can add it getting horizontal sums + auto first_perm = first.el_swapped(); // 2perm + auto second_perm = second.el_swapped(); // 2perm + // summ + auto first_ret = first - first_perm; // 2sub + auto second_ret = second - second_perm; // 2 sub + // now lets choose evens + return el_mergee(first_ret, second_ret); // 2 mergee's + } + + Vectorized inline operator*(const Vectorized& b) const { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i +#if 1 + // this is more vsx friendly than simulating horizontal from x86 + auto vi = b.el_mergeo(); + auto vr = b.el_mergee(); + vi = vi ^ vd_rsign_mask; + auto ret = elwise_mult(vr); + auto vx_swapped = el_swapped(); + ret = vx_swapped.el_madd(vi, ret); +#else + auto ac_bd = elwise_mult(b); + auto d_c = b.el_swapped(); + d_c = d_c ^ vd_isign_mask; + auto ad_bc = elwise_mult(d_c); + auto ret = horizontal_sub(ac_bd, ad_bc); +#endif + return ret; + } + + Vectorized inline operator/(const Vectorized& b) const { + // re + im*i = (a + bi) / (c + di) + // re = (ac + bd)/abs_2() + // im = (bc - ad)/abs_2() + auto fabs_cd = Vectorized{ + vec_andc(b._vec0, vd_sign_mask), + vec_andc(b._vec1, vd_sign_mask)}; // |c| |d| + auto fabs_dc = fabs_cd.el_swapped(); // |d| |c| + auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|) + auto a2 = elwise_div(scale); // a/sc b/sc + auto b2 = b.elwise_div(scale); // c/sc d/sc + auto acbd2 = a2.elwise_mult(b2); // ac/sc^2 bd/sc^2 + auto dc2 = b2.el_swapped(); // d/sc c/sc + dc2 = dc2 ^ vd_rsign_mask; // -d/sc c/sc + auto adbc2 = a2.elwise_mult(dc2); // -ad/sc^2 bc/sc^2 + auto ret = horizontal_add(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2 + auto denom2 = b2.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + ret = ret.elwise_div(denom2); + return ret; + } + + Vectorized exp() const { + return map(std::exp); + } + Vectorized exp2() const { + return map(exp2_impl); + } + Vectorized expm1() const { + return map(std::expm1); + } + + Vectorized pow(const Vectorized& exp) const { + __at_align__ ComplexDbl x_tmp[size()]; + __at_align__ ComplexDbl y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + + Vectorized sgn() const { + return map(at::native::sgn_impl); + } + + Vectorized operator<(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized operator<=(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized operator>(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized operator>=(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized eq(const Vectorized& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & vd_one; + } + Vectorized ne(const Vectorized& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag 
numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & vd_one; + } + + DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne) + + DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add) + DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub) + DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and) + DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or) + DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor) + // elementwise helpers + DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul) + DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div) + DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt) + DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge) + DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt) + DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple) + DEFINE_MEMBER_OP(elwise_max, ComplexDbl, vec_max) +}; + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); + // auto max = _mm256_blendv_ps(a, b, mask); + auto mask = abs_a.elwise_lt(abs_b); + auto max = Vectorized::elwise_blendv(a, b, mask); + + return max; + // Exploit the fact that all-ones is a NaN. + // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + // return _mm256_or_ps(max, isnan); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); + // auto min = _mm256_blendv_ps(a, b, mask); + auto mask = abs_a.elwise_gt(abs_b); + auto min = Vectorized::elwise_blendv(a, b, mask); + return min; + // Exploit the fact that all-ones is a NaN. + // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + // return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..58fdd34b18d862e843473a684de070167e8a662e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h @@ -0,0 +1,660 @@ + +#pragma once +#include +#include +#include +#include +#include + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +using ComplexFlt = c10::complex; + +template <> +class 
Vectorized { + private: + union { + struct { + vfloat32 _vec0; + vfloat32 _vec1; + }; + struct { + vbool32 _vecb0; + vbool32 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = ComplexFlt; + using vec_internal_type = vfloat32; + using vec_internal_mask_type = vbool32; + using size_type = int; + + static constexpr size_type size() { + return 4; + } + Vectorized() {} + + C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {} + + Vectorized(ComplexFlt val) { + float real_value = val.real(); + float imag_value = val.imag(); + _vec0 = vfloat32{real_value, imag_value, real_value, imag_value}; + _vec1 = vfloat32{real_value, imag_value, real_value, imag_value}; + } + + Vectorized(ComplexFlt val1, ComplexFlt val2, ComplexFlt val3, ComplexFlt val4) { + _vec0 = vfloat32{val1.real(), val1.imag(), val2.real(), val2.imag()}; + _vec1 = vfloat32{val3.real(), val3.imag(), val4.real(), val4.imag()}; + } + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {a._vec0, b._vec1}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxComplexMask1(mask); + return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxComplexMask1(mask); + return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_2nd = VsxComplexMask2(mask); + // generated masks + return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_2nd = VsxComplexMask2(mask); + // generated masks + return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxComplexMask1(mask); + const vbool32 mask_2nd = VsxComplexMask2(mask); + return { + (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), + (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static Vectorized C10_ALWAYS_INLINE + el_blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxMask1(mask); + const vbool32 mask_2nd = VsxMask2(mask); + return { + (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), + (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + static Vectorized blendv( + const Vectorized& a, 
+ const Vectorized& b, + const Vectorized& mask) { + // convert std::complex index mask to V index mask: xy -> xxyy + auto mask_complex = Vectorized( + vec_mergeh(mask._vec0, mask._vec0), vec_mergeh(mask._vec1, mask._vec1)); + return { + vec_sel(a._vec0, b._vec0, reinterpret_cast(mask_complex._vec0)), + vec_sel(a._vec1, b._vec1, reinterpret_cast(mask_complex._vec1)), + }; + } + + static Vectorized elwise_blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + return { + vec_sel(a._vec0, b._vec0, reinterpret_cast(mask._vec0)), + vec_sel(a._vec1, b._vec1, reinterpret_cast(mask._vec1)), + }; + } + + template + static Vectorized arange( + ComplexFlt base = 0., + step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + ComplexFlt(2) * step, + base + ComplexFlt(3) * step); + } + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return { + vec_vsx_ld(offset0, reinterpret_cast(tmp_values)), + vec_vsx_ld(offset16, reinterpret_cast(tmp_values))}; + } + + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, reinterpret_cast(tmp_values)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(tmp_values)); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + const ComplexFlt& operator[](int idx) const = delete; + ComplexFlt& operator[](int idx) = delete; + + Vectorized map(ComplexFlt (*const f)(ComplexFlt)) const { + __at_align__ ComplexFlt tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + + Vectorized map(ComplexFlt (*const f)(const ComplexFlt&)) const { + __at_align__ ComplexFlt tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + + static Vectorized horizontal_add( + Vectorized& first, + Vectorized& second) { + // Operates on individual floats, see _mm_hadd_ps + // {f0+f1, s0+s1, f2+f3, s2+s3, ...} + // i.e. 
it sums the re and im of each value and interleaves first and second: + // {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...} + return el_mergee(first, second) + el_mergeo(first, second); + } + + static Vectorized horizontal_sub_permD8( + Vectorized& first, + Vectorized& second) { + // we will simulate it differently with 6 instructions total + // let's permute second so that we can add it, getting horizontal sums + auto first_perm = first.el_swapped(); // 2perm + auto second_perm = second.el_swapped(); // 2perm + // sum + auto first_ret = first - first_perm; // 2sub + auto second_ret = second - second_perm; // 2 sub + // now let's choose the even lanes + return el_mergee(first_ret, second_ret); // 2 mergees + } + + Vectorized abs_2_() const { + auto a = (*this).elwise_mult(*this); + auto permuted = a.el_swapped(); + a = a + permuted; + return a.el_mergee(); + } + + Vectorized abs_() const { + auto vi = el_mergeo(); + auto vr = el_mergee(); + return {Sleef_hypotf4_u05vsx(vr._vec0, vi._vec0), Sleef_hypotf4_u05vsx(vr._vec1, vi._vec1)}; + } + + Vectorized abs() const { + return abs_() & real_mask; + } + + Vectorized real_() const { + return *this & real_mask; + } + Vectorized real() const { + return *this & real_mask; + } + Vectorized imag_() const { + return *this & imag_mask; + } + Vectorized imag() const { + // we can use swap_mask or sldwi + auto ret = imag_(); + return { + vec_sldw(ret._vec0, ret._vec0, 3), vec_sldw(ret._vec1, ret._vec1, 3)}; + } + + Vectorized conj_() const { + return *this ^ isign_mask; + } + Vectorized conj() const { + return *this ^ isign_mask; + } + + Vectorized log() const { + // Most trigonometric ops below go through log() to improve complex number + // performance. + return map(std::log); + } + + Vectorized log2() const { + // log2(x) = ln(x) * (1/ln(2)) + auto ret = log(); + return ret.elwise_mult(log2e_inv); + } + Vectorized log10() const { + auto ret = log(); + return ret.elwise_mult(log10e_inv); + } + + Vectorized log1p() const { + return map(std::log1p); + } + + Vectorized el_swapped() const { + vfloat32 v0 = vec_perm(_vec0, _vec0, swap_mask); + vfloat32 v1 = vec_perm(_vec1, _vec1, swap_mask); + return {v0, v1}; + } + + Vectorized el_mergee() const { + // where vec_mergee is unavailable, vec_perm with a mask can be used instead + return {vec_mergee(_vecb0, _vecb0), vec_mergee(_vecb1, _vecb1)}; + } + + Vectorized el_mergeo() const { + // where vec_mergeo is unavailable, vec_perm with a mask can be used instead + return {vec_mergeo(_vecb0, _vecb0), vec_mergeo(_vecb1, _vecb1)}; + } + + Vectorized el_madd( + const Vectorized& multiplier, + const Vectorized& val) const { + return { + vec_madd(_vec0, multiplier._vec0, val._vec0), + vec_madd(_vec1, multiplier._vec1, val._vec1)}; + } + + static Vectorized el_mergee( + Vectorized& first, + Vectorized& second) { + return { + vec_mergee(first._vecb0, second._vecb0), + vec_mergee(first._vecb1, second._vecb1)}; + } + + static Vectorized el_mergeo( + Vectorized& first, + Vectorized& second) { + return { + vec_mergeo(first._vecb0, second._vecb0), + vec_mergeo(first._vecb1, second._vecb1)}; + } + + Vectorized angle_() const { + // angle = atan2(b, a) + // auto b_a = _mm256_permute_ps(values, 0xB1); // b a + // return Sleef_atan2f8_u10(values, b_a); // 90-angle angle + Vectorized ret; + for (int i = 0; i < 4; i += 2) { + ret._vec0[i] = std::atan2(_vec0[i + 1], _vec0[i]); + ret._vec1[i] = std::atan2(_vec1[i + 1], _vec1[i]); + } + return ret; + } + + Vectorized angle() const { + return angle_() & real_mask; + } + + Vectorized sin() const { + return map(std::sin); + } + Vectorized sinh()
const { + return map(std::sinh); + } + Vectorized cos() const { + return map(std::cos); + } + Vectorized cosh() const { + return map(std::cosh); + } + Vectorized ceil() const { + return {vec_ceil(_vec0), vec_ceil(_vec1)}; + } + Vectorized floor() const { + return {vec_floor(_vec0), vec_floor(_vec1)}; + } + Vectorized neg() const { + auto z = Vectorized(zero); + return z - *this; + } + Vectorized round() const { + return {vec_round(_vec0), vec_round(_vec1)}; + } + Vectorized tan() const { + return map(std::tan); + } + Vectorized tanh() const { + return map(std::tanh); + } + Vectorized trunc() const { + return {vec_trunc(_vec0), vec_trunc(_vec1)}; + } + + Vectorized elwise_sqrt() const { + return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; + } + + Vectorized sqrt() const { + return map(std::sqrt); + } + + Vectorized reciprocal() const { + // re + im*i = 1 / (c + di), i.e. (a + bi) / (c + di) with a = 1, b = 0 + // re = (ac + bd)/abs_2() = c/abs_2() + // im = (bc - ad)/abs_2() = -d/abs_2() + auto c_d = *this ^ isign_mask; // c -d + auto abs = abs_2_(); + return c_d.elwise_div(abs); + } + + Vectorized rsqrt() const { + return sqrt().reciprocal(); + } + + Vectorized pow(const Vectorized& exp) const { + __at_align__ ComplexFlt x_tmp[size()]; + __at_align__ ComplexFlt y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + + Vectorized atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + auto ione = Vectorized(imag_one); + auto sum = ione + *this; + auto sub = ione - *this; + auto ln = (sum / sub).log(); // ln((i + z)/(i - z)) + return ln * imag_half; // i/2*ln() + } + Vectorized atanh() const { + return map(std::atanh); + } + + Vectorized acos() const { + // acos(x) = pi/2 - asin(x) + return Vectorized(pi_2) - asin(); + } + + Vectorized inline operator*(const Vectorized& b) const { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + +#if 1 + // this is more VSX-friendly than simulating horizontal ops from x86 + + auto vi = b.el_mergeo(); + auto vr = b.el_mergee(); + vi = vi ^ rsign_mask; + auto ret = elwise_mult(vr); + auto vx_swapped = el_swapped(); + ret = vx_swapped.el_madd(vi, ret); + return ret; + +#else + + auto ac_bd = elwise_mult(b); + auto d_c = b.el_swapped(); + d_c = d_c ^ isign_mask; + auto ad_bc = elwise_mult(d_c); + auto ret = horizontal_sub_permD8(ac_bd, ad_bc); + return ret; +#endif + } + + Vectorized inline operator/(const Vectorized& b) const { + // re + im*i = (a + bi) / (c + di) + // re = (ac + bd)/abs_2() + // im = (bc - ad)/abs_2() + auto fabs_cd = Vectorized{ + vec_andc(b._vec0, sign_mask), + vec_andc(b._vec1, sign_mask)}; // |c| |d| + auto fabs_dc = fabs_cd.el_swapped(); // |d| |c| + auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|) + auto a2 = elwise_div(scale); // a/sc b/sc + auto b2 = b.elwise_div(scale); // c/sc d/sc + auto acbd2 = a2.elwise_mult(b2); // ac/sc^2 bd/sc^2 + auto dc2 = b2.el_swapped(); // d/sc c/sc + dc2 = dc2 ^ rsign_mask; // -d/sc c/sc + auto adbc2 = a2.elwise_mult(dc2); // -ad/sc^2 bc/sc^2 + auto ret = horizontal_add(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2 + auto denom2 = b2.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + ret = ret.elwise_div(denom2); + return ret; + } + + Vectorized asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 - z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + +#if 1 + auto conj = conj_(); + auto b_a = conj.el_swapped(); + auto ab = conj.elwise_mult(b_a); + auto im = ab +
ab; + auto val_2 = (*this).elwise_mult(*this); + auto val_2_swapped = val_2.el_swapped(); + auto re = horizontal_sub_permD8(val_2, val_2_swapped); + re = Vectorized(one) - re; + auto root = el_blend<0xAA>(re, im).sqrt(); + auto ln = (b_a + root).log(); + return ln.el_swapped().conj(); +#else + return map(std::asin); +#endif + } + + Vectorized exp() const { + return map(std::exp); + } + Vectorized exp2() const { + return map(exp2_impl); + } + Vectorized expm1() const { + return map(std::expm1); + } + + Vectorized eq(const Vectorized& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & one; + } + Vectorized ne(const Vectorized& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & one; + } + + Vectorized sgn() const { + return map(at::native::sgn_impl); + } + + Vectorized operator<(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized operator<=(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized operator>(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized operator>=(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + DEFINE_MEMBER_OP(operator==, ComplexFlt, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, ComplexFlt, vec_cmpne) + + DEFINE_MEMBER_OP(operator+, ComplexFlt, vec_add) + DEFINE_MEMBER_OP(operator-, ComplexFlt, vec_sub) + DEFINE_MEMBER_OP(operator&, ComplexFlt, vec_and) + DEFINE_MEMBER_OP(operator|, ComplexFlt, vec_or) + DEFINE_MEMBER_OP(operator^, ComplexFlt, vec_xor) + // elementwise helpers + DEFINE_MEMBER_OP(elwise_mult, ComplexFlt, vec_mul) + DEFINE_MEMBER_OP(elwise_div, ComplexFlt, vec_div) + DEFINE_MEMBER_OP(elwise_gt, ComplexFlt, vec_cmpgt) + DEFINE_MEMBER_OP(elwise_ge, ComplexFlt, vec_cmpge) + DEFINE_MEMBER_OP(elwise_lt, ComplexFlt, vec_cmplt) + DEFINE_MEMBER_OP(elwise_le, ComplexFlt, vec_cmple) + DEFINE_MEMBER_OP(elwise_max, ComplexFlt, vec_max) +}; + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); + // auto max = _mm256_blendv_ps(a, b, mask); + auto mask = abs_a.elwise_lt(abs_b); + auto max = Vectorized::elwise_blendv(a, b, mask); + + return max; + // Exploit the fact that all-ones is a NaN. + // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + // return _mm256_or_ps(max, isnan); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); + // auto min = _mm256_blendv_ps(a, b, mask); + auto mask = abs_a.elwise_gt(abs_b); + auto min = Vectorized::elwise_blendv(a, b, mask); + return min; + // Exploit the fact that all-ones is a NaN. 
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + // return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..c472706d3db1e520a2af0c1c44fecd609fb13703 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h @@ -0,0 +1,477 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at { +namespace vec { + +inline namespace CPU_CAPABILITY { + + +template <> +class Vectorized { + private: + union { + struct { + vfloat64 _vec0; + vfloat64 _vec1; + }; + struct { + vbool64 _vecb0; + vbool64 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = double; + using vec_internal_type = vfloat64; + using vec_internal_mask_type = vbool64; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} + C10_ALWAYS_INLINE Vectorized(double scalar) + : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} + C10_ALWAYS_INLINE Vectorized( + double scalar1, + double scalar2, + double scalar3, + double scalar4) + : _vec0{vfloat64{scalar1, scalar2}}, _vec1{vfloat64{scalar3, scalar4}} {} + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + int zero_mask() const { + auto cmp = (*this == vd_zero); + return (cmp._vecb0[0] & 1) | (cmp._vecb0[1] & 2) | (cmp._vecb1[0] & 4) | + (cmp._vecb1[1] & 8); + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return { b._vec0, a._vec1 }; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return { a._vec0, b._vec1 }; + } + + + 
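+  // Illustrative usage sketch (added commentary, values assumed; not part of
+  // the original header): for a = {a0, a1, a2, a3} and b = {b0, b1, b2, b3},
+  //   auto r = Vectorized<double>::blend<0b0101>(a, b); // r = {b0, a1, b2, a3}
+  // each low mask bit selects the corresponding lane from b (bit set) or a
+  // (bit clear); the surrounding specializations differ only in which 128-bit
+  // half actually needs a vec_sel.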
template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_1st = VsxDblMask1(mask); + return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1 }; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_1st = VsxDblMask1(mask); + return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1 }; + } + + + template + static std::enable_if_t> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_2nd = VsxDblMask2(mask); + // generated masks + return { a._vec0, + (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) }; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_2nd = VsxDblMask2(mask); + // generated masks + return { b._vec0, + (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) }; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_1st = VsxDblMask1(mask); + const vbool64 mask_2nd = VsxDblMask2(mask); + return { + (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), + (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) }; + } + + + static Vectorized C10_ALWAYS_INLINE blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // the mask used here is returned by a vec256 comparison, + // so it can be used directly with vec_sel + return { + vec_sel(a._vec0, b._vec0, mask._vecb0), + vec_sel(a._vec1, b._vec1, mask._vecb1)}; + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + + static Vectorized C10_ALWAYS_INLINE + set(const Vectorized& a, const Vectorized& b, size_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + + return b; + } + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, tmp_values); + vec_vsx_st(_vec1, offset16, tmp_values); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + Vectorized map(double (*const f)(double)) const { + Vectorized ret; + for (const auto i : c10::irange(size()/2)) { + ret._vec0[i] = f(_vec0[i]); + } + for (const auto i : c10::irange(size()/2)) { + ret._vec1[i] = f(_vec1[i]); + } + return ret; + } + + Vectorized mapbi(double (*const f)(double, double), const Vectorized& other) + const { + Vectorized ret; + for (const auto i : c10::irange(size()/2)) { + ret._vec0[i] = f(_vec0[i], other._vec0[i]); + } + for (const auto i : c10::irange(size()/2)) { + ret._vec1[i] = f(_vec1[i], other._vec1[i]); + } + return ret; + }
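+  // Illustrative sketch (added commentary, not in the original header):
+  // map()/mapbi() fall back to per-lane scalar loops, so any plain
+  // double(double) function can be lifted lane-wise, e.g.:
+  //   auto v = Vectorized<double>(1.0, 4.0, 9.0, 16.0);
+  //   auto h = v.map(+[](double x) { return x * 0.5; });   // {0.5, 2.0, 4.5, 8.0}
+  //   auto d = v.mapbi(+[](double x, double y) { return x - y; }, v); // all zeros
+  // the unary + decays the captureless lambdas to the plain function pointers
+  // that map()/mapbi() expect.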
Vectorized C10_ALWAYS_INLINE abs() const { + return {vec_abs(_vec0), vec_abs(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE acos() const { + return {Sleef_acosd2_u10(_vec0), Sleef_acosd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE acosh() const { + return {Sleef_acoshd2_u10(_vec0), Sleef_acoshd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE asin() const { + return {Sleef_asind2_u10(_vec0), Sleef_asind2_u10(_vec1)}; + } + Vectorized atan() const { + return {Sleef_atand2_u10(_vec0), Sleef_atand2_u10(_vec1)}; + } + Vectorized atanh() const { + return {Sleef_atanhd2_u10(_vec0), Sleef_atanhd2_u10(_vec1)}; + } + Vectorized atan2(const Vectorized& b) const { + return {Sleef_atan2d2_u10(_vec0, b._vec0), Sleef_atan2d2_u10(_vec1, b._vec1)}; + } + Vectorized copysign(const Vectorized &sign) const { + return {Sleef_copysignd2(_vec0, sign._vec0), Sleef_copysignd2(_vec1, sign._vec1)}; + } + Vectorized erf() const { + return {Sleef_erfd2_u10(_vec0), Sleef_erfd2_u10(_vec1)}; + } + Vectorized erfc() const { + return {Sleef_erfcd2_u15(_vec0), Sleef_erfcd2_u15(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE exp() const { + return {Sleef_expd2_u10(_vec0), Sleef_expd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE exp2() const { + return {Sleef_exp2d2_u10(_vec0), Sleef_exp2d2_u10(_vec1)}; + } + Vectorized expm1() const { + return {Sleef_expm1d2_u10(_vec0), Sleef_expm1d2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE exp_u20() const { + return exp(); + } + + Vectorized lgamma() const __ubsan_ignore_undefined__ { + return {Sleef_lgammad2_u10(_vec0), Sleef_lgammad2_u10(_vec1)}; + } + + Vectorized erfinv() const { + return map(calc_erfinv); + } + + Vectorized angle() const { + auto tmp = blendv( + Vectorized(0), Vectorized(c10::pi), *this < Vectorized(0)); + return blendv(tmp, *this, isnan()); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized{0}; + } + Vectorized conj() const { + return *this; + } + + Vectorized C10_ALWAYS_INLINE log() const { + return {Sleef_logd2_u10(_vec0), Sleef_logd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE log10() const { + return {Sleef_log10d2_u10(_vec0), Sleef_log10d2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE log1p() const { + return {Sleef_log1pd2_u10(_vec0), Sleef_log1pd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE log2() const { + return {Sleef_log2d2_u10(_vec0), Sleef_log2d2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE ceil() const { + return {vec_ceil(_vec0), vec_ceil(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE cos() const { + return {Sleef_cosd2_u10(_vec0), Sleef_cosd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE cosh() const { + return {Sleef_coshd2_u10(_vec0), Sleef_coshd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE floor() const { + return {vec_floor(_vec0), vec_floor(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE neg() const { + return {vec_neg(_vec0), vec_neg(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE round() const { + return {vec_rint(_vec0), vec_rint(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE sin() const { + return {Sleef_sind2_u10(_vec0), Sleef_sind2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE sinh() const { + return {Sleef_sinhd2_u10(_vec0), Sleef_sinhd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE tan() const { + return {Sleef_tand2_u10(_vec0), Sleef_tand2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE tanh() const { + return {Sleef_tanhd2_u10(_vec0), Sleef_tanhd2_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE trunc() const { + return {vec_trunc(_vec0), vec_trunc(_vec1)}; + } + + 
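+  // Illustrative note (added commentary, values assumed): the rounding family
+  // above maps one-to-one onto VSX intrinsics; in the default rounding mode,
+  // for v = Vectorized<double>(-1.5, -0.5, 0.5, 2.5):
+  //   v.trunc() -> {-1.0, -0.0, 0.0, 2.0}  // vec_trunc, toward zero
+  //   v.floor() -> {-2.0, -1.0, 0.0, 2.0}  // vec_floor
+  //   v.ceil()  -> {-1.0, -0.0, 1.0, 3.0}  // vec_ceil
+  //   v.round() -> {-2.0, -0.0, 0.0, 2.0}  // vec_rint, ties to even
+  // frac() below is then simply x - trunc(x).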
Vectorized C10_ALWAYS_INLINE frac() const { + return *this - trunc(); + } + + Vectorized C10_ALWAYS_INLINE sqrt() const { + return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE reciprocal() const { + return { + vec_div(vd_one, _vec0), // vec_re(_vec0) would only give an estimate; divide exactly. + vec_div(vd_one, _vec1)}; + } + Vectorized C10_ALWAYS_INLINE rsqrt() const { + return sqrt().reciprocal(); + } + + Vectorized C10_ALWAYS_INLINE pow(const Vectorized& b) const { + return {Sleef_powd2_u10(_vec0, b._vec0), Sleef_powd2_u10(_vec1, b._vec1)}; + } + Vectorized C10_ALWAYS_INLINE fmod(const Vectorized& b) const { + return {Sleef_fmodd2(_vec0, b._vec0), Sleef_fmodd2(_vec1, b._vec1)}; + } + + Vectorized hypot(const Vectorized& b) const { + return {Sleef_hypotd2_u05(_vec0, b._vec0), Sleef_hypotd2_u05(_vec1, b._vec1)}; + } + + Vectorized nextafter(const Vectorized& b) const { + return {Sleef_nextafterd2(_vec0, b._vec0), Sleef_nextafterd2(_vec1, b._vec1)}; + } + + Vectorized igamma(const Vectorized& x) const { + return mapbi(calc_igamma, x); + } + + Vectorized igammac(const Vectorized& x) const { + return mapbi(calc_igammac, x); + } + + + Vectorized i0() const { + return map(calc_i0); + } + + Vectorized i0e() const { + return map(calc_i0e); + } + + Vectorized digamma() const { + return map(calc_digamma); + } + + Vectorized _nor() const { + return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)}; + } + + Vectorized isnan() const { + auto x = *this; + auto ret = (x == x); + return ret._nor(); + } + bool has_inf_nan() const { + for (const auto i : c10::irange(size()/2)) { + if(_isnan(_vec0[i]) || _isinf(_vec0[i])) { + return true; + } + } + for (const auto i : c10::irange(size()/2)) { + if(_isnan(_vec1[i]) || _isinf(_vec1[i])) { + return true; + } + } + return false; + } + + DEFINE_MEMBER_OP(operator==, double, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, double, vec_cmpne) + DEFINE_MEMBER_OP(operator<, double, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, double, vec_cmple) + DEFINE_MEMBER_OP(operator>, double, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, double, vec_cmpge) + DEFINE_MEMBER_OP_AND_ONE(eq, double, vec_cmpeq) + DEFINE_MEMBER_OP_AND_ONE(ne, double, vec_cmpne) + DEFINE_MEMBER_OP_AND_ONE(lt, double, vec_cmplt) + DEFINE_MEMBER_OP_AND_ONE(le, double, vec_cmple) + DEFINE_MEMBER_OP_AND_ONE(gt, double, vec_cmpgt) + DEFINE_MEMBER_OP_AND_ONE(ge, double, vec_cmpge) + DEFINE_MEMBER_OP(operator+, double, vec_add) + DEFINE_MEMBER_OP(operator-, double, vec_sub) + DEFINE_MEMBER_OP(operator*, double, vec_mul) + DEFINE_MEMBER_OP(operator/, double, vec_div) + DEFINE_MEMBER_OP(maximum, double, vec_max_nan2) + DEFINE_MEMBER_OP(minimum, double, vec_min_nan2) + DEFINE_MEMBER_OP(operator&, double, vec_and) + DEFINE_MEMBER_OP(operator|, double, vec_or) + DEFINE_MEMBER_OP(operator^, double, vec_xor) + DEFINE_MEMBER_TERNARY_OP(madd, double, vec_madd) +}; +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + return a.minimum(b); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const
Vectorized& b) { + return Vectorized{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_div(a.vec0(), b.vec0()), vec_div(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..b5955ad86f048b30745683785c451868119c0353 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h @@ -0,0 +1,499 @@ +#pragma once + +#include +#include +#include +#include +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] + +inline namespace CPU_CAPABILITY { + +template <> +class Vectorized { + private: + union { + struct { + vfloat32 _vec0; + vfloat32 _vec1; + }; + struct { + vbool32 _vecb0; + vbool32 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = float; + using vec_internal_type = vfloat32; + using vec_internal_mask_type = vbool32; + using size_type = int; + + static constexpr size_type size() { + return 8; + } + Vectorized() {} + + C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {} + C10_ALWAYS_INLINE Vectorized(float scalar) + : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} + C10_ALWAYS_INLINE Vectorized( + float scalar1, + float scalar2, + float scalar3, + float scalar4, + float scalar5, + float scalar6, + float scalar7, + float scalar8) + : _vec0{vfloat32{scalar1, scalar2, scalar3, scalar4}}, + _vec1{vfloat32{scalar5, scalar6, scalar7, scalar8}} {} + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {a._vec0, b._vec1}; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxMask1(mask); + return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1}; + } + + template + 
static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxMask1(mask); + return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1}; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_2nd = VsxMask2(mask); + // generated masks + return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_2nd = VsxMask2(mask); + // generated masks + return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + const vbool32 mask_1st = VsxMask1(mask); + const vbool32 mask_2nd = VsxMask2(mask); + return { + (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), + (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + static Vectorized C10_ALWAYS_INLINE blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // the mask used here is returned by a vec256 comparison, + // so we can use the same mask directly with vec_sel + return { + vec_sel(a._vec0, b._vec0, mask._vecb0), + vec_sel(a._vec1, b._vec1, mask._vecb1)}; + } + + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + 2 * step, + base + 3 * step, + base + 4 * step, + base + 5 * step, + base + 6 * step, + base + 7 * step); + } + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + size_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + + return b; + } + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, tmp_values); + vec_vsx_st(_vec1, offset16, tmp_values); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + const float& operator[](int idx) const = delete; + float& operator[](int idx) = delete; + + Vectorized map(float (*const f)(float)) const { + Vectorized ret; + for (int i = 0; i < size() / 2; i++) { + ret._vec0[i] = f(_vec0[i]); + } + for (int i = 0; i < size() / 2; i++) { + ret._vec1[i] = f(_vec1[i]); + } + return ret; + } + + Vectorized mapbi(float (*const f)(float, float), const Vectorized& other) + const { + Vectorized ret; + for (int i = 0; i < size() / 2; i++) { + ret._vec0[i] = f(_vec0[i], other._vec0[i]); + } + for (int i = 0; i < size() / 2; i++) { + ret._vec1[i] = f(_vec1[i], other._vec1[i]); + } + return ret; + } + + Vectorized
_nor() const { + return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)}; + } + + Vectorized isnan() const { + auto x = *this; + auto ret = (x == x); + return ret._nor(); + } + + bool has_inf_nan() const { + for (const auto i : c10::irange(size()/2)) { + if(_isnan(_vec0[i]) || _isinf(_vec0[i])) { + return true; + } + } + for (const auto i : c10::irange(size()/2)) { + if(_isnan(_vec1[i]) || _isinf(_vec1[i])) { + return true; + } + } + return false; + } + + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit + // and others are translated to 0-bit + //__m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ); + auto cmp = (*this == zero); + // return _mm256_movemask_ps(cmp); + // possible simulation //mask= lvsl ( 0 ) vbpermq( vec, mask <<5) + vuint64 result0 = vec_vbpermq((vuint8)cmp._vecb0, mask_zero_bits); + vuint64 result1 = vec_vbpermq((vuint8)cmp._vecb1, mask_zero_bits); + return (result0[1] >> 12 | (result1[1] >> 8)); + } + + Vectorized C10_ALWAYS_INLINE abs() const { + return {vec_abs(_vec0), vec_abs(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE acos() const { + return {Sleef_acosf4_u10(_vec0), Sleef_acosf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE acosh() const { + return {Sleef_acoshf4_u10(_vec0), Sleef_acoshf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE asin() const { + return {Sleef_asinf4_u10(_vec0), Sleef_asinf4_u10(_vec1)}; + } + Vectorized atan() const { + return {Sleef_atanf4_u10(_vec0), Sleef_atanf4_u10(_vec1)}; + } + Vectorized atanh() const { + return {Sleef_atanhf4_u10(_vec0), Sleef_atanhf4_u10(_vec1)}; + } + Vectorized atan2(const Vectorized& b) const { + return {Sleef_atan2f4_u10(_vec0, b._vec0), Sleef_atan2f4_u10(_vec1, b._vec1)}; + } + Vectorized copysign(const Vectorized &sign) const { + return {Sleef_copysignf4(_vec0, sign._vec0), Sleef_copysignf4(_vec1, sign._vec1)}; + } + Vectorized lgamma() const { + return {Sleef_lgammaf4_u10(_vec0), Sleef_lgammaf4_u10(_vec1)}; + } + Vectorized erf() const { + return {Sleef_erff4_u10(_vec0), Sleef_erff4_u10(_vec1)}; + } + + Vectorized erfc() const { + return {Sleef_erfcf4_u15(_vec0), Sleef_erfcf4_u15(_vec1)}; + } + + Vectorized erfinv() const { + return map(calc_erfinv); + } + + Vectorized angle() const { + auto tmp = blendv( + Vectorized(0), Vectorized(c10::pi), *this < Vectorized(0)); + return blendv(tmp, *this, isnan()); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized{0}; + } + Vectorized conj() const { + return *this; + } + + Vectorized C10_ALWAYS_INLINE exp() const { + return {Sleef_expf4_u10(_vec0), Sleef_expf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE exp2() const { + return {Sleef_exp2f4_u10(_vec0), Sleef_exp2f4_u10(_vec1)}; + } + Vectorized expm1() const { + return {Sleef_expm1f4_u10(_vec0), Sleef_expm1f4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE exp_u20() const { + return exp(); + } + + Vectorized C10_ALWAYS_INLINE log() const { + return {Sleef_logf4_u10(_vec0), Sleef_logf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE log10() const { + return {Sleef_log10f4_u10(_vec0), Sleef_log10f4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE log1p() const { + return {Sleef_log1pf4_u10(_vec0), Sleef_log1pf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE log2() const { + return {Sleef_log2f4_u10(_vec0), Sleef_log2f4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE ceil() const { + return {vec_ceil(_vec0), vec_ceil(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE cos() const { + return 
{Sleef_cosf4_u10(_vec0), Sleef_cosf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE cosh() const { + return {Sleef_coshf4_u10(_vec0), Sleef_coshf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE floor() const { + return {vec_floor(_vec0), vec_floor(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE neg() const { + return {vec_neg(_vec0), vec_neg(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE round() const { + return {vec_round(_vec0), vec_round(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE sin() const { + return {Sleef_sinf4_u10(_vec0), Sleef_sinf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE sinh() const { + return {Sleef_sinhf4_u10(_vec0), Sleef_sinhf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE tan() const { + return {Sleef_tanf4_u10(_vec0), Sleef_tanf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE tanh() const { + return {Sleef_tanhf4_u10(_vec0), Sleef_tanhf4_u10(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE trunc() const { + return {vec_trunc(_vec0), vec_trunc(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE frac() const { + return *this - trunc(); + } + + Vectorized C10_ALWAYS_INLINE sqrt() const { + return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE reciprocal() const { + return Vectorized(one) / (*this); + } + Vectorized C10_ALWAYS_INLINE rsqrt() const { + return sqrt().reciprocal(); + } + + Vectorized C10_ALWAYS_INLINE pow(const Vectorized& exp) const { + return {Sleef_powf4_u10(_vec0, exp._vec0), Sleef_powf4_u10(_vec1, exp._vec1)}; + } + + Vectorized fmod(const Vectorized& b) const { + return {Sleef_fmodf4(_vec0, b._vec0),Sleef_fmodf4(_vec1, b._vec1)}; + } + + Vectorized hypot(const Vectorized& b) const { + return {Sleef_hypotf4_u05(_vec0, b._vec0), Sleef_hypotf4_u05(_vec1, b._vec1)}; + } + + Vectorized nextafter(const Vectorized& b) const { + return {Sleef_nextafterf4(_vec0, b._vec0), Sleef_nextafterf4(_vec1, b._vec1)}; + } + + Vectorized igamma(const Vectorized& x) const { + return mapbi(calc_igamma, x); + } + + Vectorized igammac(const Vectorized& x) const { + return mapbi(calc_igammac, x); + } + + Vectorized i0() const { + return map(calc_i0); + } + + Vectorized i0e() const { + return map(calc_i0e); + } + + Vectorized digamma() const { + return map(calc_digamma); + } + + DEFINE_MEMBER_OP(operator==, float, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, float, vec_cmpne) + DEFINE_MEMBER_OP(operator<, float, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, float, vec_cmple) + DEFINE_MEMBER_OP(operator>, float, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, float, vec_cmpge) + DEFINE_MEMBER_OP_AND_ONE(eq, float, vec_cmpeq) + DEFINE_MEMBER_OP_AND_ONE(ne, float, vec_cmpne) + DEFINE_MEMBER_OP_AND_ONE(lt, float, vec_cmplt) + DEFINE_MEMBER_OP_AND_ONE(le, float, vec_cmple) + DEFINE_MEMBER_OP_AND_ONE(gt, float, vec_cmpgt) + DEFINE_MEMBER_OP_AND_ONE(ge, float, vec_cmpge) + DEFINE_MEMBER_OP(operator+, float, vec_add) + DEFINE_MEMBER_OP(operator-, float, vec_sub) + DEFINE_MEMBER_OP(operator*, float, vec_mul) + DEFINE_MEMBER_OP(operator/, float, vec_div) + DEFINE_MEMBER_OP(maximum, float, vec_max_nan2) + DEFINE_MEMBER_OP(minimum, float, vec_min_nan2) + DEFINE_MEMBER_OP(operator&, float, vec_and) + DEFINE_MEMBER_OP(operator|, float, vec_or) + DEFINE_MEMBER_OP(operator^, float, vec_xor) + DEFINE_MEMBER_TERNARY_OP(madd, float, vec_madd) +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return a.minimum(b); +} + +template <> +Vectorized 
C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_div(a.vec0(), b.vec0()), vec_div(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..ae146dae4d42a50bbe733ee22b0c88c0a13569eb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h @@ -0,0 +1,402 @@ +#pragma once + +#include +#include +#include +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +template <> +class Vectorized { + private: + union { + struct { + vint16 _vec0; + vint16 _vec1; + }; + struct { + vbool16 _vecb0; + vbool16 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = int16_t; + using vec_internal_type = vint16; + using vec_internal_mask_type = vbool16; + using size_type = int; + static constexpr size_type size() { + return 16; + } + Vectorized() {} + C10_ALWAYS_INLINE Vectorized(vint16 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool16 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vint16 v1, vint16 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool16 v1, vbool16 v2) : _vecb0{v1}, _vecb1{v2} {} + C10_ALWAYS_INLINE Vectorized(int16_t scalar) + : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} + + C10_ALWAYS_INLINE Vectorized( + int16_t scalar1, + int16_t scalar2, + int16_t scalar3, + int16_t scalar4, + int16_t scalar5, + int16_t scalar6, + int16_t scalar7, + int16_t scalar8, + int16_t scalar9, + int16_t scalar10, + int16_t scalar11, + int16_t scalar12, + int16_t scalar13, + int16_t scalar14, + int16_t scalar15, + int16_t scalar16) + : _vec0{vint16{ + scalar1, + scalar2, + scalar3, + scalar4, + scalar5, + scalar6, + scalar7, + scalar8}}, + _vec1{vint16{ + scalar9, + scalar10, + scalar11, + scalar12, + scalar13, + scalar14, + scalar15, + scalar16}} {} + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const 
Vectorized& b) { + return a; + } + + template + static std::enable_if_t<(mask & 65535) == 65535, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t<(mask > 0 && mask < 255), Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + constexpr int16_t g0 = (mask & 1) * 0xffff; + constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff; + constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff; + constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff; + constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff; + constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff; + constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff; + constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff; + const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7}; + + return {(vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), a._vec1}; + } + + template + static std::enable_if_t< + (mask > 255 && (mask & 65535) != 65535 && ((mask & 255) == 255)), + Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + // the second half is selected by bits 8..15 of the mask + constexpr int16_t mask2 = (mask & 65535) >> 8; + constexpr int16_t g0_2 = (mask2 & 1) * 0xffff; + constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff; + constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff; + constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff; + constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff; + constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff; + constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff; + constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff; + + const vint16 mask_2nd = + vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2}; + // generated masks + return {b._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)}; + } + + template + static std::enable_if_t< + (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) == 0)), + Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + // the second half is selected by bits 8..15 of the mask + constexpr int16_t mask2 = (mask & 65535) >> 8; + constexpr int16_t g0_2 = (mask2 & 1) * 0xffff; + constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff; + constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff; + constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff; + constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff; + constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff; + constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff; + constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff; + + const vint16 mask_2nd = + vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2}; + // generated masks + return {a._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)}; + } + + template + static std::enable_if_t< + (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) != 0) && + ((mask & 255) != 255)), + Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + constexpr int16_t g0 = (mask & 1) * 0xffff; + constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff; + constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff; + constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff; + constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff; + constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff; + constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff; + constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff; + // the second half is selected by bits 8..15 of the mask + constexpr int16_t mask2 = (mask & 65535) >> 8; + constexpr int16_t g0_2 = (mask2 & 1) * 0xffff; + constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff; + constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff; + constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff; + constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff; + constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff; + constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff; + constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff; + + const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7}; + const vint16 mask_2nd = + vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2}; + // generated masks + return { + (vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), + (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)}; + } + + static Vectorized C10_ALWAYS_INLINE blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // the mask used here is returned by a vec256 comparison, + // so we can use the same mask directly with vec_sel + // warning: an Intel-style (movemask) integer mask will not work properly + return { + vec_sel(a._vec0, b._vec0, mask._vecb0), + vec_sel(a._vec1, b._vec1, mask._vecb1)}; + } + + template + static Vectorized arange(int16_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + 2 * step, + base + 3 * step, + base + 4 * step, + base + 5 * step, + base + 6 * step, + base + 7 * step, + base + 8 * step, + base + 9 * step, + base + 10 * step, + base + 11 * step, + base + 12 * step, + base + 13 * step, + base + 14 * step, + base + 15 * step); + } + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + size_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, tmp_values); + vec_vsx_st(_vec1, offset16, tmp_values); + std::memcpy(ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + const int16_t& operator[](int idx) const = delete; + int16_t& operator[](int idx) = delete; + + Vectorized angle() const { + return blendv( + Vectorized(0), Vectorized(c10::pi), *this < Vectorized(0)); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized{0}; + } + Vectorized conj() const { + return *this; + } + + Vectorized C10_ALWAYS_INLINE abs() const { + return {vec_abs(_vec0), vec_abs(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE neg() const { + return {vec_neg(_vec0), vec_neg(_vec1)};
+
+  DEFINE_MEMBER_UNARY_OP(operator~, int16_t, vec_not)
+  DEFINE_MEMBER_OP(operator==, int16_t, vec_cmpeq)
+  DEFINE_MEMBER_OP(operator!=, int16_t, vec_cmpne)
+  DEFINE_MEMBER_OP(operator<, int16_t, vec_cmplt)
+  DEFINE_MEMBER_OP(operator<=, int16_t, vec_cmple)
+  DEFINE_MEMBER_OP(operator>, int16_t, vec_cmpgt)
+  DEFINE_MEMBER_OP(operator>=, int16_t, vec_cmpge)
+  DEFINE_MEMBER_OP_AND_ONE(eq, int16_t, vec_cmpeq)
+  DEFINE_MEMBER_OP_AND_ONE(ne, int16_t, vec_cmpne)
+  DEFINE_MEMBER_OP_AND_ONE(lt, int16_t, vec_cmplt)
+  DEFINE_MEMBER_OP_AND_ONE(le, int16_t, vec_cmple)
+  DEFINE_MEMBER_OP_AND_ONE(gt, int16_t, vec_cmpgt)
+  DEFINE_MEMBER_OP_AND_ONE(ge, int16_t, vec_cmpge)
+  DEFINE_MEMBER_OP(operator+, int16_t, vec_add)
+  DEFINE_MEMBER_OP(operator-, int16_t, vec_sub)
+  DEFINE_MEMBER_OP(operator*, int16_t, vec_mul)
+  DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int16_t, /)
+  DEFINE_MEMBER_OP(maximum, int16_t, vec_max)
+  DEFINE_MEMBER_OP(minimum, int16_t, vec_min)
+  DEFINE_MEMBER_OP(operator&, int16_t, vec_and)
+  DEFINE_MEMBER_OP(operator|, int16_t, vec_or)
+  DEFINE_MEMBER_OP(operator^, int16_t, vec_xor)
+};
+
+template <>
+Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
+  vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
+  return Vectorized<int16_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
+}
+
+template <>
+Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
+  vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
+  return Vectorized<int16_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
+}
+
+template <>
+Vectorized<int16_t> inline maximum(
+    const Vectorized<int16_t>& a,
+    const Vectorized<int16_t>& b) {
+  return a.maximum(b);
+}
+
+template <>
+Vectorized<int16_t> inline minimum(
+    const Vectorized<int16_t>& a,
+    const Vectorized<int16_t>& b) {
+  return a.minimum(b);
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{a.vec0()/b.vec0(), a.vec1()/b.vec1()};
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator&(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator|(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int16_t> C10_ALWAYS_INLINE operator^(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return Vectorized<int16_t>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
+}
+
+} // namespace
+} // namespace vec
+} // namespace at
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..98401381c6e822ff571949bb602e501240114e18 --- /dev/null +++
b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h @@ -0,0 +1,333 @@
+#pragma once
+
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
+namespace at {
+namespace vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+template <>
+class Vectorized<int32_t> {
+ private:
+  union {
+    struct {
+      vint32 _vec0;
+      vint32 _vec1;
+    };
+    struct {
+      vbool32 _vecb0;
+      vbool32 _vecb1;
+    };
+
+  } __attribute__((__may_alias__));
+
+ public:
+  using value_type = int32_t;
+  using vec_internal_type = vint32;
+  using vec_internal_mask_type = vbool32;
+  using size_type = int;
+  static constexpr size_type size() {
+    return 8;
+  }
+  Vectorized() {}
+  C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
+  C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
+  C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
+  C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
+  C10_ALWAYS_INLINE Vectorized(int32_t scalar)
+      : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
+  C10_ALWAYS_INLINE Vectorized(
+      int32_t scalar1,
+      int32_t scalar2,
+      int32_t scalar3,
+      int32_t scalar4,
+      int32_t scalar5,
+      int32_t scalar6,
+      int32_t scalar7,
+      int32_t scalar8)
+      : _vec0{vint32{scalar1, scalar2, scalar3, scalar4}},
+        _vec1{vint32{scalar5, scalar6, scalar7, scalar8}} {}
+  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
+    return _vec0;
+  }
+  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
+    return _vec1;
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<mask == 0, Vectorized<int32_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    return a;
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<(mask & 255) == 255, Vectorized<int32_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    return b;
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<mask == 15, Vectorized<int32_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    return {b._vec0, a._vec1};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<(mask > 0 && mask < 15), Vectorized<int32_t>>
+  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
+    constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
+    constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
+    constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
+    const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
+
+    return {(vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st), a._vec1};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<
+      (mask > 15 && (mask & 255) != 255 && ((mask & 15) == 15)),
+      Vectorized<int32_t>>
+  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    constexpr uint32_t mask2 = (mask & 255) >> 4;
+    constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
+    constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
+    constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
+    constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
+
+    const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
+    // generated masks
+    return {b._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<
+      (mask > 15 && ((mask & 255) != 255) && ((mask & 15) == 0)),
+      Vectorized<int32_t>>
+  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    constexpr uint32_t mask2 = (mask & 255) >> 4;
+    constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
+    constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
+    constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
+    constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
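+
+    // Note (editor's comment, not upstream): in this specialization only mask
+    // bits 4..7 matter; they drive lanes 4..7, which live in _vec1, and each
+    // set bit expands to an all-ones 32-bit lane in the selector below.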
+
+    const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
+    // generated masks
+    return {a._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<
+      (mask > 15 && ((mask & 255) != 255) && ((mask & 15) != 0) &&
+       ((mask & 15) != 15)),
+      Vectorized<int32_t>>
+  C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+    constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
+    constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
+    constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
+    constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
+    constexpr uint32_t mask2 = (mask & 255) >> 4;
+    constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
+    constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
+    constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
+    constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
+
+    const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
+    const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
+    // generated masks
+    return {
+        (vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st),
+        (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
+  }
+
+  static Vectorized<int32_t> C10_ALWAYS_INLINE blendv(
+      const Vectorized<int32_t>& a,
+      const Vectorized<int32_t>& b,
+      const Vectorized<int32_t>& mask) {
+    // The mask used here is the result of a vec256 comparison, so it can be
+    // passed directly to vec_sel. Warning: an Intel-style bit mask will not
+    // work properly.
+    return {
+        vec_sel(a._vec0, b._vec0, mask._vecb0),
+        vec_sel(a._vec1, b._vec1, mask._vecb1)};
+  }
+
+  template <typename step_t>
+  static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
+    return Vectorized<int32_t>(
+        base,
+        base + step,
+        base + 2 * step,
+        base + 3 * step,
+        base + 4 * step,
+        base + 5 * step,
+        base + 6 * step,
+        base + 7 * step);
+  }
+  static Vectorized<int32_t> set(
+      const Vectorized<int32_t>& a,
+      const Vectorized<int32_t>& b,
+      size_t count = size()) {
+    switch (count) {
+      case 0:
+        return a;
+      case 1:
+        return blend<1>(a, b);
+      case 2:
+        return blend<3>(a, b);
+      case 3:
+        return blend<7>(a, b);
+      case 4:
+        return blend<15>(a, b);
+      case 5:
+        return blend<31>(a, b);
+      case 6:
+        return blend<63>(a, b);
+      case 7:
+        return blend<127>(a, b);
+    }
+
+    return b;
+  }
+  static Vectorized<int32_t> C10_ALWAYS_INLINE
+  loadu(const void* ptr, int count = size()) {
+    if (count == size()) {
+      return {
+          vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
+          vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
+    }
+
+    __at_align__ value_type tmp_values[size()] = {};
+    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
+
+    return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
+  }
+  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
+    if (count == size()) {
+      vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
+      vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
+    } else if (count > 0) {
+      __at_align__ value_type tmp_values[size()];
+      vec_vsx_st(_vec0, offset0, tmp_values);
+      vec_vsx_st(_vec1, offset16, tmp_values);
+      std::memcpy(
+          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
+    }
+  }
+  const int32_t& operator[](int idx) const = delete;
+  int32_t& operator[](int idx) = delete;
+
+  Vectorized<int32_t> angle() const {
+    return blendv(
+        Vectorized<int32_t>(0), Vectorized<int32_t>(c10::pi<int32_t>), *this < Vectorized<int32_t>(0));
+  }
+  Vectorized<int32_t> real() const {
+    return *this;
+  }
+  Vectorized<int32_t> imag() const {
+    return Vectorized<int32_t>{0};
+  }
+  Vectorized<int32_t> conj() const {
+    return *this;
+  }
+
+  Vectorized<int32_t> C10_ALWAYS_INLINE abs() const {
+    return
{vec_abs(_vec0), vec_abs(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE neg() const { + return {vec_neg(_vec0), vec_neg(_vec1)}; + } + + DEFINE_MEMBER_UNARY_OP(operator~, int32_t, vec_not) + DEFINE_MEMBER_OP(operator==, int32_t, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, int32_t, vec_cmpne) + DEFINE_MEMBER_OP(operator<, int32_t, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, int32_t, vec_cmple) + DEFINE_MEMBER_OP(operator>, int32_t, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, int32_t, vec_cmpge) + DEFINE_MEMBER_OP_AND_ONE(eq, int32_t, vec_cmpeq) + DEFINE_MEMBER_OP_AND_ONE(ne, int32_t, vec_cmpne) + DEFINE_MEMBER_OP_AND_ONE(lt, int32_t, vec_cmplt) + DEFINE_MEMBER_OP_AND_ONE(le, int32_t, vec_cmple) + DEFINE_MEMBER_OP_AND_ONE(gt, int32_t, vec_cmpgt) + DEFINE_MEMBER_OP_AND_ONE(ge, int32_t, vec_cmpge) + DEFINE_MEMBER_OP(operator+, int32_t, vec_add) + DEFINE_MEMBER_OP(operator-, int32_t, vec_sub) + DEFINE_MEMBER_OP(operator*, int32_t, vec_mul) + DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int32_t, /) + DEFINE_MEMBER_OP(maximum, int32_t, vec_max) + DEFINE_MEMBER_OP(minimum, int32_t, vec_min) + DEFINE_MEMBER_OP(operator&, int32_t, vec_and) + DEFINE_MEMBER_OP(operator|, int32_t, vec_or) + DEFINE_MEMBER_OP(operator^, int32_t, vec_xor) +}; + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + vuint32 shift_vec0 = reinterpret_cast(b.vec0()); + vuint32 shift_vec1 = reinterpret_cast(b.vec1()) ; + return Vectorized{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)}; +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + vuint32 shift_vec0 = reinterpret_cast(b.vec0()); + vuint32 shift_vec1 = reinterpret_cast(b.vec1()) ; + return Vectorized{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)}; +} + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + return a.minimum(b); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { + return Vectorized{a.vec0()/b.vec0(), a.vec1()/b.vec1()}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h new file mode 
100644 index 0000000000000000000000000000000000000000..f8217930fa4989586ed134eb42dd2afe9ce7746c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h @@ -0,0 +1,286 @@ +#pragma once + +#include +#include +#include +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +template <> +class Vectorized { + private: + union { + struct { + vint64 _vec0; + vint64 _vec1; + }; + struct { + vbool64 _vecb0; + vbool64 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = int64_t; + using vec_internal_type = vint64; + using vec_internal_mask_type = vbool64; + using size_type = int; + using ElementType = signed long long; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} + C10_ALWAYS_INLINE Vectorized(int64_t scalar) + : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} + C10_ALWAYS_INLINE Vectorized( + int64_t scalar1, + int64_t scalar2, + int64_t scalar3, + int64_t scalar4) + : _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {} + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t<(mask & 15) == 15, Vectorized> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t<(mask > 0 && mask < 3), Vectorized> C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff; + constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; + const vbool64 mask_1st = (vbool64){g0, g1}; + return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1}; + } + + template + static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff; + constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff; + + const vbool64 mask_2nd = (vbool64){g0_2, g1_2}; + return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)}; + } + + template + static std::enable_if_t< + (mask > 3) && (mask & 3) != 0 && (mask & 15) != 15, + Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff; + constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; + constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff; + constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff; + + const vbool64 mask_1st = (vbool64){g0, g1}; + const vbool64 mask_2nd = (vbool64){g0_2, g1_2}; + return { + (vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), + (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)}; + } + + static Vectorized C10_ALWAYS_INLINE blendv( + const 
Vectorized<int64_t>& a,
+      const Vectorized<int64_t>& b,
+      const Vectorized<int64_t>& mask) {
+    // The mask used here is the result of a vec256 comparison, so it can be
+    // passed directly to vec_sel.
+
+    return {
+        vec_sel(a._vec0, b._vec0, mask._vecb0),
+        vec_sel(a._vec1, b._vec1, mask._vecb1)};
+  }
+  template <typename step_t>
+  static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
+    return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
+  }
+
+  static Vectorized<int64_t> C10_ALWAYS_INLINE
+  set(const Vectorized<int64_t>& a,
+      const Vectorized<int64_t>& b,
+      size_t count = size()) {
+    switch (count) {
+      case 0:
+        return a;
+      case 1:
+        return blend<1>(a, b);
+      case 2:
+        return blend<3>(a, b);
+      case 3:
+        return blend<7>(a, b);
+    }
+
+    return b;
+  }
+  static Vectorized<int64_t> C10_ALWAYS_INLINE
+  loadu(const void* ptr, int count = size()) {
+    if (count == size()) {
+      static_assert(sizeof(double) == sizeof(value_type));
+      const double* dptr = reinterpret_cast<const double*>(ptr);
+      return {// treat it as double load
+              (vint64)vec_vsx_ld(offset0, dptr),
+              (vint64)vec_vsx_ld(offset16, dptr)};
+    }
+
+    __at_align__ double tmp_values[size()] = {};
+    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
+
+    return {
+        (vint64)vec_vsx_ld(offset0, tmp_values),
+        (vint64)vec_vsx_ld(offset16, tmp_values)};
+  }
+  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
+    if (count == size()) {
+      double* dptr = reinterpret_cast<double*>(ptr);
+      vec_vsx_st((vfloat64)_vec0, offset0, dptr);
+      vec_vsx_st((vfloat64)_vec1, offset16, dptr);
+    } else if (count > 0) {
+      __at_align__ double tmp_values[size()];
+      vec_vsx_st((vfloat64)_vec0, offset0, tmp_values);
+      vec_vsx_st((vfloat64)_vec1, offset16, tmp_values);
+      std::memcpy(
+          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
+    }
+  }
+  const int64_t& operator[](int idx) const = delete;
+  int64_t& operator[](int idx) = delete;
+
+  Vectorized<int64_t> angle() const {
+    return blendv(
+        Vectorized<int64_t>(0), Vectorized<int64_t>(c10::pi<int64_t>), *this < Vectorized<int64_t>(0));
+  }
+  Vectorized<int64_t> real() const {
+    return *this;
+  }
+  Vectorized<int64_t> imag() const {
+    return Vectorized<int64_t>{0};
+  }
+  Vectorized<int64_t> conj() const {
+    return *this;
+  }
+
+  Vectorized<int64_t> C10_ALWAYS_INLINE abs() const {
+    return {vec_abs(_vec0), vec_abs(_vec1)};
+  }
+
+  Vectorized<int64_t> C10_ALWAYS_INLINE neg() const {
+    return {vec_neg(_vec0), vec_neg(_vec1)};
+  }
+
+  DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not)
+  DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq)
+  DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne)
+  DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt)
+  DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple)
+  DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt)
+  DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge)
+  DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq)
+  DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne)
+  DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt)
+  DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple)
+  DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt)
+  DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge)
+  DEFINE_MEMBER_OP(operator+, int64_t, vec_add)
+  DEFINE_MEMBER_OP(operator-, int64_t, vec_sub)
+  DEFINE_MEMBER_OP(operator*, int64_t, vec_mul)
+  DEFINE_MEMBER_OP(operator/, int64_t, vec_div)
+  DEFINE_MEMBER_OP(maximum, int64_t, vec_max)
+  DEFINE_MEMBER_OP(minimum, int64_t, vec_min)
+  DEFINE_MEMBER_OP(operator&, int64_t, vec_and)
+  DEFINE_MEMBER_OP(operator|, int64_t, vec_or)
+  DEFINE_MEMBER_OP(operator^, int64_t, vec_xor)
+};
+
+template <>
+Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
+  vuint64 shift_vec1 =
reinterpret_cast<vuint64>(b.vec1());
+  return Vectorized<int64_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
+}
+
+template <>
+Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
+  vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1());
+  return Vectorized<int64_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
+}
+
+template <>
+Vectorized<int64_t> inline maximum(
+    const Vectorized<int64_t>& a,
+    const Vectorized<int64_t>& b) {
+  return a.maximum(b);
+}
+
+template <>
+Vectorized<int64_t> inline minimum(
+    const Vectorized<int64_t>& a,
+    const Vectorized<int64_t>& b) {
+  return a.minimum(b);
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_div(a.vec0(), b.vec0()), vec_div(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator&(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator|(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator^(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
+}
+
+} // namespace
+} // namespace vec
+} // namespace at
diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..f67d42a4cb5170123989eaf4b7759f60e071c6cb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h @@ -0,0 +1,483 @@
+#pragma once
+
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
+#include <c10/util/qint8.h>
+#include <array>
+
+// This file defines Vectorized<> for the quantized types.
+//
+//
+// Currently, we simply use these classes as efficient converters between
+// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
+// where doing the arithmetic in full-precision is acceptable (e.g.
+// elementwise operators).
+//
+//
+// Conversions are as follows:
+//  Vectorized<qint8> -> 4x Vectorized<float>
+//
+// The size of the returned float vector is specified by the special
+// constexpr function float_num_vecs. The type of the value returned
+// from dequantize (and expected as an argument to quantize) is
+// specified by float_vec_return_type.
+//
+// When writing kernels with these vectors, it is expected that floating-
+// point operations will be carried out in a loop over
+// Vectorized<T>::float_num_vecs iterations.
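+//
+// A minimal sketch of the per-lane arithmetic implemented below (editor's
+// illustration; `q`, `x`, `scale`, and `zero_point` are stand-in names, not
+// upstream identifiers):
+//   float x = (static_cast<float>(q) - zero_point) * scale;   // dequantize
+//   int8_t q2 = saturate(round(x / scale) + zero_point);      // quantize
+// The vector code performs exactly this, 32 lanes at a time, with the final
+// saturation provided by the packing intrinsics (vec_packs).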
+ +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +template <> +struct Vectorized { + private: + union { + struct { + vint8 _vec0; + vint8 _vec1; + }; + struct { + vbool8 _vecb0; + vbool8 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + Vectorized() {} + using size_type = int; + static constexpr size_type size() { + return 32; + } + + static constexpr size_t float_num_vecs() { + return 4; + } + static constexpr int int_num_vecs() { + return 4; + } + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::qint8::underlying; + using vec_internal_type = vint8; + using vec_internal_mask_type = vbool8; + // Broadcast constructor + C10_ALWAYS_INLINE Vectorized(const c10::qint8& val) + : _vec0{vec_splats(val.val_)}, _vec1{vec_splats(val.val_)} {} + + C10_ALWAYS_INLINE Vectorized(const Vectorized& other) + : _vec0{other._vec0}, _vec1(other._vec1) {} + + C10_ALWAYS_INLINE Vectorized(vint8 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vint8 v1, vint8 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {} + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + static C10_ALWAYS_INLINE Vectorized loadu( + const void* ptr, + int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, tmp_values); + vec_vsx_st(_vec1, offset16, tmp_values); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + public: + float_vec_return_type C10_ALWAYS_INLINE dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + vint16 vecshi0 = vec_unpackh(_vec0); + vint16 vecshi1 = vec_unpackl(_vec0); + + vint16 vecshi2 = vec_unpackh(_vec1); + vint16 vecshi3 = vec_unpackl(_vec1); + + vint32 veci0 = vec_unpackh(vecshi0); + vint32 veci1 = vec_unpackl(vecshi0); + + vint32 veci2 = vec_unpackh(vecshi1); + vint32 veci3 = vec_unpackl(vecshi1); + + vint32 veci4 = vec_unpackh(vecshi2); + vint32 veci5 = vec_unpackl(vecshi2); + + vint32 veci6 = vec_unpackh(vecshi3); + vint32 veci7 = vec_unpackl(vecshi3); + + vfloat32 vecf0_0 = vec_float(veci0); + vfloat32 vecf1_0 = vec_float(veci1); + + vfloat32 vecf0_1 = vec_float(veci2); + vfloat32 vecf1_1 = vec_float(veci3); + + vfloat32 vecf0_2 = vec_float(veci4); + vfloat32 vecf1_2 = vec_float(veci5); + + vfloat32 vecf0_3 = vec_float(veci6); + vfloat32 vecf1_3 = vec_float(veci7); + vfloat32 scale_vec0 = scale.vec0(); + vfloat32 scale_vec1 = scale.vec1(); + vfloat32 scale_zp_premul0 = scale_zp_premul.vec0(); + vfloat32 scale_zp_premul1 = scale_zp_premul.vec1(); + return { + Vectorized{ + vec_madd(scale_vec0, vecf0_0, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_0, 
scale_zp_premul1)}, + Vectorized{ + vec_madd(scale_vec0, vecf0_1, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)}, + Vectorized{ + vec_madd(scale_vec0, vecf0_2, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)}, + Vectorized{ + vec_madd(scale_vec0, vecf0_3, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}}; + } + + float_vec_return_type C10_ALWAYS_INLINE dequantize( + Vectorized scale, + Vectorized zero_point) const { + vint16 vecshi0 = vec_unpackh(_vec0); + vint16 vecshi1 = vec_unpackl(_vec0); + + vint16 vecshi2 = vec_unpackh(_vec1); + vint16 vecshi3 = vec_unpackl(_vec1); + + vint32 veci0 = vec_unpackh(vecshi0); + vint32 veci1 = vec_unpackl(vecshi0); + + vint32 veci2 = vec_unpackh(vecshi1); + vint32 veci3 = vec_unpackl(vecshi1); + + vint32 veci4 = vec_unpackh(vecshi2); + vint32 veci5 = vec_unpackl(vecshi2); + + vint32 veci6 = vec_unpackh(vecshi3); + vint32 veci7 = vec_unpackl(vecshi3); + + vfloat32 vecf0_0 = vec_float(veci0); + vfloat32 vecf1_0 = vec_float(veci1); + + vfloat32 vecf0_1 = vec_float(veci2); + vfloat32 vecf1_1 = vec_float(veci3); + + vfloat32 vecf0_2 = vec_float(veci4); + vfloat32 vecf1_2 = vec_float(veci5); + + vfloat32 vecf0_3 = vec_float(veci6); + vfloat32 vecf1_3 = vec_float(veci7); + vfloat32 scale_vec0 = scale.vec0(); + vfloat32 scale_vec1 = scale.vec1(); + vfloat32 zero_point0 = zero_point.vec0(); + vfloat32 zero_point1 = zero_point.vec1(); + return { + Vectorized{ + (vecf0_0 - zero_point0) * scale_vec0, + (vecf1_0 - zero_point1) * scale_vec1}, + Vectorized{ + (vecf0_1 - zero_point0) * scale_vec0, + (vecf1_1 - zero_point1) * scale_vec1}, + Vectorized{ + (vecf0_2 - zero_point0) * scale_vec0, + (vecf1_2 - zero_point1) * scale_vec1}, + Vectorized{ + (vecf0_3 - zero_point0) * scale_vec0, + (vecf1_3 - zero_point1) * scale_vec1}}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + // constexpr int32_t min_val = std::numeric_limits::min(); + // constexpr int32_t max_val = std::numeric_limits::max(); + + vfloat32 inverse_scale_v = vec_splats(inverse_scale); + vfloat32 vec_zero_point = vec_splats((float)zero_point); + // vint32 vmin = vec_splats(min_val); + // vint32 vmax = vec_splats(max_val); + + Vectorized vf0 = rhs[0]; + Vectorized vf1 = rhs[1]; + Vectorized vf2 = rhs[2]; + Vectorized vf3 = rhs[3]; + vfloat32 vecf0 = vf0.vec0(); + vfloat32 vecf1 = vf0.vec1(); + vfloat32 vecf2 = vf1.vec0(); + vfloat32 vecf3 = vf1.vec1(); + + vfloat32 vecf4 = vf2.vec0(); + vfloat32 vecf5 = vf2.vec1(); + vfloat32 vecf6 = vf3.vec0(); + vfloat32 vecf7 = vf3.vec1(); + + vecf0 = vec_mul(vecf0, inverse_scale_v); + vecf1 = vec_mul(vecf1, inverse_scale_v); + vecf2 = vec_mul(vecf2, inverse_scale_v); + vecf3 = vec_mul(vecf3, inverse_scale_v); + + vecf4 = vec_mul(vecf4, inverse_scale_v); + vecf5 = vec_mul(vecf5, inverse_scale_v); + vecf6 = vec_mul(vecf6, inverse_scale_v); + vecf7 = vec_mul(vecf7, inverse_scale_v); + + vecf0 = vec_add(vec_rint(vecf0), vec_zero_point); + vecf1 = vec_add(vec_rint(vecf1), vec_zero_point); + vecf2 = vec_add(vec_rint(vecf2), vec_zero_point); + vecf3 = vec_add(vec_rint(vecf3), vec_zero_point); + + vecf4 = vec_add(vec_rint(vecf4), vec_zero_point); + vecf5 = vec_add(vec_rint(vecf5), vec_zero_point); + vecf6 = vec_add(vec_rint(vecf6), vec_zero_point); + vecf7 = vec_add(vec_rint(vecf7), vec_zero_point); + + vint32 veci0 = vec_signed(vecf0); + vint32 veci1 = vec_signed(vecf1); + vint32 veci2 = vec_signed(vecf2); + vint32 veci3 = 
vec_signed(vecf3); + + vint32 veci4 = vec_signed(vecf4); + vint32 veci5 = vec_signed(vecf5); + vint32 veci6 = vec_signed(vecf6); + vint32 veci7 = vec_signed(vecf7); + + // veci0 = vec_min(vmax, vec_max( vmin, vecf0)) ; + // veci1 = vec_min(vmax, vec_max( vmin, vecf1)) ; + // veci2 = vec_min(vmax, vec_max( vmin, vecf2)) ; + // veci3 = vec_min(vmax, vec_max( vmin, vecf3)) ; + + // veci4 = vec_min(vmax, vec_max( vmin, vecf4)) ; + // veci5 = vec_min(vmax, vec_max( vmin, vecf5)) ; + // veci6 = vec_min(vmax, vec_max( vmin, vecf6)) ; + // veci7 = vec_min(vmax, vec_max( vmin, vecf7)) ; + // vec_packs CLAMP already + vint16 vecshi0 = vec_packs(veci0, veci1); + vint16 vecshi1 = vec_packs(veci2, veci3); + vint16 vecshi2 = vec_packs(veci4, veci5); + vint16 vecshi3 = vec_packs(veci6, veci7); + + vint8 vec0 = vec_packs(vecshi0, vecshi1); + vint8 vec1 = vec_packs(vecshi2, vecshi3); + + return {vec0, vec1}; + } + + Vectorized C10_ALWAYS_INLINE relu(Vectorized zero_point) const { + return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)}; + } + + Vectorized C10_ALWAYS_INLINE + relu6(Vectorized zero_point, Vectorized q_six) const { + vint8 max0 = vec_max(_vec0, zero_point._vec0); + vint8 max1 = vec_max(_vec1, zero_point._vec1); + return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)}; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + vint16 vecshi0 = vec_unpackh(_vec0); + vint16 vecBshi0 = vec_unpackh(b._vec0); + vint16 vecshi1 = vec_unpackl(_vec0); + vint16 vecBshi1 = vec_unpackl(b._vec0); + + vint16 vecshi2 = vec_unpackh(_vec1); + vint16 vecBshi2 = vec_unpackh(b._vec1); + vint16 vecshi3 = vec_unpackl(_vec1); + vint16 vecBshi3 = vec_unpackl(b._vec1); + + vint32 veci0 = vec_unpackh(vecshi0); + vint32 vecBi0 = vec_unpackh(vecBshi0); + vint32 veci1 = vec_unpackl(vecshi0); + vint32 vecBi1 = vec_unpackl(vecBshi0); + + vint32 veci2 = vec_unpackh(vecshi1); + vint32 vecBi2 = vec_unpackh(vecBshi1); + vint32 veci3 = vec_unpackl(vecshi1); + vint32 vecBi3 = vec_unpackl(vecBshi1); + + vint32 veci4 = vec_unpackh(vecshi2); + vint32 vecBi4 = vec_unpackh(vecBshi2); + vint32 veci5 = vec_unpackl(vecshi2); + vint32 vecBi5 = vec_unpackl(vecBshi2); + + vint32 veci6 = vec_unpackh(vecshi3); + vint32 vecBi6 = vec_unpackh(vecBshi3); + vint32 veci7 = vec_unpackl(vecshi3); + vint32 vecBi7 = vec_unpackl(vecBshi3); + + return { + Vectorized(veci0 - vecBi0, veci1 - vecBi1), + Vectorized(veci2 - vecBi2, veci3 - vecBi3), + Vectorized(veci4 - vecBi4, veci5 - vecBi5), + Vectorized(veci6 - vecBi6, veci7 - vecBi7)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + vfloat32 vec_multiplier = vec_splats(multiplier); + vint32 vec_zero_point = vec_splats(zero_point); + + Vectorized vi0 = inp[0]; + Vectorized vi1 = inp[1]; + Vectorized vi2 = inp[2]; + Vectorized vi3 = inp[3]; + + vfloat32 vecf0 = vec_float(vi0.vec0()); + vfloat32 vecf1 = vec_float(vi0.vec1()); + vfloat32 vecf2 = vec_float(vi1.vec0()); + vfloat32 vecf3 = vec_float(vi1.vec1()); + + vfloat32 vecf4 = vec_float(vi2.vec0()); + vfloat32 vecf5 = vec_float(vi2.vec1()); + vfloat32 vecf6 = vec_float(vi3.vec0()); + vfloat32 vecf7 = vec_float(vi3.vec1()); + + vecf0 = vec_mul(vecf0, vec_multiplier); + vecf1 = vec_mul(vecf1, vec_multiplier); + vecf2 = vec_mul(vecf2, vec_multiplier); + vecf3 = vec_mul(vecf3, vec_multiplier); + + vecf4 = vec_mul(vecf4, vec_multiplier); + vecf5 = vec_mul(vecf5, vec_multiplier); + vecf6 = vec_mul(vecf6, vec_multiplier); + vecf7 = 
vec_mul(vecf7, vec_multiplier); + + vecf0 = vec_rint(vecf0); + vecf1 = vec_rint(vecf1); + vecf2 = vec_rint(vecf2); + vecf3 = vec_rint(vecf3); + + vecf4 = vec_rint(vecf4); + vecf5 = vec_rint(vecf5); + vecf6 = vec_rint(vecf6); + vecf7 = vec_rint(vecf7); + + vint32 veci0 = vec_signed(vecf0); + vint32 veci1 = vec_signed(vecf1); + vint32 veci2 = vec_signed(vecf2); + vint32 veci3 = vec_signed(vecf3); + + vint32 veci4 = vec_signed(vecf4); + vint32 veci5 = vec_signed(vecf5); + vint32 veci6 = vec_signed(vecf6); + vint32 veci7 = vec_signed(vecf7); + + veci0 = vec_add(veci0, vec_zero_point); + veci1 = vec_add(veci1, vec_zero_point); + veci2 = vec_add(veci2, vec_zero_point); + veci3 = vec_add(veci3, vec_zero_point); + + veci4 = vec_add(veci4, vec_zero_point); + veci5 = vec_add(veci5, vec_zero_point); + veci6 = vec_add(veci6, vec_zero_point); + veci7 = vec_add(veci7, vec_zero_point); + + vint16 vecshi0 = vec_packs(veci0, veci1); + vint16 vecshi1 = vec_packs(veci2, veci3); + vint16 vecshi2 = vec_packs(veci4, veci5); + vint16 vecshi3 = vec_packs(veci6, veci7); + + vint8 vec0 = vec_packs(vecshi0, vecshi1); + vint8 vec1 = vec_packs(vecshi2, vecshi3); + + return {vec0, vec1}; + } + + DEFINE_MEMBER_OP(operator==, c10::qint8, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, c10::qint8, vec_cmpne) + DEFINE_MEMBER_OP(operator<, c10::qint8, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, c10::qint8, vec_cmple) + DEFINE_MEMBER_OP(operator>, c10::qint8, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, c10::qint8, vec_cmpge) + DEFINE_MEMBER_OP(operator+, c10::qint8, vec_add) + DEFINE_MEMBER_OP(operator-, c10::qint8, vec_sub) + DEFINE_MEMBER_OP(operator*, c10::qint8, vec_mul) + DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint8, /) + DEFINE_MEMBER_OP(maximum, c10::qint8, vec_max) + DEFINE_MEMBER_OP(minimum, c10::qint8, vec_min) + DEFINE_MEMBER_OP(operator&, c10::qint8, vec_and) + DEFINE_MEMBER_OP(operator|, c10::qint8, vec_or) + DEFINE_MEMBER_OP(operator^, c10::qint8, vec_xor) +}; + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + return a.minimum(b); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { + return Vectorized{a.vec0()/b.vec0(), a.vec1()/b.vec1()}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git 
a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..c0d77d500491b3daa3b297832917d35b573c3c43 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h @@ -0,0 +1,501 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +// This file defines Vectorized<> for the quantized types. +// +// +// Currently, we simply use these classes as efficient converters between +// the quantized types and Vectorized, usually in bandwidth-bound cases +// where doing the arithmetic in full-precision is acceptable (e.g. +// elementwise operators). +// +// +// Conversions are as follows: +// Vectorized -> 4x Vectorized +// +// The size of the returned float vector is specified by the special +// constexpr function float_num_vecs. The type of the value returned +// from dequantize (and expected as an argument to quantize) is +// specified by float_vec_return_type. +// +// When writing kernels with these vectors, it is expected that floating- +// point operations will be carried out in a loop over Vectorized::float_num_vecs +// iterations. + +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +const vint16 mask_unsigned = vec_splats((short int)0xFF); +template <> +struct Vectorized { + private: + union { + struct { + vuint8 _vec0; + vuint8 _vec1; + }; + struct { + vbool8 _vecb0; + vbool8 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + Vectorized() {} + using size_type = int; + static constexpr size_type size() { + return 32; + } + + static constexpr size_t float_num_vecs() { + return 4; + } + static constexpr int int_num_vecs() { + return 4; + } + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::quint8::underlying; + using vec_internal_type = vuint8; + using vec_internal_mask_type = vbool8; + // Broadcast constructor + C10_ALWAYS_INLINE Vectorized(const c10::quint8& val) + : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {} + + C10_ALWAYS_INLINE Vectorized(const Vectorized& other) + : _vec0{other._vec0}, _vec1(other._vec1) {} + + C10_ALWAYS_INLINE Vectorized(vuint8 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vuint8 v1, vuint8 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {} + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + static C10_ALWAYS_INLINE Vectorized loadu( + const void* ptr, + int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + 
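+      // Partial store: stage the full 32-byte register pair in an aligned
+      // scratch buffer, then copy only the first `count` elements out.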
vec_vsx_st(_vec0, offset0, tmp_values); + vec_vsx_st(_vec1, offset16, tmp_values); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + public: + float_vec_return_type C10_ALWAYS_INLINE dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + // unpacking unsigned as signed + vint16 vecshi0 = vec_unpackh((vint8)_vec0); + vint16 vecshi1 = vec_unpackl((vint8)_vec0); + + vint16 vecshi2 = vec_unpackh((vint8)_vec1); + vint16 vecshi3 = vec_unpackl((vint8)_vec1); + + // signed -> unsigned + vecshi0 = vec_and(vecshi0, mask_unsigned); + vecshi1 = vec_and(vecshi1, mask_unsigned); + + vecshi2 = vec_and(vecshi2, mask_unsigned); + vecshi3 = vec_and(vecshi3, mask_unsigned); + + vint32 veci0 = vec_unpackh(vecshi0); + vint32 veci1 = vec_unpackl(vecshi0); + + vint32 veci2 = vec_unpackh(vecshi1); + vint32 veci3 = vec_unpackl(vecshi1); + + vint32 veci4 = vec_unpackh(vecshi2); + vint32 veci5 = vec_unpackl(vecshi2); + + vint32 veci6 = vec_unpackh(vecshi3); + vint32 veci7 = vec_unpackl(vecshi3); + + vfloat32 vecf0_0 = vec_float(veci0); + vfloat32 vecf1_0 = vec_float(veci1); + + vfloat32 vecf0_1 = vec_float(veci2); + vfloat32 vecf1_1 = vec_float(veci3); + + vfloat32 vecf0_2 = vec_float(veci4); + vfloat32 vecf1_2 = vec_float(veci5); + + vfloat32 vecf0_3 = vec_float(veci6); + vfloat32 vecf1_3 = vec_float(veci7); + vfloat32 scale_vec0 = scale.vec0(); + vfloat32 scale_vec1 = scale.vec1(); + vfloat32 scale_zp_premul0 = scale_zp_premul.vec0(); + vfloat32 scale_zp_premul1 = scale_zp_premul.vec1(); + return { + Vectorized{ + vec_madd(scale_vec0, vecf0_0, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)}, + Vectorized{ + vec_madd(scale_vec0, vecf0_1, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)}, + Vectorized{ + vec_madd(scale_vec0, vecf0_2, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)}, + Vectorized{ + vec_madd(scale_vec0, vecf0_3, scale_zp_premul0), + vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}}; + } + + float_vec_return_type C10_ALWAYS_INLINE dequantize( + Vectorized scale, + Vectorized zero_point) const { + // unpacking unsigned as signed + vint16 vecshi0 = vec_unpackh((vint8)_vec0); + vint16 vecshi1 = vec_unpackl((vint8)_vec0); + + vint16 vecshi2 = vec_unpackh((vint8)_vec1); + vint16 vecshi3 = vec_unpackl((vint8)_vec1); + + // signed -> unsigned + vecshi0 = vec_and(vecshi0, mask_unsigned); + vecshi1 = vec_and(vecshi1, mask_unsigned); + + vecshi2 = vec_and(vecshi2, mask_unsigned); + vecshi3 = vec_and(vecshi3, mask_unsigned); + + vint32 veci0 = vec_unpackh(vecshi0); + vint32 veci1 = vec_unpackl(vecshi0); + + vint32 veci2 = vec_unpackh(vecshi1); + vint32 veci3 = vec_unpackl(vecshi1); + + vint32 veci4 = vec_unpackh(vecshi2); + vint32 veci5 = vec_unpackl(vecshi2); + + vint32 veci6 = vec_unpackh(vecshi3); + vint32 veci7 = vec_unpackl(vecshi3); + + vfloat32 vecf0_0 = vec_float(veci0); + vfloat32 vecf1_0 = vec_float(veci1); + + vfloat32 vecf0_1 = vec_float(veci2); + vfloat32 vecf1_1 = vec_float(veci3); + + vfloat32 vecf0_2 = vec_float(veci4); + vfloat32 vecf1_2 = vec_float(veci5); + + vfloat32 vecf0_3 = vec_float(veci6); + vfloat32 vecf1_3 = vec_float(veci7); + vfloat32 scale_vec0 = scale.vec0(); + vfloat32 scale_vec1 = scale.vec1(); + vfloat32 zero_point0 = zero_point.vec0(); + vfloat32 zero_point1 = zero_point.vec1(); + return { + Vectorized{ + (vecf0_0 - zero_point0) * scale_vec0, + (vecf1_0 - zero_point1) * scale_vec1}, + Vectorized{ + (vecf0_1 - zero_point0) * 
scale_vec0, + (vecf1_1 - zero_point1) * scale_vec1}, + Vectorized{ + (vecf0_2 - zero_point0) * scale_vec0, + (vecf1_2 - zero_point1) * scale_vec1}, + Vectorized{ + (vecf0_3 - zero_point0) * scale_vec0, + (vecf1_3 - zero_point1) * scale_vec1}}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + // constexpr int32_t min_val = std::numeric_limits::min(); + // constexpr int32_t max_val = std::numeric_limits::max(); + + vfloat32 vec_inverse = vec_splats(inverse_scale); + vfloat32 vec_zero_point = vec_splats((float)zero_point); + // vuint32 vmin = vec_splats(min_val); + // vuint32 vmax = vec_splats(max_val); + Vectorized vf0 = rhs[0]; + Vectorized vf1 = rhs[1]; + Vectorized vf2 = rhs[2]; + Vectorized vf3 = rhs[3]; + vfloat32 vecf0 = vf0.vec0(); + vfloat32 vecf1 = vf0.vec1(); + vfloat32 vecf2 = vf1.vec0(); + vfloat32 vecf3 = vf1.vec1(); + + vfloat32 vecf4 = vf2.vec0(); + vfloat32 vecf5 = vf2.vec1(); + vfloat32 vecf6 = vf3.vec0(); + vfloat32 vecf7 = vf3.vec1(); + + vecf0 = vec_mul(vecf0, vec_inverse); + vecf1 = vec_mul(vecf1, vec_inverse); + vecf2 = vec_mul(vecf2, vec_inverse); + vecf3 = vec_mul(vecf3, vec_inverse); + + vecf4 = vec_mul(vecf4, vec_inverse); + vecf5 = vec_mul(vecf5, vec_inverse); + vecf6 = vec_mul(vecf6, vec_inverse); + vecf7 = vec_mul(vecf7, vec_inverse); + + vecf0 = vec_add(vec_rint(vecf0), vec_zero_point); + vecf1 = vec_add(vec_rint(vecf1), vec_zero_point); + vecf2 = vec_add(vec_rint(vecf2), vec_zero_point); + vecf3 = vec_add(vec_rint(vecf3), vec_zero_point); + + vecf4 = vec_add(vec_rint(vecf4), vec_zero_point); + vecf5 = vec_add(vec_rint(vecf5), vec_zero_point); + vecf6 = vec_add(vec_rint(vecf6), vec_zero_point); + vecf7 = vec_add(vec_rint(vecf7), vec_zero_point); + + vint32 veci0 = vec_signed(vecf0); + vint32 veci1 = vec_signed(vecf1); + vint32 veci2 = vec_signed(vecf2); + vint32 veci3 = vec_signed(vecf3); + + vint32 veci4 = vec_signed(vecf4); + vint32 veci5 = vec_signed(vecf5); + vint32 veci6 = vec_signed(vecf6); + vint32 veci7 = vec_signed(vecf7); + + vint16 vecshi0 = vec_packs(veci0, veci1); + vint16 vecshi1 = vec_packs(veci2, veci3); + vint16 vecshi2 = vec_packs(veci4, veci5); + vint16 vecshi3 = vec_packs(veci6, veci7); + + vuint8 vec0 = vec_packsu(vecshi0, vecshi1); + vuint8 vec1 = vec_packsu(vecshi2, vecshi3); + + return {vec0, vec1}; + } + + Vectorized C10_ALWAYS_INLINE relu(Vectorized zero_point) const { + return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)}; + } + + Vectorized C10_ALWAYS_INLINE + relu6(Vectorized zero_point, Vectorized q_six) const { + vuint8 max0 = vec_max(_vec0, zero_point._vec0); + vuint8 max1 = vec_max(_vec1, zero_point._vec1); + return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)}; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + vint16 vecshi0 = vec_unpackh((vint8)_vec0); + vint16 vecBshi0 = vec_unpackh((vint8)b._vec0); + vint16 vecshi1 = vec_unpackl((vint8)_vec0); + vint16 vecBshi1 = vec_unpackl((vint8)b._vec0); + + vint16 vecshi2 = vec_unpackh((vint8)_vec1); + vint16 vecBshi2 = vec_unpackh((vint8)b._vec1); + vint16 vecshi3 = vec_unpackl((vint8)_vec1); + vint16 vecBshi3 = vec_unpackl((vint8)b._vec1); + + vecshi0 = vec_and(vecshi0, mask_unsigned); + vecBshi0 = vec_and(vecBshi0, mask_unsigned); + vecshi1 = vec_and(vecshi1, mask_unsigned); + vecBshi1 = vec_and(vecBshi1, mask_unsigned); + + vecshi2 = vec_and(vecshi2, mask_unsigned); + vecBshi2 = vec_and(vecBshi2, mask_unsigned); + vecshi3 = vec_and(vecshi3, 
mask_unsigned); + vecBshi3 = vec_and(vecBshi3, mask_unsigned); + + vint32 veci0 = vec_unpackh(vecshi0); + vint32 vecBi0 = vec_unpackh(vecBshi0); + vint32 veci1 = vec_unpackl(vecshi0); + vint32 vecBi1 = vec_unpackl(vecBshi0); + + vint32 veci2 = vec_unpackh(vecshi1); + vint32 vecBi2 = vec_unpackh(vecBshi1); + vint32 veci3 = vec_unpackl(vecshi1); + vint32 vecBi3 = vec_unpackl(vecBshi1); + + vint32 veci4 = vec_unpackh(vecshi2); + vint32 vecBi4 = vec_unpackh(vecBshi2); + vint32 veci5 = vec_unpackl(vecshi2); + vint32 vecBi5 = vec_unpackl(vecBshi2); + + vint32 veci6 = vec_unpackh(vecshi3); + vint32 vecBi6 = vec_unpackh(vecBshi3); + vint32 veci7 = vec_unpackl(vecshi3); + vint32 vecBi7 = vec_unpackl(vecBshi3); + + return { + Vectorized(veci0 - vecBi0, veci1 - vecBi1), + Vectorized(veci2 - vecBi2, veci3 - vecBi3), + Vectorized(veci4 - vecBi4, veci5 - vecBi5), + Vectorized(veci6 - vecBi6, veci7 - vecBi7)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + vfloat32 vec_multiplier = vec_splats(multiplier); + vint32 vec_zero_point = vec_splats(zero_point); + + Vectorized vi0 = inp[0]; + Vectorized vi1 = inp[1]; + Vectorized vi2 = inp[2]; + Vectorized vi3 = inp[3]; + + vfloat32 vecf0 = vec_float(vi0.vec0()); + vfloat32 vecf1 = vec_float(vi0.vec1()); + vfloat32 vecf2 = vec_float(vi1.vec0()); + vfloat32 vecf3 = vec_float(vi1.vec1()); + + vfloat32 vecf4 = vec_float(vi2.vec0()); + vfloat32 vecf5 = vec_float(vi2.vec1()); + vfloat32 vecf6 = vec_float(vi3.vec0()); + vfloat32 vecf7 = vec_float(vi3.vec1()); + + vecf0 = vec_mul(vecf0, vec_multiplier); + vecf1 = vec_mul(vecf1, vec_multiplier); + vecf2 = vec_mul(vecf2, vec_multiplier); + vecf3 = vec_mul(vecf3, vec_multiplier); + + vecf4 = vec_mul(vecf4, vec_multiplier); + vecf5 = vec_mul(vecf5, vec_multiplier); + vecf6 = vec_mul(vecf6, vec_multiplier); + vecf7 = vec_mul(vecf7, vec_multiplier); + + vecf0 = vec_rint(vecf0); + vecf1 = vec_rint(vecf1); + vecf2 = vec_rint(vecf2); + vecf3 = vec_rint(vecf3); + + vecf4 = vec_rint(vecf4); + vecf5 = vec_rint(vecf5); + vecf6 = vec_rint(vecf6); + vecf7 = vec_rint(vecf7); + + vint32 veci0 = vec_signed(vecf0); + vint32 veci1 = vec_signed(vecf1); + vint32 veci2 = vec_signed(vecf2); + vint32 veci3 = vec_signed(vecf3); + + vint32 veci4 = vec_signed(vecf4); + vint32 veci5 = vec_signed(vecf5); + vint32 veci6 = vec_signed(vecf6); + vint32 veci7 = vec_signed(vecf7); + + veci0 = vec_add(veci0, vec_zero_point); + veci1 = vec_add(veci1, vec_zero_point); + veci2 = vec_add(veci2, vec_zero_point); + veci3 = vec_add(veci3, vec_zero_point); + + veci4 = vec_add(veci4, vec_zero_point); + veci5 = vec_add(veci5, vec_zero_point); + veci6 = vec_add(veci6, vec_zero_point); + veci7 = vec_add(veci7, vec_zero_point); + + vint16 vecshi0 = vec_packs(veci0, veci1); + vint16 vecshi1 = vec_packs(veci2, veci3); + vint16 vecshi2 = vec_packs(veci4, veci5); + vint16 vecshi3 = vec_packs(veci6, veci7); + + vuint8 vec0 = vec_packsu(vecshi0, vecshi1); + vuint8 vec1 = vec_packsu(vecshi2, vecshi3); + + return {vec0, vec1}; + } + + DEFINE_MEMBER_OP(operator==, c10::quint8, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, c10::quint8, vec_cmpne) + DEFINE_MEMBER_OP(operator<, c10::quint8, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, c10::quint8, vec_cmple) + DEFINE_MEMBER_OP(operator>, c10::quint8, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, c10::quint8, vec_cmpge) + DEFINE_MEMBER_OP(operator+, c10::quint8, vec_add) + DEFINE_MEMBER_OP(operator-, c10::quint8, vec_sub) + DEFINE_MEMBER_OP(operator*, 
c10::quint8, vec_mul) + DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::quint8, /) + DEFINE_MEMBER_OP(maximum, c10::quint8, vec_max) + DEFINE_MEMBER_OP(minimum, c10::quint8, vec_min) + DEFINE_MEMBER_OP(operator&, c10::quint8, vec_and) + DEFINE_MEMBER_OP(operator|, c10::quint8, vec_or) + DEFINE_MEMBER_OP(operator^, c10::quint8, vec_xor) +}; + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + return a.minimum(b); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { + return Vectorized{a.vec0()/b.vec0(), a.vec1()/b.vec1()}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..1dc742f3cbb1c245f972babfdb26a539c5179263 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h @@ -0,0 +1,474 @@ +#pragma once +#include +#include +#include + +#if defined(__clang__) +typedef __vector __bool char vbool8; +typedef __vector __bool short vbool16; +typedef __vector __bool int vbool32; +typedef __vector __bool long long vbool64; +using vint8 = __attribute__((vector_size(16))) signed char; +using vint16 = __attribute__((vector_size(16))) signed short; +using vint32 = __attribute__((vector_size(16))) signed int; +using vint64 = __attribute__((vector_size(16))) signed long long; +using vuint8 = __attribute__((vector_size(16))) unsigned char; +using vuint16 = __attribute__((vector_size(16))) unsigned short; +using vuint32 = __attribute__((vector_size(16))) unsigned int; +using vuint64 = __attribute__((vector_size(16))) unsigned long long; +using vfloat32 = __attribute__((vector_size(16))) float; +using vfloat64 = __attribute__((vector_size(16))) double; +#else +using vbool8 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) char; +using vbool16 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) short; +using vbool32 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) int; +using vbool64 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) long long; 
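+// Editor's note: each alias here names one 128-bit AltiVec/VSX register, e.g.
+// vint8 carries 16 lanes of signed char and vfloat32 carries 4 floats; the
+// Vectorized<T> classes above pair two of them to model a 256-bit vector.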
+using vint8 = __attribute__((altivec(vector__))) signed char; +using vint16 = __attribute__((altivec(vector__))) signed short; +using vint32 = __attribute__((altivec(vector__))) signed int; +using vint64 = __attribute__((altivec(vector__))) signed long long; +using vuint8 = __attribute__((altivec(vector__))) unsigned char; +using vuint16 = __attribute__((altivec(vector__))) unsigned short; +using vuint32 = __attribute__((altivec(vector__))) unsigned int; +using vuint64 = __attribute__((altivec(vector__))) unsigned long long; +using vfloat32 = __attribute__((altivec(vector__))) float; +using vfloat64 = __attribute__((altivec(vector__))) double; +#endif + +#if !defined(vec_float) +C10_ALWAYS_INLINE vfloat32 vec_float(const vint32& vec_in) { + vfloat32 vec_out; + __asm__("xvcvsxwsp %x0,%x1" : "=wf"(vec_out) : "wa"(vec_in)); + return vec_out; +} +#endif + +#if !defined(vec_signed) +C10_ALWAYS_INLINE vint32 vec_signed(const vfloat32& vec_in) { + vint32 vec_out; + __asm__("xvcvspsxws %x0,%x1" : "=wa"(vec_out) : "wf"(vec_in)); + return vec_out; +} + +C10_ALWAYS_INLINE vint64 vec_signed(const vfloat64& vec_in) { + vint64 vec_out; + __asm__("xvcvdpsxds %x0,%x1" : "=wa"(vec_out) : "wd"(vec_in)); + return vec_out; +} +#endif + +#if !defined(vec_neg) +C10_ALWAYS_INLINE vfloat32 vec_neg(const vfloat32& vec_in) { + vfloat32 vec_out; + __asm__("xvnegsp %x0,%x1" : "=wf"(vec_out) : "wf"(vec_in)); + return vec_out; +} + +C10_ALWAYS_INLINE vfloat64 vec_neg(const vfloat64& vec_in) { + vfloat64 vec_out; + __asm__("xvnegdp %x0,%x1" : "=wd"(vec_out) : "wd"(vec_in)); + return vec_out; +} + +C10_ALWAYS_INLINE vint16 vec_neg(const vint16& vec_in) { + vint16 vint0 = {0, 0, 0, 0 ,0, 0, 0, 0}; + return vec_vsubuhm(vint0, vec_in); +} + +C10_ALWAYS_INLINE vint32 vec_neg(const vint32& vec_in) { + vint32 vint0 = {0, 0, 0, 0}; + return vec_vsubuwm(vint0, vec_in); +} + +C10_ALWAYS_INLINE vint64 vec_neg(const vint64& vec_in) { + return -vec_in; +} +#endif + +#if !defined(vec_sldw) +template +C10_ALWAYS_INLINE vfloat32 +vec_sldw_aux(const vfloat32& vec_in0, const vfloat32& vec_in1) { + vfloat32 vec_out; + __asm("xxsldwi %x0, %x1, %x2, %3 " + : "=wa"(vec_out) + : "wa"(vec_in0), "wa"(vec_in1), "I"(C)); + return vec_out; +} + +#define vec_sldw(a, b, c) vec_sldw_aux(a, b) +#endif + +#define vec_not(a) vec_nor(a, a) +#if defined(__clang__) && !defined(vec_splats) +C10_ALWAYS_INLINE vint64 vec_splats(const int64_t& a) { + return vec_splats(a); +} +#endif +// Vectorized min/max which return a if any operand is nan +template +C10_ALWAYS_INLINE T vec_min_nan(const T& a, const T& b) { + return vec_min(a, b); +} +template +C10_ALWAYS_INLINE T vec_max_nan(const T& a, const T& b) { + return vec_max(a, b); +} + +// Specializations for float/double taken from Eigen +template<> +C10_ALWAYS_INLINE vfloat32 vec_min_nan(const vfloat32& a, const vfloat32& b) +{ + // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN + vfloat32 ret; + __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; +} +// Specializations for float/double taken from Eigen +template<> +C10_ALWAYS_INLINE vfloat32 vec_max_nan(const vfloat32& a, const vfloat32& b) +{ + // NOTE: about 10% slower than vec_max, but consistent with std::min and SSE regarding NaN + vfloat32 ret; + __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; +} + +template<> +C10_ALWAYS_INLINE vfloat64 vec_min_nan(const vfloat64& a, const vfloat64& b) +{ + // 
NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN + vfloat64 ret; + __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; +} +template<> +C10_ALWAYS_INLINE vfloat64 vec_max_nan(const vfloat64& a, const vfloat64& b) +{ + // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN + vfloat64 ret; + __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; +} + +// Vectorizes min/max function which returns nan if any side is nan +#define C10_VSX_VEC_NAN_PROPAG(name, type, btype, func) \ + C10_ALWAYS_INLINE type name(const type& a, const type& b) { \ + type tmp = func(a, b); \ + btype nan_a = vec_cmpne(a, a); \ + btype nan_b = vec_cmpne(b, b); \ + tmp = vec_sel(tmp, a, nan_a); \ + return vec_sel(tmp, b, nan_b); \ + } + +C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat32, vbool32, vec_min) +C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat32, vbool32, vec_max) +C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat64, vbool64, vec_min) +C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat64, vbool64, vec_max) + +#undef C10_VSX_VEC_NAN_PROPAG + +#define DEFINE_MEMBER_UNARY_OP(op, op_type, func) \ + Vectorized C10_ALWAYS_INLINE op() const { \ + return Vectorized{func(_vec0), func(_vec1)}; \ + } + +#define DEFINE_MEMBER_OP(op, op_type, func) \ + Vectorized C10_ALWAYS_INLINE op(const Vectorized& other) const { \ + return Vectorized{ \ + func(_vec0, other._vec0), func(_vec1, other._vec1)}; \ + } + +#define DEFINE_MEMBER_BITWISE_OP(op, op_type, func) \ + Vectorized C10_ALWAYS_INLINE op(const Vectorized& other) const { \ + return Vectorized{ \ + func(_vecb0, other._vecb0), func(_vecb1, other._vecb1)}; \ + } + +#define DEFINE_MEMBER_TERNARY_OP(op, op_type, func) \ + Vectorized C10_ALWAYS_INLINE op( \ + const Vectorized& b, const Vectorized& c) const { \ + return Vectorized{ \ + func(_vec0, b._vec0, c._vec0), func(_vec1, b._vec1, c._vec1)}; \ + } + +#define DEFINE_MEMBER_EMULATE_BINARY_OP(op, op_type, binary_op) \ + Vectorized C10_ALWAYS_INLINE op(const Vectorized& b) const { \ + Vectorized::vec_internal_type ret_0; \ + Vectorized::vec_internal_type ret_1; \ + for (int i = 0; i < Vectorized::size() / 2; i++) { \ + ret_0[i] = _vec0[i] binary_op b._vec0[i]; \ + ret_1[i] = _vec1[i] binary_op b._vec1[i]; \ + } \ + return Vectorized{ret_0, ret_1}; \ + } + + +#define DEFINE_MEMBER_OP_AND_ONE(op, op_type, func) \ + Vectorized C10_ALWAYS_INLINE op(const Vectorized& other) const { \ + using vvtype = Vectorized::vec_internal_type; \ + const vvtype v_one = vec_splats(static_cast(1.0)); \ + vvtype ret0 = (vvtype)func(_vec0, other._vec0); \ + vvtype ret1 = (vvtype)func(_vec1, other._vec1); \ + return Vectorized{vec_and(ret0, v_one), vec_and(ret1, v_one)}; \ + } + +#define DEFINE_CLAMP_FUNCS(operand_type) \ + template <> \ + Vectorized C10_ALWAYS_INLINE clamp( \ + const Vectorized& a, \ + const Vectorized& min, \ + const Vectorized& max) { \ + return Vectorized{ \ + vec_min_nan(vec_max_nan(a.vec0(), min.vec0()), max.vec0()), \ + vec_min_nan(vec_max_nan(a.vec1(), min.vec1()), max.vec1())}; \ + } \ + template <> \ + Vectorized C10_ALWAYS_INLINE clamp_min( \ + const Vectorized& a, const Vectorized& min) { \ + return Vectorized{ \ + vec_max_nan(a.vec0(), min.vec0()), \ + vec_max_nan(a.vec1(), min.vec1())}; \ + } \ + template <> \ + Vectorized C10_ALWAYS_INLINE clamp_max( \ + const Vectorized& a, const Vectorized& max) { \ + return Vectorized{ \ + vec_min_nan(a.vec0(), 
max.vec0()), \ + vec_min_nan(a.vec1(), max.vec1())}; \ + } + +#define DEFINE_REINTERPRET_CAST_FUNCS( \ + first_type, cast_type, cast_inner_vector_type) \ + template <> \ + C10_ALWAYS_INLINE Vectorized cast( \ + const Vectorized& src) { \ + return Vectorized{(cast_inner_vector_type)src.vec0(), \ + (cast_inner_vector_type)src.vec1()}; \ + } + +#define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(first_type) \ + DEFINE_REINTERPRET_CAST_FUNCS(first_type, double, vfloat64) \ + DEFINE_REINTERPRET_CAST_FUNCS(first_type, float, vfloat32) \ + DEFINE_REINTERPRET_CAST_FUNCS(first_type, int64_t, vint64) \ + DEFINE_REINTERPRET_CAST_FUNCS(first_type, int32_t, vint32) \ + DEFINE_REINTERPRET_CAST_FUNCS(first_type, int16_t, vint16) + +// it can be used to emulate blend faster +constexpr int blendChoice(uint32_t mask, uint32_t half1 = 0xF, uint32_t half2 = 0xF0) { + uint32_t none = 0; + uint32_t both = half1 | half2; + // clamp it between 0 and both + mask = mask & both; + // return (a._vec0, a._vec1) + if (mask == none) return 0; + // return (b._vec0,b._vec1) + else if (mask == both) + return 1; + // return (b._vec0,a._vec1) + else if (mask == half1) + return 2; + // return (a._vec0,b._vec1) + else if (mask == half2) + return 3; + // return (*_vec0,a._vec1) + else if (mask > 0 && mask < half1) + return 4; + // return (*_vec0,b._vec1) + else if ((mask & half2) == half2) + return 5; + // return (a._vec0,*_vec1) + else if ((mask & half1) == 0 && mask > half1) + return 6; + // return (b._vec0,*_vec1) + else if ((mask & half1) == half1 && mask > half1) + return 7; + // return (*_vec0,*_vec1) + return 8; +} + +// it can be used to emulate blend faster +constexpr int blendChoiceDbl(uint32_t mask) { + // clamp it 0 and 0xF + return blendChoice(mask, 0x3, 0xC); +} + +constexpr vbool32 VsxMask1(uint32_t mask) { + uint32_t g0 = (mask & 1) * 0xffffffff; + uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff; + uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff; + uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff; + return (vbool32){g0, g1, g2, g3}; +} + +constexpr vbool32 VsxMask2(uint32_t mask) { + uint32_t mask2 = (mask & 0xFF) >> 4; + return VsxMask1(mask2); +} + +constexpr vbool64 VsxDblMask1(uint32_t mask) { + uint64_t g0 = (mask & 1) * 0xffffffffffffffff; + uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; + return (vbool64){g0, g1}; +} + +constexpr vbool64 VsxDblMask2(uint32_t mask) { + uint32_t mask2 = (mask & 0xF) >> 2; + return VsxDblMask1(mask2); +} + +constexpr int maskForComplex(uint32_t mask) { + mask = mask & 0xF; + int complex_mask = 0; + if (mask & 1) complex_mask |= 3; + if (mask & 2) complex_mask |= (3 << 2); + if (mask & 4) complex_mask |= (3 << 4); + if (mask & 8) complex_mask |= (3 << 6); + return complex_mask; +} + +constexpr int maskForComplexDbl(uint32_t mask) { + mask = mask & 0x3; + int complex_mask = 0; + if (mask & 1) complex_mask |= 3; + if (mask & 2) complex_mask |= (3 << 2); + return complex_mask; +} + +constexpr int blendChoiceComplex(uint32_t mask) { + return blendChoice(maskForComplex(mask)); +} + +constexpr int blendChoiceComplexDbl(uint32_t mask) { + return blendChoiceDbl(maskForComplexDbl(mask)); +} + +constexpr vbool32 VsxComplexMask1(uint32_t mask) { + return VsxMask1(maskForComplex(mask)); +} + +constexpr vbool32 VsxComplexMask2(uint32_t mask) { + uint32_t mask2 = (mask & 0xF) >> 2; + return VsxMask1(maskForComplex(mask2)); +} + +constexpr vbool64 VsxComplexDblMask1(uint32_t mask) { return VsxDblMask1(mask); } + +constexpr vbool64 VsxComplexDblMask2(uint32_t mask) { + uint32_t mask2 = (mask & 0xF) >> 
2; + return VsxDblMask1(mask2); +} + +// constants +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +// +constexpr int offset0 = 0; +constexpr int offset16 = 16; + +// #Constants +const vuint8 mask_zero_bits = vuint8{128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 96, 64, 32, 0}; + +const vuint8 swap_mask = + vuint8{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}; + +const vint32 v0x7f = vec_splats(0x7f); +const vint32 vi_0 = vec_splats((int)(0)); +const vint32 vi_1 = vec_splats((int)1); +const vint32 vi_2 = vec_splats((int)2); +const vint32 vi_4 = vec_splats((int)4); +const vint32 vi_inv1 = vec_splats((int)~1); +const vuint32 vu_29 = vec_splats(29u); +const vuint32 vu_23 = vec_splats(23u); + +const vbool32 inv_mant_mask = (vbool32)vec_splats((unsigned int)~0xff800000); +const vbool32 sign_mask = (vbool32)vec_splats((int)0x80000000); +const vbool32 real_mask = vbool32{0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0x0}; +const vbool32 imag_mask = vbool32{0x0, 0xFFFFFFFF, 0x0, 0xFFFFFFFF}; +const vbool32 isign_mask = vbool32{0x0, 0x80000000, 0x0, 0x80000000}; +const vbool32 rsign_mask = vbool32{0x80000000, 0x0, 0x80000000, 0x0}; + +const vbool64 vd_sign_mask = vbool64{0x8000000000000000, 0x8000000000000000}; +const vbool64 vd_imag_mask = vbool64{0x0, 0xFFFFFFFFFFFFFFFF}; +const vbool64 vd_real_mask = vbool64{0xFFFFFFFFFFFFFFFF, 0x0}; +const vbool64 vd_isign_mask = vbool64{0x0, 0x8000000000000000}; +const vbool64 vd_rsign_mask = vbool64{0x8000000000000000, 0x0}; + +const vfloat32 zero = vec_splats(0.f); +const vfloat32 half = vec_splats(0.5f); +const vfloat32 one = vec_splats(1.f); +const vfloat32 two = vec_splats(2.0f); +const vfloat32 _4div_pi = vec_splats(1.27323954473516f); +const vfloat32 v_inf = (vfloat32)vec_splats(0x7f800000u); +const vfloat32 v_minus_inf = vfloat32{ 0xff800000u, 0xff800000u, 0xff800000u, 0xff800000u }; +const vfloat32 v_nan = (vfloat32)vec_splats(0x7fffffff); +const vfloat32 log10e_inv = vec_splats(0.43429448190325176f); +const vfloat32 log2e_inv = vec_splats(1.4426950408889634f); +const vfloat32 log2eB_inv = vec_splats(1.442695036924675f); +const vfloat32 cephes_SQRTHF = vec_splats(0.707106781186547524f); +const vfloat32 coscof_p0 = vec_splats(2.443315711809948E-005f); +const vfloat32 coscof_p1 = vec_splats(-1.388731625493765E-003f); +const vfloat32 coscof_p2 = vec_splats(4.166664568298827E-002f); +const vfloat32 exp_hi = vec_splats(104.f); +const vfloat32 exp_lo = vec_splats(-104.f); +const vfloat32 exp_p0 = vec_splats(0.000198527617612853646278381f); +const vfloat32 exp_p1 = vec_splats((0.00139304355252534151077271f)); +const vfloat32 exp_p2 = vec_splats(0.00833336077630519866943359f); +const vfloat32 exp_p3 = vec_splats(0.0416664853692054748535156f); +const vfloat32 exp_p4 = vec_splats(0.166666671633720397949219f); +const vfloat32 exp_p5 = vec_splats(0.5f); +const vfloat32 log_p0 = vec_splats(7.0376836292E-2f); +const vfloat32 log_p1 = vec_splats(-1.1514610310E-1f); +const vfloat32 log_p2 = vec_splats(1.1676998740E-1f); +const vfloat32 log_p3 = vec_splats(-1.2420140846E-1f); +const vfloat32 log_p4 = vec_splats(+1.4249322787E-1f); +const vfloat32 log_p5 = vec_splats(-1.6668057665E-1f); +const vfloat32 log_p6 = vec_splats(+2.0000714765E-1f); +const vfloat32 log_p7 = vec_splats(-2.4999993993E-1f); +const vfloat32 log_p8 = vec_splats(+3.3333331174E-1f); +const vfloat32 log_q1 = vec_splats(-2.12194440e-4f); +const vfloat32 log_q2 = vec_splats(0.693359375f); +const vfloat32 max_logf = 
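// (Annotation, not part of the original header: the constants in this block
// are Cephes-style minimax polynomial coefficients used by the vectorized
// exp/log/sin/cos/tanh kernels; exp_hi/exp_lo clamp the argument before the
// 2^k * p(r) range reduction. max_logf, 88.02969187150841f, is ln(2^127) --
// the classical Cephes MAXLOGF bound -- and max_numf just below it is 2^127
// itself.)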
vec_splats(88.02969187150841f); +const vfloat32 max_numf = vec_splats(1.7014117331926442990585209174225846272e38f); +const vfloat32 min_inf = (vfloat32)vec_splats(0xff800000u); +const vfloat32 min_norm_pos = (vfloat32)vec_splats(0x0800000u); +const vfloat32 minus_cephes_dp1 = vec_splats(-0.78515625f); +const vfloat32 minus_cephes_dp2 = vec_splats(-2.4187564849853515625e-4f); +const vfloat32 minus_cephes_dp3 = vec_splats(-3.77489497744594108e-8f); +const vfloat32 negln2f_hi = vec_splats(-0.693145751953125f); +const vfloat32 negln2f_lo = vec_splats(-1.428606765330187045e-06f); +const vfloat32 p0 = vec_splats(2.03721912945E-4f); +const vfloat32 p1 = vec_splats(8.33028376239E-3f); +const vfloat32 p2 = vec_splats(1.66667160211E-1f); +const vfloat32 sincof_p0 = vec_splats(-1.9515295891E-4f); +const vfloat32 sincof_p1 = vec_splats(8.3321608736E-3f); +const vfloat32 sincof_p2 = vec_splats(-1.6666654611E-1f); +const vfloat32 tanh_0p625 = vec_splats(0.625f); +const vfloat32 tanh_half_max = vec_splats(44.014845935754205f); +const vfloat32 tanh_p0 = vec_splats(-5.70498872745E-3f); +const vfloat32 tanh_p1 = vec_splats(2.06390887954E-2f); +const vfloat32 tanh_p2 = vec_splats(-5.37397155531E-2f); +const vfloat32 tanh_p3 = vec_splats(1.33314422036E-1f); +const vfloat32 tanh_p4 = vec_splats(-3.33332819422E-1f); +const vfloat32 vcheck = vec_splats((float)(1LL << 24)); +const vfloat32 imag_one = vfloat32{0.f, 1.f, 0.f, 1.f}; +const vfloat32 imag_half = vfloat32{0.f, 0.5f, 0.f, 0.5f}; +const vfloat32 sqrt2_2 = vfloat32{0.70710676908493042f, 0.70710676908493042, + 0.70710676908493042, 0.70710676908493042}; +const vfloat32 pi_2 = vfloat32{M_PI / 2, 0.0, M_PI / 2, 0.0}; +const vfloat32 vf_89 = vfloat32{89.f, 89.f, 89.f, 89.f}; +const vfloat64 vd_one = vec_splats(1.0); +const vfloat64 vd_zero = vec_splats(0.0); +const vfloat64 vd_log10e_inv = vec_splats(0.43429448190325176); +const vfloat64 vd_log2e_inv = vec_splats(1.4426950408889634); +const vfloat64 vd_imag_one = vfloat64{0.0, 1.0}; +const vfloat64 vd_imag_half = vfloat64{0.0, 0.5}; +const vfloat64 vd_sqrt2_2 = vfloat64{0.70710678118654757, 0.70710678118654757}; +const vfloat64 vd_pi_2 = vfloat64{M_PI / 2.0, 0.0}; + +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ca57363ee4b4a33873d39ffba4590b45049eb69 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h @@ -0,0 +1,2895 @@ +#include +#include +#include +#include +#include +#if defined(__clang__) +#include +#elif defined(__GNUC__) || defined(__GNUG__) +#include +#include +#endif +#include +#include +#include + +namespace at { +namespace vec { + +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +template +constexpr bool is_zarch_implemented() { + return ( + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value); +} + +template +constexpr bool is_zarch_implemented_quant() { + return ( + std::is_same::value || + std::is_same::value || + std::is_same::value); +} + +template +constexpr bool is_zarch_implemented_complex() { + return std::is_same>::value || + std::is_same>::value; +} + +constexpr int offset0 = 0; +constexpr 
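// (Annotation, not part of the original header: the is_zarch_implemented*
// traits above gate these z/Architecture specializations to element types
// that actually have a vector implementation, and offset0/offset16 are the
// byte offsets passed to vec_xl/vec_xst -- the low 128-bit half of a
// Vectorized<T> lives at byte 0 of its 32-byte block, the high half at
// byte 16.)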
int offset16 = 16; + +template +struct VecBinaryType { + using type __attribute__((vector_size(16))) = uintmax_t; +}; + +template <> +struct VecBinaryType<8> { + using type = __attribute__((vector_size(16))) unsigned long long; +}; + +template <> +struct VecBinaryType<4> { + using type = __attribute__((vector_size(16))) unsigned int; +}; + +template <> +struct VecBinaryType<2> { + using type = __attribute__((vector_size(16))) unsigned short; +}; + +template <> +struct VecBinaryType<1> { + using type = __attribute__((vector_size(16))) unsigned char; +}; + +template +struct VecInnerType { + using Type __attribute__((vector_size(16))) = T; + using BinaryType = typename VecBinaryType::type; + using ElementType = T; + static constexpr int size = 16 / sizeof(T); +}; + +// define for int64_t properly for load +template <> +struct VecInnerType { + using Type = __attribute__((vector_size(16))) signed long long; + using ElementType = signed long long; + using BinaryType = typename VecBinaryType::type; + static constexpr int size = 16 / sizeof(signed long long); +}; + +template +using ZSimdVect = typename VecInnerType::Type; +template +using ZSimdVectBinary = typename VecInnerType::BinaryType; +template +using ZSimdVectElement = typename VecInnerType::ElementType; + +constexpr int blendChoiceInner( + const uint64_t mask, + const uint64_t half1 = 0xF, + const uint64_t half2 = 0xF0) { + uint64_t none = 0; + uint64_t both = half1 | half2; + // clamp it between 0 and both + auto res_mask = mask & both; + // return (a._vec0, a._vec1) + if (res_mask == none) + return 0; + // return (b._vec0,b._vec1) + else if (res_mask == both) + return 1; + // return (b._vec0, a._vec1) + else if (res_mask == half1) + return 2; + // return (a._vec0,b._vec1) + else if (res_mask == half2) + return 3; + // return (*_vec0,a._vec1) + else if (res_mask > 0 && res_mask < half1) + return 4; + // return (*_vec0,b._vec1) + else if ((res_mask & half2) == half2) + return 5; + // return (a._vec0,*_vec1) + else if ((res_mask & half1) == 0 && res_mask > half1) + return 6; + // return (b._vec0,*_vec1) + else if ((res_mask & half1) == half1 && res_mask > half1) + return 7; + // return (*_vec0,*_vec1) + return 8; +} + +// it can be used to emulate blend faster +template +constexpr int blendChoice(const uint64_t mask) { + static_assert(Z < 1 || Z > 8, "not implemented"); + return blendChoiceInner(mask); +} + +template <> +constexpr int blendChoice<1>(const uint64_t mask) { + return blendChoiceInner(mask, 0x0000FFFF, 0xFFFF0000); +} + +template <> +constexpr int blendChoice<2>(const uint64_t mask) { + return blendChoiceInner(mask, 0x00FF, 0xFF00); +} + +template <> +constexpr int blendChoice<4>(const uint64_t mask) { + return blendChoiceInner(mask, 0xF, 0xF0); +} + +template <> +constexpr int blendChoice<8>(const uint64_t mask) { + // clamp it 0 and 0xF + return blendChoiceInner(mask, 0x3, 0xC); +} + +template +constexpr auto GetMask1(const uint64_t mask) { + return typename VecBinaryType::type{}; +} + +template +constexpr auto GetMask2(const uint64_t mask) { + return typename VecBinaryType::type{}; +} + +template <> +constexpr auto GetMask1<1>(const uint64_t mask) { + constexpr uint8_t t = (int)0xFF; + uint8_t g0 = (mask & 1) * t; + uint8_t g1 = ((mask & 2) >> 1) * t; + uint8_t g2 = ((mask & 4) >> 2) * t; + uint8_t g3 = ((mask & 8) >> 3) * t; + uint8_t g4 = ((mask & 16) >> 4) * t; + uint8_t g5 = ((mask & 32) >> 5) * t; + uint8_t g6 = ((mask & 64) >> 6) * t; + uint8_t g7 = ((mask & 128) >> 7) * t; + uint8_t g8 = ((mask & 256) >> 8) * t; + 
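// (Annotation, not part of the original header: blendChoiceInner classifies a
// compile-time blend mask into nine cases so blend<mask> can emit the
// cheapest code -- mask 0 returns a unchanged, an all-ones mask returns b,
// the half1/half2 cases just recombine _vec0/_vec1, and only genuinely mixed
// masks pay for a vec_sel. GetMask1<N> builds the vec_sel operand by widening
// bit i of the mask into an all-ones lane i; for bytes, mask = 0b101 expands
// to {0xFF, 0x00, 0xFF, 0x00, ...}, i.e. lanes 0 and 2 come from b and every
// other lane from a.)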
uint8_t g9 = ((mask & 512) >> 9) * t; + uint8_t g10 = ((mask & 1024) >> 10) * t; + uint8_t g11 = ((mask & 2048) >> 11) * t; + uint8_t g12 = ((mask & 4096) >> 12) * t; + uint8_t g13 = ((mask & 8192) >> 13) * t; + uint8_t g14 = ((mask & 16384) >> 14) * t; + uint8_t g15 = ((mask & 32768) >> 15) * t; + return (typename VecBinaryType<1>::type){ + g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15}; +} + +template <> +constexpr auto GetMask2<1>(const uint64_t mask) { + uint64_t mask2 = (mask & 0xFFFFFFFF) >> 16; + return GetMask1<1>(mask2); +} + +template <> +constexpr auto GetMask1<2>(const uint64_t mask) { + constexpr uint16_t t = (int)0xFFFF; + uint16_t g0 = (mask & 1) * t; + uint16_t g1 = ((mask & 2) >> 1) * t; + uint16_t g2 = ((mask & 4) >> 2) * t; + uint16_t g3 = ((mask & 8) >> 3) * t; + uint16_t g4 = ((mask & 16) >> 4) * t; + uint16_t g5 = ((mask & 32) >> 5) * t; + uint16_t g6 = ((mask & 64) >> 6) * t; + uint16_t g7 = ((mask & 128) >> 7) * t; + return (typename VecBinaryType<2>::type){g0, g1, g2, g3, g4, g5, g6, g7}; +} + +template <> +constexpr auto GetMask2<2>(const uint64_t mask) { + uint64_t mask2 = (mask & 0xFFFF) >> 8; + return GetMask1<2>(mask2); +} + +template <> +constexpr auto GetMask1<4>(const uint64_t mask) { + uint32_t g0 = (mask & 1) * 0xffffffff; + uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff; + uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff; + uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff; + return (typename VecBinaryType<4>::type){g0, g1, g2, g3}; +} + +template <> +constexpr auto GetMask2<4>(const uint64_t mask) { + uint64_t mask2 = (mask & 0xFF) >> 4; + return GetMask1<4>(mask2); +} + +template <> +constexpr auto GetMask1<8>(const uint64_t mask) { + uint64_t g0 = (mask & 1) * 0xffffffffffffffff; + uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; + return (typename VecBinaryType<8>::type){g0, g1}; +} + +template <> +constexpr auto GetMask2<8>(const uint64_t mask) { + uint64_t mask2 = (mask & 0xF) >> 2; + return GetMask1<8>(mask2); +} + +template +constexpr int maskForComplex(uint32_t mask) { + return 0; +} + +template <> +constexpr int maskForComplex<8>(uint32_t mask) { + mask = mask & 0xF; + int complex_mask = 0; + if (mask & 1) + complex_mask |= 3; + if (mask & 2) + complex_mask |= (3 << 2); + if (mask & 4) + complex_mask |= (3 << 4); + if (mask & 8) + complex_mask |= (3 << 6); + return complex_mask; +} + +template <> +constexpr int maskForComplex<16>(uint32_t mask) { + mask = mask & 0x3; + int complex_mask = 0; + if (mask & 1) + complex_mask |= 3; + if (mask & 2) + complex_mask |= (3 << 2); + return complex_mask; +} + +template > +constexpr int blend_choice() { + return 0xAA; +} + +template <> +constexpr int blend_choice>() { + return 0x0A; +} + +constexpr int64_t allbitset(int16_t x) { + int64_t onex = 1; + return (onex << x) - onex; +} + +namespace { /* unnamed namespace */ + +ZSimdVect vec_mergee(ZSimdVect x, ZSimdVect y) { + constexpr ZSimdVectBinary mergee_mask{ + 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}; + return vec_perm(x, y, mergee_mask); +} + +ZSimdVect vec_mergee(ZSimdVect x, ZSimdVect y) { + return vec_mergeh(x, y); +} + +ZSimdVect vec_mergeo(ZSimdVect x, ZSimdVect y) { + constexpr ZSimdVectBinary mergeo_mask{ + 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}; + return vec_perm(x, y, mergeo_mask); +} + +ZSimdVect vec_mergeo(ZSimdVect x, ZSimdVect y) { + return vec_mergel(x, y); +} + +} /* unnamed namespace */ + +// +template +constexpr auto GetBpermZeroMask() { + return ZSimdVectBinary{ + 128, + 128, + 128, 
+ 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 96, + 64, + 32, + 0}; +} + +template <> +constexpr auto GetBpermZeroMask() { + return ZSimdVectBinary{ + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 128, + 64, + 0}; +} + +constexpr auto GetSwapMaskFloat() { + return ZSimdVectBinary{ + 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}; +} + +template +struct Vectorized()>> { + public: + using value_type = T; + using vtype = ZSimdVect; + using vmaskType = ZSimdVectBinary; + using size_type = int; + // because of gcc inconsistency for int64_t we are obliged to use this, not + // value_type + using ElementType = ZSimdVectElement; + using vinner_data = std::pair; + + private: + vtype _vec0; + vtype _vec1; + + public: + static constexpr size_type size() { + return VECTOR_WIDTH / sizeof(ElementType); + } + Vectorized() {} + + C10_ALWAYS_INLINE Vectorized(vtype v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(const vinner_data &v) : _vec0{v.first}, _vec1{v.second} {} + C10_ALWAYS_INLINE Vectorized(vtype v1, vtype v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(T s) + : _vec0{vec_splats((ElementType)s)}, _vec1{vec_splats((ElementType)s)} {} + + template + struct LoaduHelper { + static Vectorized C10_ALWAYS_INLINE + loadu(const U* ptr, int count = size()) { + __at_align__ ElementType tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType)); + + return { + vec_xl(offset0, &(tmp_values[0])), + vec_xl(offset16, &(tmp_values[0]))}; + } + }; + + template + struct LoaduHelper { + static Vectorized C10_ALWAYS_INLINE + loadu(const ElementType* ptr, int count = size()) { + if (count == size()) { + return { + vec_xl(offset0, ptr), + vec_xl(offset16, ptr)}; + } + + __at_align__ ElementType tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType)); + + return { + vec_xl(offset0, &(tmp_values[0])), + vec_xl(offset16, &(tmp_values[0]))}; + } + }; + + template + static Vectorized C10_ALWAYS_INLINE + loadu(const U* ptr, int count = size()) { + return LoaduHelper::loadu(ptr, count); + } + + template + static Vectorized C10_ALWAYS_INLINE + loadu_one_fourth(const U* ptr) { + // load only first 8 bytes + // only intended to be used with uint8_t + return loadu(ptr, 8 / sizeof(ElementType)); + } + + template + struct StoreHelper { + static void C10_ALWAYS_INLINE store(const Vectorized &vec, U* ptr, int count = size()) { + if (count > 0) { + __at_align__ ElementType tmp_values[size()]; + vec_xst(vec._vec0, offset0, &(tmp_values[0])); + vec_xst(vec._vec1, offset16, &(tmp_values[0])); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(ElementType)); + } + } + }; + + template + struct StoreHelper { + static void C10_ALWAYS_INLINE store(const Vectorized &vec, ElementType* ptr, int count = size()) { + if (count == size()) { + vec_xst(vec._vec0, offset0, ptr); + vec_xst(vec._vec1, offset16, ptr); + } else if (count > 0) { + __at_align__ ElementType tmp_values[size()]; + vec_xst(vec._vec0, offset0, &(tmp_values[0])); + vec_xst(vec._vec1, offset16, &(tmp_values[0])); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(ElementType)); + } + } + }; + + template + void C10_ALWAYS_INLINE store(U* ptr, int count = size()) const { + return StoreHelper::store(*this, ptr, count); + } + + C10_ALWAYS_INLINE const vtype& vec0() const { + return _vec0; + } + + C10_ALWAYS_INLINE const vtype& vec1() const { + return _vec1; + } + + 
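  // (Annotation, not part of the original header.) LoaduHelper/StoreHelper
  // give partial loads and stores well-defined semantics: when count is less
  // than size(), data is staged through a zeroed, aligned temporary and moved
  // with memcpy, so lanes past count read as zero and are never written back.
  // A hypothetical usage sketch:
  //
  //   float src[3] = {1.f, 2.f, 3.f};
  //   auto v = Vectorized<float>::loadu(src, 3); // lanes 3..7 are 0.f
  //   float dst[3];
  //   v.store(dst, 3);                           // writes exactly 3 floats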
C10_ALWAYS_INLINE vinner_data data() const { + return std::make_pair<>(_vec0, _vec1); + } + + C10_ALWAYS_INLINE operator vinner_data() const { + return data(); + } + + C10_ALWAYS_INLINE const vmaskType vecb0() const { + return (vmaskType)_vec0; + } + C10_ALWAYS_INLINE const vmaskType vecb1() const { + return (vmaskType)_vec1; + } + + static Vectorized C10_ALWAYS_INLINE blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + return { + vec_sel(a._vec0, b._vec0, mask.vecb0()), + vec_sel(a._vec1, b._vec1, mask.vecb1())}; + } + + template = 0> + C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4) + : _vec0{s1, s2}, _vec1{s3, s4} {} + + template = 0> + C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4, T s5, T s6, T s7, T s8) + : _vec0{s1, s2, s3, s4}, _vec1{s5, s6, s7, s8} {} + + template = 0> + C10_ALWAYS_INLINE Vectorized( + T s1, + T s2, + T s3, + T s4, + T s5, + T s6, + T s7, + T s8, + T s9, + T s10, + T s11, + T s12, + T s13, + T s14, + T s15, + T s16) + : _vec0{s1, s2, s3, s4, s5, s6, s7, s8}, + _vec1{s9, s10, s11, s12, s13, s14, s15, s16} {} + + template = 0> + C10_ALWAYS_INLINE Vectorized( + T s1, + T s2, + T s3, + T s4, + T s5, + T s6, + T s7, + T s8, + T s9, + T s10, + T s11, + T s12, + T s13, + T s14, + T s15, + T s16, + T s17, + T s18, + T s19, + T s20, + T s21, + T s22, + T s23, + T s24, + T s25, + T s26, + T s27, + T s28, + T s29, + T s30, + T s31, + T s32) + : _vec0{s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16}, + _vec1{ + s17, + s18, + s19, + s20, + s21, + s22, + s23, + s24, + s25, + s26, + s27, + s28, + s29, + s30, + s31, + s32} {} + + template + static std::enable_if_t> arange( + T base = 0, + step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + + template + static std::enable_if_t> arange( + T base = 0, + step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + 2 * step, + base + 3 * step, + base + 4 * step, + base + 5 * step, + base + 6 * step, + base + 7 * step); + } + + template + static std::enable_if_t> arange( + T base = 0, + step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + 2 * step, + base + 3 * step, + base + 4 * step, + base + 5 * step, + base + 6 * step, + base + 7 * step, + base + 8 * step, + base + 9 * step, + base + 10 * step, + base + 11 * step, + base + 12 * step, + base + 13 * step, + base + 14 * step, + base + 15 * step); + } + + template + static std::enable_if_t> arange( + T base = 0, + step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + 2 * step, + base + 3 * step, + base + 4 * step, + base + 5 * step, + base + 6 * step, + base + 7 * step, + base + 8 * step, + base + 9 * step, + base + 10 * step, + base + 11 * step, + base + 12 * step, + base + 13 * step, + base + 14 * step, + base + 15 * step, + base + 16 * step, + base + 17 * step, + base + 18 * step, + base + 19 * step, + base + 20 * step, + base + 21 * step, + base + 22 * step, + base + 23 * step, + base + 24 * step, + base + 25 * step, + base + 26 * step, + base + 27 * step, + base + 28 * step, + base + 29 * step, + base + 30 * step, + base + 31 * step); + } + + // blend section + template + static std::enable_if_t(mask) == 0, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t(mask) == 1, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static 
std::enable_if_t(mask) == 2, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t(mask) == 3, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + return {a._vec0, b._vec1}; + } + + template + static std::enable_if_t(mask) == 4, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vmaskType mask_1st = GetMask1(mask); + return {(vtype)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1}; + } + + template + static std::enable_if_t(mask) == 5, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vmaskType mask_1st = GetMask1(mask); + return {(vtype)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1}; + } + + template + static std::enable_if_t(mask) == 6, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vmaskType mask_2nd = GetMask2(mask); + // generated masks + return {a._vec0, (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static std::enable_if_t(mask) == 7, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vmaskType mask_2nd = GetMask2(mask); + // generated masks + return {b._vec0, (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static std::enable_if_t(mask) == 8, Vectorized> + C10_ALWAYS_INLINE blend(const Vectorized& a, const Vectorized& b) { + const vmaskType mask_1st = GetMask1(mask); + const vmaskType mask_2nd = GetMask2(mask); + return { + (vtype)vec_sel(a._vec0, b._vec0, mask_1st), + (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + template + static inline std::enable_if_t<(Z >= C), Vectorized> set_inner( + const Vectorized& a, + const Vectorized& b, + size_t count) { + return b; + } + + template + static inline std::enable_if_t<(Z < C), Vectorized> set_inner( + const Vectorized& a, + const Vectorized& b, + size_t count) { + if (count == Z) + return blend(a, b); + else + return set_inner(a, b, count); + } + + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + size_t count = size()) { + if (count == 0) + return a; + return set_inner<1, size()>(a, b, count); + } + + const ElementType& operator[](int idx) const = delete; + ElementType& operator[](int idx) = delete; + + Vectorized _not() const { + return {(vtype)vec_nor(vecb0(), vecb0()), (vtype)vec_nor(vecb1(), vecb1())}; + } + + Vectorized C10_ALWAYS_INLINE eq(const Vectorized& other) const { + return (*this == other) & Vectorized((T)1.0); + } + Vectorized C10_ALWAYS_INLINE ne(const Vectorized& other) const { + return (*this != other) & Vectorized((T)1.0); + } + Vectorized C10_ALWAYS_INLINE gt(const Vectorized& other) const { + return (*this > other) & Vectorized((T)1.0); + } + Vectorized C10_ALWAYS_INLINE ge(const Vectorized& other) const { + return (*this >= other) & Vectorized((T)1.0); + } + Vectorized C10_ALWAYS_INLINE lt(const Vectorized& other) const { + return (*this < other) & Vectorized((T)1.0); + } + Vectorized C10_ALWAYS_INLINE le(const Vectorized& other) const { + return (*this <= other) & Vectorized((T)1.0); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized C10_ALWAYS_INLINE abs() const { + return {vec_abs(_vec0), vec_abs(_vec1)}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized C10_ALWAYS_INLINE abs() const { + return {_vec0, _vec1}; + } + + Vectorized C10_ALWAYS_INLINE neg() const { + return {-_vec0, -_vec1}; + } + + 
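  // (Annotation, not part of the original header.) The comparison operators
  // used by eq/ne/gt/ge/lt/le above return all-ones lane masks, so ANDing
  // with Vectorized<T>((T)1.0) keeps the bit pattern of 1.0 exactly in the
  // lanes that compared true. Per lane the result is the numeric 0/1 that
  // PyTorch's scalar comparison semantics expect, i.e. eq behaves like
  //   out[i] = (a[i] == b[i]) ? T(1) : T(0);
  // and _not() builds the lane-wise complement from vec_nor(x, x).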
Vectorized isnan() const { + auto x = *this; + auto ret = (x == x); + return ret._not(); + } + + bool has_inf_nan() const { + for (const auto i : c10::irange(size()/2)) { + if(_isnan(_vec0[i]) || _isinf(_vec0[i])) { + return true; + } + } + for (const auto i : c10::irange(size()/2)) { + if(_isnan(_vec1[i]) || _isinf(_vec1[i])) { + return true; + } + } + return false; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized angle() const { + auto tmp = blendv( + Vectorized(0), Vectorized(c10::pi), *this < Vectorized(0)); + return blendv(tmp, *this, isnan()); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized angle() const { + return blendv( + Vectorized(0), Vectorized(c10::pi), *this < Vectorized(0)); + } + + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized{0}; + } + Vectorized conj() const { + return *this; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + int zero_mask() const { + auto cmp = (*this == Vectorized(0)); + constexpr auto mask_zero_bits = GetBpermZeroMask(); + ZSimdVectBinary result0 = + vec_bperm_u128((ZSimdVectBinary)cmp.vecb0(), mask_zero_bits); + ZSimdVectBinary result1 = + vec_bperm_u128((ZSimdVectBinary)cmp.vecb1(), mask_zero_bits); + return (result0[0] | (result1[0] << (size() / 2))); + } + + Vectorized C10_ALWAYS_INLINE floor() const { + return {vec_floor(_vec0), vec_floor(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE ceil() const { + return {vec_ceil(_vec0), vec_ceil(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE round() const { + return {vec_round(_vec0), vec_round(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE rint() const { + return {vec_rint(_vec0), vec_rint(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE trunc() const { + return {vec_trunc(_vec0), vec_trunc(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE frac() const { + return *this - trunc(); + } + + Vectorized C10_ALWAYS_INLINE sqrt() const { + return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; + } + Vectorized C10_ALWAYS_INLINE reciprocal() const { + return Vectorized((T)1) / (*this); + } + Vectorized C10_ALWAYS_INLINE rsqrt() const { + return sqrt().reciprocal(); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapOrdinary(float (*const f)(float)) const { + float a00 = f(_vec0[0]); + float a01 = f(_vec0[1]); + float a02 = f(_vec0[2]); + float a03 = f(_vec0[3]); + float a10 = f(_vec1[0]); + float a11 = f(_vec1[1]); + float a12 = f(_vec1[2]); + float a13 = f(_vec1[3]); + return Vectorized{a00, a01, a02, a03, a10, a11, a12, a13}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapOrdinary(double (*const f)(double)) const { + return Vectorized(f(_vec0[0]), f(_vec0[1]), f(_vec1[0]), f(_vec1[1])); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapOrdinary( + float (*const f)(float, float), + const Vectorized& b) const { + float a00 = f(_vec0[0], b._vec0[0]); + float a01 = f(_vec0[1], b._vec0[1]); + float a02 = f(_vec0[2], b._vec0[2]); + float a03 = f(_vec0[3], b._vec0[3]); + float a10 = f(_vec1[0], b._vec1[0]); + float a11 = f(_vec1[1], b._vec1[1]); + float a12 = f(_vec1[2], b._vec1[2]); + float a13 = f(_vec1[3], b._vec1[3]); + return Vectorized{a00, a01, a02, a03, a10, a11, a12, a13}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapOrdinary( + double (*const f)(double, double), + const Vectorized& b) const { 
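    // (Annotation, not part of the original header.) mapOrdinary is the
    // scalar fallback: it reads each lane, applies the plain C function, and
    // repacks, so correctness never depends on a vector kernel existing.
    // mapSleef below is the fast path -- it statically selects the 4-lane
    // float or 2-lane double SLEEF kernel, and the _u05/_u10/_u15 suffixes on
    // those kernels encode the maximum error bound in ULPs (0.5, 1.0, 1.5).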
+ return Vectorized( + f(_vec0[0], b._vec0[0]), + f(_vec0[1], b._vec0[1]), + f(_vec1[0], b._vec1[0]), + f(_vec1[1], b._vec1[1])); + } + + template < + typename FloatOp, + typename DoubleOp, + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapSleef(FloatOp f, DoubleOp d) const { + vtype a0 = f(_vec0); + vtype a1 = f(_vec1); + return Vectorized{a0, a1}; + } + + template < + typename FloatOp, + typename DoubleOp, + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapSleef(FloatOp f, DoubleOp d) const { + return Vectorized(d(_vec0), d(_vec1)); + } + + template < + typename FloatOp, + typename DoubleOp, + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapSleef(FloatOp f, DoubleOp d, const Vectorized& b) + const { + vtype a0 = f(_vec0, b._vec0); + vtype a1 = f(_vec1, b._vec1); + return Vectorized{a0, a1}; + } + + template < + typename FloatOp, + typename DoubleOp, + typename U = T, + std::enable_if_t::value, int> = 0> + inline Vectorized mapSleef(FloatOp f, DoubleOp d, const Vectorized& b) + const { + return Vectorized(d(_vec0, b._vec0), d(_vec1, b._vec1)); + } + + Vectorized acos() const { + return mapSleef(Sleef_acosf4_u10, Sleef_acosd2_u10); + } + Vectorized asin() const { + return mapSleef(Sleef_asinf4_u10, Sleef_asind2_u10); + } + Vectorized atan() const { + return mapSleef(Sleef_atanf4_u10, Sleef_atand2_u10); + } + Vectorized atanh() const { + return mapSleef(Sleef_atanhf4_u10, Sleef_atanhd2_u10); + } + + Vectorized erf() const { + return mapSleef(Sleef_erff4_u10, Sleef_erfd2_u10); + } + Vectorized erfc() const { + return mapSleef(Sleef_erfcf4_u15, Sleef_erfcd2_u15); + } + + Vectorized exp() const { + return mapSleef(Sleef_expf4_u10, Sleef_expd2_u10); + } + Vectorized exp2() const { + return mapSleef(Sleef_exp2f4_u10, Sleef_exp2d2_u10); + } + Vectorized expm1() const { + return mapSleef(Sleef_expm1f4_u10, Sleef_expm1d2_u10); + } + Vectorized exp_u20() const { + return exp(); + } + + Vectorized log() const { + return mapSleef(Sleef_logf4_u10, Sleef_logd2_u10); + } + Vectorized log2() const { + return mapSleef(Sleef_log2f4_u10, Sleef_log2d2_u10); + } + Vectorized log10() const { + return mapSleef(Sleef_log10f4_u10, Sleef_log10d2_u10); + } + Vectorized log1p() const { + return mapSleef(Sleef_log1pf4_u10, Sleef_log1pd2_u10); + } + + Vectorized sin() const { + return mapSleef(Sleef_sinf4_u10, Sleef_sind2_u10); + } + Vectorized sinh() const { + return mapSleef(Sleef_sinhf4_u10, Sleef_sinhd2_u10); + } + Vectorized cos() const { + return mapSleef(Sleef_cosf4_u10, Sleef_cosd2_u10); + } + Vectorized cosh() const { + return mapSleef(Sleef_coshf4_u10, Sleef_coshd2_u10); + } + + Vectorized tan() const { + return mapSleef(Sleef_tanf4_u10, Sleef_tand2_u10); + } + Vectorized tanh() const { + return mapSleef(Sleef_tanhf4_u10, Sleef_tanhd2_u10); + } + + Vectorized lgamma() const { + return mapSleef(Sleef_lgammaf4_u10, Sleef_lgammad2_u10); + } + + Vectorized atan2(const Vectorized& b) const { + return mapSleef(Sleef_atan2f4_u10, Sleef_atan2d2_u10, b); + } + Vectorized copysign(const Vectorized& sign) const { + return mapSleef(Sleef_copysignf4, Sleef_copysignd2, sign); + } + Vectorized fmod(const Vectorized& q) const { + return mapSleef(Sleef_fmodf4, Sleef_fmodd2, q); + } + + Vectorized hypot(const Vectorized& b) const { + return mapSleef(Sleef_hypotf4_u05, Sleef_hypotd2_u05, b); + } + + Vectorized pow(const Vectorized& b) const { + return mapSleef(Sleef_powf4_u10, Sleef_powd2_u10, b); + } + + Vectorized nextafter(const Vectorized& b) 
const { + return mapSleef(Sleef_nextafterf4, Sleef_nextafterd2, b); + } + + Vectorized erfinv() const { + return mapOrdinary(calc_erfinv); + } + + Vectorized digamma() const { + return mapOrdinary(calc_digamma); + } + + Vectorized igamma(const Vectorized& x) const { + return mapOrdinary(calc_igamma, x); + } + + Vectorized igammac(const Vectorized& x) const { + return mapOrdinary(calc_igammac, x); + } + + Vectorized i0() const { + return mapOrdinary(calc_i0); + } + + Vectorized i0e() const { + return mapOrdinary(calc_i0e); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized minimum(const Vectorized& other) const { + return {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)}; + } + + /* Propagates NaN if either input is a NaN. */ + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized minimum(const Vectorized& other) const { + Vectorized tmp = {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)}; + tmp = blendv(tmp, *this, isnan()); + return blendv(tmp, other, other.isnan()); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized maximum(const Vectorized& other) const { + return {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)}; + } + + /* Propagates NaN if either input is a NaN. */ + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized maximum(const Vectorized& other) const { + Vectorized tmp = {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)}; + tmp = blendv(tmp, *this, isnan()); + return blendv(tmp, other, other.isnan()); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized clamp_min(const Vectorized& min) const { + return {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)}; + } + + /* Keeps NaN if actual value is NaN */ + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized clamp_min(const Vectorized& min) const { + Vectorized tmp = {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)}; + return blendv(tmp, *this, isnan()); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized clamp_max(const Vectorized& max) const { + return {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)}; + } + + /* Keeps NaN if actual value is NaN */ + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized clamp_max(const Vectorized& max) const { + Vectorized tmp = {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)}; + return blendv(tmp, *this, isnan()); + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized swapped() const { + auto swap_mask = GetSwapMaskFloat(); + vtype v0 = vec_perm(_vec0, _vec0, swap_mask); + vtype v1 = vec_perm(_vec1, _vec1, swap_mask); + return {v0, v1}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized swapped() const { + vtype v0 = vec_permi(_vec0, _vec0, 2); + vtype v1 = vec_permi(_vec1, _vec1, 2); + return {v0, v1}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + static Vectorized mergee(Vectorized& first, Vectorized& second) { + return { + vec_mergee(first._vec0, second._vec0), + vec_mergee(first._vec1, second._vec1)}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + static Vectorized mergeo(Vectorized& first, Vectorized& second) { + return { + vec_mergeo(first._vec0, second._vec0), + vec_mergeo(first._vec1, second._vec1)}; + } + + static Vectorized horizontal_add_perm( + Vectorized& first, + 
Vectorized& second) { + // we will simulate it differently with 6 instructions total + // lets permute second so that we can add it getting horizontal sums + auto first_perm = first.swapped(); // 2perm + auto second_perm = second.swapped(); // 2perm + // summ + auto first_ret = first + first_perm; // 2add + auto second_ret = second + second_perm; // 2 add + // now lets choose evens + return mergee(first_ret, second_ret); // 2 mergee's + } + + static Vectorized horizontal_sub_perm( + Vectorized& first, + Vectorized& second) { + // we will simulate it differently with 6 instructions total + // lets permute second so that we can add it getting horizontal sums + auto first_perm = first.swapped(); // 2perm + auto second_perm = second.swapped(); // 2perm + // summ + auto first_ret = first - first_perm; // 2sub + auto second_ret = second - second_perm; // 2 sub + // now lets choose evens + return mergee(first_ret, second_ret); // 2 mergee's + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized mergee() const { + return {vec_mergee(_vec0, _vec0), vec_mergee(_vec1, _vec1)}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized mergeo() const { + return {vec_mergeo(_vec0, _vec0), vec_mergeo(_vec1, _vec1)}; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized to_vec_float_helper() const { + int32_t values[8] = { + _vec0[0], + _vec0[1], + _vec0[2], + _vec0[3], + _vec0[4], + _vec0[5], + _vec0[6], + _vec0[7], + }; + + return Vectorized{ + values[0], values[1], values[2], values[3], + values[4], values[5], values[6], values[7] + }; + } + + template < + typename U = T, + std::enable_if_t::value, int> = 0> + Vectorized to_vec_uint8_helper() const { + // helper function for float to uint8_t conversion + uint8_t values[8] = { + static_cast(_vec0[0]), + static_cast(_vec0[1]), + static_cast(_vec0[2]), + static_cast(_vec0[3]), + static_cast(_vec1[0]), + static_cast(_vec1[1]), + static_cast(_vec1[2]), + static_cast(_vec1[3]), + }; + + return Vectorized{ + values[0], values[1], values[2], values[3], + values[4], values[5], values[6], values[7], + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + }; + } +}; + +#define ZVECTOR_OPERATORS(typex) \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec0() + b.vec0(), a.vec1() + b.vec1()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec0() - b.vec0(), a.vec1() - b.vec1()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec0() * b.vec0(), a.vec1() * b.vec1()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec0() / b.vec0(), a.vec1() / b.vec1()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + (Vectorized::vtype)(a.vecb0() & b.vecb0()), \ + (Vectorized::vtype)(a.vecb1() & b.vecb1())}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + (Vectorized::vtype)(a.vecb0() | b.vecb0()), \ + (Vectorized::vtype)(a.vecb1() | b.vecb1())}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const 
Vectorized& b) { \ + return Vectorized{ \ + (Vectorized::vtype)(a.vecb0() ^ b.vecb0()), \ + (Vectorized::vtype)(a.vecb1() ^ b.vecb1())}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator==(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + vec_cmpeq(a.vec0(), b.vec0()), vec_cmpeq(a.vec1(), b.vec1())}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator!=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + vec_cmpeq(a.vec0(), b.vec0()), vec_cmpeq(a.vec1(), b.vec1())} \ + ._not(); \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator>(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + vec_cmpgt(a.vec0(), b.vec0()), vec_cmpgt(a.vec1(), b.vec1())}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator>=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + vec_cmpge(a.vec0(), b.vec0()), vec_cmpge(a.vec1(), b.vec1())}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator<(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + vec_cmplt(a.vec0(), b.vec0()), vec_cmplt(a.vec1(), b.vec1())}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator<=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{ \ + vec_cmple(a.vec0(), b.vec0()), vec_cmple(a.vec1(), b.vec1())}; \ + } + +ZVECTOR_OPERATORS(float) +ZVECTOR_OPERATORS(double) +ZVECTOR_OPERATORS(int8_t) +ZVECTOR_OPERATORS(uint8_t) +ZVECTOR_OPERATORS(uint16_t) +ZVECTOR_OPERATORS(int16_t) +ZVECTOR_OPERATORS(int32_t) +ZVECTOR_OPERATORS(int64_t) + +#undef ZVECTOR_OPERATORS + +#define ZVECTOR_OPERATORS(typex) \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator<<(const Vectorized& a, const Vectorized& b) { \ + constexpr Vectorized::ElementType max_shift \ + = sizeof(Vectorized::ElementType) * CHAR_BIT; \ + \ + Vectorized::ElementType a_array[Vectorized::size()]; \ + Vectorized::ElementType b_array[Vectorized::size()]; \ + Vectorized::ElementType c_array[Vectorized::size()]; \ + \ + a.store(a_array); \ + b.store(b_array); \ + \ + for (int i = 0; i != Vectorized::size(); i++) { \ + typex shift = b_array[i]; \ + if ((static_cast>(shift) < 0) || (shift >= max_shift)) { \ + c_array[i] = 0; \ + } else { \ + c_array[i] = static_cast>(a_array[i]) << shift; \ + } \ + } \ + \ + return Vectorized::loadu(c_array); \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator>>(const Vectorized& a, const Vectorized& b) { \ + /* right shift value to retain sign bit for signed and no bits for unsigned */ \ + constexpr Vectorized::ElementType max_shift \ + = sizeof(typex) * CHAR_BIT - std::is_signed_v; \ + \ + Vectorized::ElementType a_array[Vectorized::size()]; \ + Vectorized::ElementType b_array[Vectorized::size()]; \ + Vectorized::ElementType c_array[Vectorized::size()]; \ + \ + a.store(a_array); \ + b.store(b_array); \ + \ + for (int i = 0; i != Vectorized::size(); i++) { \ + typex shift = b_array[i]; \ + if ((static_cast>(shift) < 0) || (shift >= max_shift)) { \ + c_array[i] = a_array[i] >> max_shift; \ + } else { \ + c_array[i] = a_array[i] >> shift; \ + } \ + } \ + \ + return Vectorized::loadu(c_array); \ + } \ + \ + template <> \ + inline Vectorized operator~(const Vectorized& a) { \ + return a._not(); \ + } + +ZVECTOR_OPERATORS(int8_t) +ZVECTOR_OPERATORS(uint8_t) +ZVECTOR_OPERATORS(uint16_t) +ZVECTOR_OPERATORS(int16_t) +ZVECTOR_OPERATORS(int32_t) +ZVECTOR_OPERATORS(int64_t) + +#undef ZVECTOR_OPERATORS + +#define DEFINE_MAXMIN_FUNCS(operand_type) \ + template <> \ + Vectorized inline maximum( \ + const Vectorized& a, const Vectorized& b) { \ + return 
a.maximum(b); \ + } \ + template <> \ + Vectorized inline minimum( \ + const Vectorized& a, const Vectorized& b) { \ + return a.minimum(b); \ + } + +#define DEFINE_CLAMP_MAXMIN_FUNCS(typex) \ + DEFINE_MAXMIN_FUNCS(typex) \ + template <> \ + Vectorized C10_ALWAYS_INLINE clamp_min( \ + const Vectorized& a, const Vectorized& min) { \ + return a.clamp_min(min); \ + } \ + template <> \ + Vectorized C10_ALWAYS_INLINE clamp_max( \ + const Vectorized& a, const Vectorized& max) { \ + return a.clamp_max(max); \ + } \ + template <> \ + Vectorized C10_ALWAYS_INLINE clamp( \ + const Vectorized& a, \ + const Vectorized& min, \ + const Vectorized& max) { \ + return clamp_max(clamp_min(a, min), max); \ + } + +DEFINE_CLAMP_MAXMIN_FUNCS(int8_t) +DEFINE_CLAMP_MAXMIN_FUNCS(uint8_t) +DEFINE_CLAMP_MAXMIN_FUNCS(int16_t) +DEFINE_CLAMP_MAXMIN_FUNCS(int32_t) +DEFINE_CLAMP_MAXMIN_FUNCS(int64_t) +DEFINE_CLAMP_MAXMIN_FUNCS(float) +DEFINE_CLAMP_MAXMIN_FUNCS(double) + +namespace { /* unnamed namespace */ + +#if !defined(vec_float) || __ARCH__ < 13 +#warning \ + "float->int and int->float conversion is simulated. compile for z15 for improved performance" +inline ZSimdVect vec_int_flt(const ZSimdVect x) { + return ZSimdVect{float(x[0]), float(x[1]), float(x[2]), float(x[3])}; +} +inline ZSimdVect vec_flt_int(const ZSimdVect x) { + return ZSimdVect{int(x[0]), int(x[1]), int(x[2]), int(x[3])}; +} +#else +#define vec_int_flt vec_float +#define vec_flt_int vec_signed +#endif + +Vectorized zvec_convert_to_float(const Vectorized& x) { + return {vec_int_flt(x.vec0()), vec_int_flt(x.vec1())}; +} + +Vectorized zvec_convert_to_int(const Vectorized& x) { + return {vec_flt_int(x.vec0()), vec_flt_int(x.vec1())}; +} + +Vectorized zvec_convert_to_float(const Vectorized& x) { + return {vec_double(x.vec0()), vec_double(x.vec1())}; +} + +Vectorized zvec_convert_to_int(const Vectorized& x) { + return {vec_signed(x.vec0()), vec_signed(x.vec1())}; +} + +} /* unnamed namespace */ + +template +Vectorized cast_zvector(const Vectorized& x) { + using cast_type = typename Vectorized::vtype; + return Vectorized{(cast_type)x.vec0(), (cast_type)x.vec1()}; +} + +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + __builtin_s390_vfmasb(a.vec0(), b.vec0(), c.vec0()), + __builtin_s390_vfmasb(a.vec1(), b.vec1(), c.vec1())}; +} +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + __builtin_s390_vfmadb(a.vec0(), b.vec0(), c.vec0()), + __builtin_s390_vfmadb(a.vec1(), b.vec1(), c.vec1())}; +} +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; +} +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; +} +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; +} + +template <> +Vectorized C10_ALWAYS_INLINE +convert_to_int_of_same_size(const Vectorized& src) { + return zvec_convert_to_int(src); +} + +template <> +Vectorized C10_ALWAYS_INLINE +convert_to_int_of_same_size(const Vectorized& src) { + return zvec_convert_to_int(src); 
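// (Annotation, not part of the original header.) fmadd above lowers to the
// single-instruction fused multiply-add builtins __builtin_s390_vfmasb /
// __builtin_s390_vfmadb for float and double, while the integer overloads
// fall back to a separate multiply and add. Likewise, the simulated
// vec_flt_int fallback further up converts with int(x[i]), i.e. truncation
// toward zero, matching the static_cast used in the scalar tail of the
// convert() loops just below.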
+} + +template <> +inline void convert(const int32_t* src, float* dst, int64_t n) { + // int32_t and float have same size + int64_t i; + for (i = 0; i <= (n - Vectorized::size()); + i += Vectorized::size()) { + const int32_t* src_a = src + i; + float* dst_a = dst + i; + auto input_vec = Vectorized::loadu(src_a); + auto output_vec = zvec_convert_to_float(input_vec); + output_vec.store(dst_a); + } + + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int64_t* src, double* dst, int64_t n) { + int64_t i; + for (i = 0; i <= (n - Vectorized::size()); + i += Vectorized::size()) { + const int64_t* src_a = src + i; + double* dst_a = dst + i; + auto input_vec = Vectorized::loadu(src_a); + auto output_vec = zvec_convert_to_float(input_vec); + output_vec.store(dst_a); + } + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +#define DEFINE_REINTERPRET_CAST_FUNCS(Fst, Cst) \ + template <> \ + C10_ALWAYS_INLINE Vectorized cast( \ + const Vectorized& src) { \ + return cast_zvector(src); \ + } + +#define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(Fst) \ + DEFINE_REINTERPRET_CAST_FUNCS(Fst, double) \ + DEFINE_REINTERPRET_CAST_FUNCS(Fst, float) \ + DEFINE_REINTERPRET_CAST_FUNCS(Fst, int64_t) \ + DEFINE_REINTERPRET_CAST_FUNCS(Fst, int32_t) \ + DEFINE_REINTERPRET_CAST_FUNCS(Fst, int16_t) + +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t) + +#undef DEFINE_REINTERPRET_CAST_FUNCS + +template +struct unpack_type { + using type = T; +}; +template <> +struct unpack_type { + using type = int16_t; +}; +template <> +struct unpack_type { + using type = int16_t; +}; +template <> +struct unpack_type { + using type = int32_t; +}; + +template +struct pack_type { + using type = T; +}; +template <> +struct pack_type { + using type = int8_t; +}; +template <> +struct pack_type { + using type = int16_t; +}; + +namespace { /* unnamed namespace */ + +template ::type> +std::pair, Vectorized> unpack(const Vectorized& x) { + auto vec0 = vec_unpackh(x.vec0()); + auto vec1 = vec_unpackl(x.vec0()); + auto vec2 = vec_unpackh(x.vec1()); + auto vec3 = vec_unpackl(x.vec1()); + return {Vectorized{vec0, vec1}, Vectorized{vec2, vec3}}; +} + +template <> +std::pair, Vectorized> unpack( + const Vectorized& x) { + using typeX = typename Vectorized::vtype; + typeX vec0 = vec_unpackh(x.vec0()); + typeX vec1 = vec_unpackl(x.vec0()); + typeX vec2 = vec_unpackh(x.vec1()); + typeX vec3 = vec_unpackl(x.vec1()); + // auto mask = Vectorized(0xFF); + // vec0 = vec0 & mask; + // vec1 = vec1 & mask; + // vec2 = vec2 & mask; + // vec3 = vec3 & mask; + return { + cast_zvector(Vectorized{vec0, vec1}), + cast_zvector(Vectorized{vec2, vec3})}; +} + +template ::type> +Vectorized pack(const Vectorized& first, const Vectorized& second) { + auto vec0 = vec_packs(first.vec0(), first.vec1()); + auto vec1 = vec_packs(second.vec0(), second.vec1()); + return Vectorized{vec0, vec1}; +} + +template <> +Vectorized pack( + const Vectorized& first, + const Vectorized& second) { + auto vec0 = vec_packsu(first.vec0(), first.vec1()); + auto vec1 = vec_packsu(second.vec0(), second.vec1()); + return Vectorized{vec0, vec1}; +} + +} /* unnamed namespace */ + +//////////////////////////////////QUANT/////////////////////////////////////////// +template +struct Vectorized()>> { + public: + using value_type = typename T::underlying; + using vtype = 
ZSimdVect; + using vmaskType = ZSimdVectBinary; + using vinner_type = Vectorized; + using size_type = int; + + static constexpr size_type size() { + return VECTOR_WIDTH / sizeof(value_type); + } + + static constexpr size_t float_num_vecs() { + return size() / Vectorized::size(); + } + static constexpr int int_num_vecs() { + return float_num_vecs(); + } + using float_vec_return_type = std::array, float_num_vecs()>; + using int_vec_return_type = + std::array, int_num_vecs()>; + + private: + vinner_type _vec; + + public: + Vectorized() {} + + explicit C10_ALWAYS_INLINE Vectorized(vinner_type v) : _vec{v} {} + Vectorized(const T& val) : _vec(val.val_) {} + + C10_ALWAYS_INLINE const vinner_type& vec() const { + return _vec; + } + + template + static Vectorized C10_ALWAYS_INLINE + loadu(const U* ptr, int count = size()) { + return Vectorized{vinner_type::loadu(ptr, count)}; + } + + template + void C10_ALWAYS_INLINE store(U* ptr, int count = size()) const { + _vec.store(ptr, count); + } + + Vectorized relu(Vectorized zero_point) const { + return Vectorized{_vec.maximum(zero_point._vec)}; + } + + Vectorized relu6(Vectorized zero_point, Vectorized q_six) const { + auto ret_max = _vec.maximum(zero_point._vec); + auto ret_min = ret_max.minimum(q_six._vec); + return Vectorized{ret_min}; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 1, int> = 0> + int_vec_return_type widening_subtract(Vectorized b) const { + return {*this - b}; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 1, int> = 0> + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + auto float_val = zvec_convert_to_float(_vec); + return {fmadd(scale, float_val, scale_zp_premul)}; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 1, int> = 0> + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + auto float_val = zvec_convert_to_float(_vec); + return {(float_val - zero_point) * scale}; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 1, int> = 0> + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + Vectorized vecf = rhs[0]; + vecf = vecf * Vectorized(inverse_scale); + vecf = vecf.rint() + Vectorized((float)(zero_point)); + auto veci = zvec_convert_to_int(vecf); + + return Vectorized{veci}; + } + + template < + typename U = T, + std::enable_if_t::int_num_vecs() == 1, int> = 0> + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized vi = inp[0]; + auto vecf = zvec_convert_to_float(vi.vec()); + vecf = vecf * Vectorized(multiplier); + vecf = vecf.rint(); + auto veci = zvec_convert_to_int(vecf) + Vectorized(zero_point); + + return Vectorized{veci}; + } + + template < + typename U = T, + std::enable_if_t::int_num_vecs() == 4, int> = 0> + int_vec_return_type widening_subtract(Vectorized b) const { + auto ret16 = unpack(_vec); + auto ret16B = unpack(b.vec()); + auto ret32_0 = unpack(ret16.first); + auto ret32_1 = unpack(ret16.second); + auto ret32B_0 = unpack(ret16B.first); + auto ret32B_1 = unpack(ret16B.second); + + return { + Vectorized(ret32_0.first - ret32B_0.first), + Vectorized(ret32_0.second - ret32B_0.second), + Vectorized(ret32_1.first - ret32B_1.first), + Vectorized(ret32_1.second - ret32B_1.second)}; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 
4, int> = 0> + float_vec_return_type C10_ALWAYS_INLINE dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + // unpacking unsigned as signed + auto ret16 = unpack(_vec); + auto ret32_0 = unpack(ret16.first); + auto ret32_1 = unpack(ret16.second); + + auto vecf_0 = zvec_convert_to_float(ret32_0.first); + auto vecf_1 = zvec_convert_to_float(ret32_0.second); + + auto vecf_2 = zvec_convert_to_float(ret32_1.first); + auto vecf_3 = zvec_convert_to_float(ret32_1.second); + return { + fmadd(scale, vecf_0, scale_zp_premul), + fmadd(scale, vecf_1, scale_zp_premul), + fmadd(scale, vecf_2, scale_zp_premul), + fmadd(scale, vecf_3, scale_zp_premul)}; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 4, int> = 0> + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + // unpacking unsigned as signed + auto ret16 = unpack(_vec); + auto ret32_0 = unpack(ret16.first); + auto ret32_1 = unpack(ret16.second); + + auto vecf_0 = zvec_convert_to_float(ret32_0.first); + auto vecf_1 = zvec_convert_to_float(ret32_0.second); + + auto vecf_2 = zvec_convert_to_float(ret32_1.first); + auto vecf_3 = zvec_convert_to_float(ret32_1.second); + + return { + (vecf_0 - zero_point) * scale, + (vecf_1 - zero_point) * scale, + (vecf_2 - zero_point) * scale, + (vecf_3 - zero_point) * scale }; + } + + template < + typename U = T, + std::enable_if_t::float_num_vecs() == 4, int> = 0> + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + auto vec_inverse = Vectorized(inverse_scale); + auto vec_zero_point = Vectorized((float)zero_point); + + auto vecf0 = rhs[0]; + auto vecf2 = rhs[1]; + auto vecf4 = rhs[2]; + auto vecf6 = rhs[3]; + + vecf0 = vecf0 * vec_inverse; + vecf2 = vecf2 * vec_inverse; + vecf4 = vecf4 * vec_inverse; + vecf6 = vecf6 * vec_inverse; + + vecf0 = vecf0.rint() + vec_zero_point; + vecf2 = vecf2.rint() + vec_zero_point; + vecf4 = vecf4.rint() + vec_zero_point; + vecf6 = vecf6.rint() + vec_zero_point; + + auto veci0 = zvec_convert_to_int(vecf0); + auto veci2 = zvec_convert_to_int(vecf2); + auto veci4 = zvec_convert_to_int(vecf4); + auto veci6 = zvec_convert_to_int(vecf6); + + auto vecshi0 = pack(veci0, veci2); + auto vecshi2 = pack(veci4, veci6); + auto ret = pack(vecshi0, vecshi2); + + return Vectorized{ret}; + } + + template < + typename U = T, + std::enable_if_t::int_num_vecs() == 4, int> = 0> + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized vec_multiplier = Vectorized(multiplier); + Vectorized vec_zero_point = Vectorized(zero_point); + + Vectorized vi0 = inp[0]; + Vectorized vi1 = inp[1]; + Vectorized vi2 = inp[2]; + Vectorized vi3 = inp[3]; + + auto vecf0 = zvec_convert_to_float(vi0.vec()); + auto vecf2 = zvec_convert_to_float(vi1.vec()); + + auto vecf4 = zvec_convert_to_float(vi2.vec()); + auto vecf6 = zvec_convert_to_float(vi3.vec()); + + vecf0 = vecf0 * vec_multiplier; + vecf2 = vecf2 * vec_multiplier; + + vecf4 = vecf4 * vec_multiplier; + vecf6 = vecf6 * vec_multiplier; + + vecf0 = vecf0.rint(); + vecf2 = vecf2.rint(); + vecf4 = vecf4.rint(); + vecf6 = vecf6.rint(); + + auto veci0 = zvec_convert_to_int(vecf0); + auto veci2 = zvec_convert_to_int(vecf2); + auto veci4 = zvec_convert_to_int(vecf4); + auto veci6 = zvec_convert_to_int(vecf6); + + veci0 = veci0 + vec_zero_point; + veci2 = veci2 + vec_zero_point; + + veci4 = veci4 + vec_zero_point; + veci6 = 
veci6 + vec_zero_point; + + auto vecshi0 = pack(veci0, veci2); + auto vecshi2 = pack(veci4, veci6); + + auto ret = pack(vecshi0, vecshi2); + + return Vectorized{ret}; + } + + Vectorized C10_ALWAYS_INLINE eq(const Vectorized& other) const { + return Vectorized{_vec.eq(other._vec)}; + } + Vectorized C10_ALWAYS_INLINE ne(const Vectorized& other) const { + return Vectorized{_vec.ne(other._vec)}; + } + Vectorized C10_ALWAYS_INLINE gt(const Vectorized& other) const { + return Vectorized{_vec.gt(other._vec)}; + } + Vectorized C10_ALWAYS_INLINE ge(const Vectorized& other) const { + return Vectorized{_vec.ge(other._vec)}; + } + Vectorized C10_ALWAYS_INLINE lt(const Vectorized& other) const { + return Vectorized{_vec.lt(other._vec)}; + } + Vectorized C10_ALWAYS_INLINE le(const Vectorized& other) const { + return Vectorized{_vec.le(other._vec)}; + } + + Vectorized clamp_min(const Vectorized& min) const { + return Vectorized{_vec.clamp_min(min._vec)}; + } + + Vectorized clamp_max(const Vectorized& max) const { + return Vectorized{_vec.clamp_max(max._vec)}; + } + + Vectorized minimum(const Vectorized& other) const { + return Vectorized{_vec.minimum(other._vec)}; + } + + Vectorized maximum(const Vectorized& other) const { + return Vectorized{_vec.maximum(other._vec)}; + } +}; + +#define ZVECTOR_OPERATORS(typex) \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() + b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() - b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() * b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() / b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() & b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() | b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() ^ b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator==(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() == b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator!=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() != b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator>(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() > b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator>=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() >= b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator<(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() < b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator<=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() <= b.vec()}; \ + } + +ZVECTOR_OPERATORS(c10::qint32) +ZVECTOR_OPERATORS(c10::qint8) +ZVECTOR_OPERATORS(c10::quint8) + +#undef ZVECTOR_OPERATORS + +DEFINE_CLAMP_MAXMIN_FUNCS(c10::quint8) +DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint8) +DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint32) + +template +constexpr auto real_mask() { + return 
(ZSimdVect)ZSimdVectBinary{0xFFFFFFFF, 0, 0xFFFFFFFF, 0}; +} + +template <> +constexpr auto real_mask() { + return (ZSimdVect)ZSimdVectBinary{0xFFFFFFFFFFFFFFFF, 0}; +} + +template +constexpr auto image_mask() { + return (ZSimdVect)ZSimdVectBinary{0, 0xFFFFFFFF, 0, 0xFFFFFFFF}; +} + +template <> +constexpr auto image_mask() { + return (ZSimdVect)ZSimdVectBinary{0, 0xFFFFFFFFFFFFFFFF}; +} + +template +constexpr auto rsign_mask() { + return ZSimdVect{-0.f, 0.f, -0.f, 0.f}; +} + +template <> +constexpr auto rsign_mask() { + return ZSimdVect{-0.0, 0.f}; +} + +template +constexpr auto isign_mask() { + return ZSimdVect{0.0, -0.f, 0.0, -0.f}; +} + +template <> +constexpr auto isign_mask() { + return ZSimdVect{0.0, -0.0}; +} + +template +constexpr auto image_one() { + return ZSimdVect{0, 1.f, 0, 1.f}; +} + +template <> +constexpr auto image_one() { + return ZSimdVect{0.0, 1.0}; +} + +template +constexpr auto pi_half() { + return ZSimdVect{(float)(M_PI / 2.0), 0.f, (float)(M_PI / 2.0), 0.f}; +} + +template <> +constexpr auto pi_half() { + return ZSimdVect{M_PI / 2.0, 0.0}; +} + +template +constexpr auto image_half() { + return ZSimdVect{0, 0.5f, 0, 0.5f}; +} + +template <> +constexpr auto image_half() { + return ZSimdVect{0.0, 0.5}; +} + +template +constexpr U log2e_inv() { + return static_cast(1.4426950408889634); +} + +template +constexpr U log10e_inv() { + return static_cast(0.43429448190325176); +} + +template +struct Vectorized()>> { + public: + using underline_type = decltype(std::declval().imag()); + using value_type = T; + using vtype = ZSimdVect; + using vmaskType = ZSimdVectBinary; + using vinner_type = Vectorized; + using size_type = int; + using vinner_data = typename Vectorized::vinner_data; + + static constexpr size_type size() { + return VECTOR_WIDTH / sizeof(value_type); + } + + private: + vinner_type _vec; + + public: + Vectorized() {} + + C10_ALWAYS_INLINE Vectorized(const vinner_data &v) : _vec{v.first, v.second} {} + + template = 0> + C10_ALWAYS_INLINE Vectorized(T s1, T s2) + : _vec{s1.real(), s1.imag(), s2.real(), s2.imag()} {} + + template = 0> + C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4) + : _vec{ + s1.real(), + s1.imag(), + s2.real(), + s2.imag(), + s3.real(), + s3.imag(), + s4.real(), + s4.imag()} {} + + template = 0> + C10_ALWAYS_INLINE Vectorized(T s) : Vectorized(s, s) {} + + template = 0> + C10_ALWAYS_INLINE Vectorized(T s) : Vectorized(s, s, s, s) {} + + C10_ALWAYS_INLINE operator vinner_type() const { + return _vec; + } + + C10_ALWAYS_INLINE const vinner_type& vec() const { + return _vec; + } + + C10_ALWAYS_INLINE operator vinner_data() const { + return _vec.data(); + } + + C10_ALWAYS_INLINE vinner_data data() const { + return _vec.data(); + } + + template + static Vectorized C10_ALWAYS_INLINE + loadu(const U* ptr, int count = size()) { + return Vectorized{vinner_type::loadu(ptr, 2 * count)}; + } + + template + void C10_ALWAYS_INLINE store(U* ptr, int count = size()) const { + return _vec.store(ptr, 2 * count); + } + + static Vectorized blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // convert std::complex index mask to V index mask: xy -> xxyy + vinner_type vmask = mask.vec(); + auto mask_complex = vinner_type( + vec_mergeh(vmask.vec0(), vmask.vec0()), + vec_mergeh(vmask.vec1(), vmask.vec1())); + return Vectorized{vinner_type::blendv(a.vec(), b.vec(), mask_complex)}; + } + + template + static auto C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + constexpr int mask_complex = maskForComplex(mask); + 
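+    // Note (added): maskForComplex is assumed to widen the per-complex-element
+    // blend mask into a per-scalar-lane mask: each complex value occupies two
+    // adjacent lanes (real, imag), so bit i of `mask` becomes bits 2*i and
+    // 2*i+1 of mask_complex before delegating to the scalar blend below.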
return Vectorized{ + vinner_type::template blend(a.vec(), b.vec())}; + } + + template + static std::enable_if_t> arange( + T base = 0, + step_t step = static_cast(1)) { + return Vectorized(base, base + step); + } + + template + static std::enable_if_t> arange( + T base = 0, + step_t step = static_cast(1)) { + return Vectorized( + base, + base + step, + base + value_type(2) * step, + base + value_type(3) * step); + } + + template + static inline std::enable_if_t<(Z >= C), Vectorized> set_inner( + const Vectorized& a, + const Vectorized& b, + size_t count) { + return b; + } + + template + static inline std::enable_if_t<(Z < C), Vectorized> set_inner( + const Vectorized& a, + const Vectorized& b, + size_t count) { + if (count == Z) + return blend(a, b); + else + return set_inner(a, b, count); + } + + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + size_t count = size()) { + if (count == 0) + return a; + return set_inner<1, size()>(a, b, count); + } + + const T& operator[](int idx) const = delete; + T& operator[](int idx) = delete; + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + Vectorized mapOrdinary(T (*const f)(const T&)) const { + auto v0 = _vec.vec0(); + auto v1 = _vec.vec1(); + return Vectorized{ + f(T(v0[0], v0[1])), + f(T(v0[2], v0[3])), + f(T(v1[0], v1[1])), + f(T(v1[2], v1[3]))}; + } + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + Vectorized mapOrdinary(T (*const f)(const T&)) const { + auto v0 = _vec.vec0(); + auto v1 = _vec.vec1(); + return Vectorized{f(T(v0[0], v0[1])), f(T(v1[0], v1[1]))}; + } + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + Vectorized mapOrdinary(T (*const f)(T)) const { + auto v0 = _vec.vec0(); + auto v1 = _vec.vec1(); + return Vectorized{ + f(T(v0[0], v0[1])), + f(T(v0[2], v0[3])), + f(T(v1[0], v1[1])), + f(T(v1[2], v1[3]))}; + } + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + Vectorized mapOrdinary(T (*const f)(T)) const { + auto v0 = _vec.vec0(); + auto v1 = _vec.vec1(); + return Vectorized{f(T(v0[0], v0[1])), f(T(v1[0], v1[1]))}; + } + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + inline Vectorized mapOrdinary( + T (*const f)(const T&, const T&), + const Vectorized& b) const { + auto v0 = _vec.vec0(); + auto v1 = _vec.vec1(); + auto bvec = b.vec(); + auto b0 = bvec.vec0(); + auto b1 = bvec.vec1(); + T a00 = f(T(v0[0], v0[1]), T(b0[0], b0[1])); + T a01 = f(T(v0[2], v0[3]), T(b0[2], b0[3])); + T a02 = f(T(v1[0], v1[1]), T(b1[0], b1[1])); + T a03 = f(T(v1[2], v1[3]), T(b1[2], b1[3])); + return Vectorized{a00, a01, a02, a03}; + } + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + inline Vectorized mapOrdinary( + T (*const f)(const T&, const T&), + const Vectorized& b) const { + auto v0 = _vec.vec0(); + auto v1 = _vec.vec1(); + auto bvec = b.vec(); + auto b0 = bvec.vec0(); + auto b1 = bvec.vec1(); + U a00 = f(U(v0[0], v0[1]), U(b0[0], b0[1])); + U a01 = f(U(v1[0], v1[1]), U(b1[0], b1[1])); + return Vectorized{a00, a01}; + } + + template < + typename U = T, + std::enable_if_t>::value, int> = 0> + static typename Vectorized::vinner_type real_neg(const typename Vectorized::vinner_type &a) + { + const auto swap_mask = ZSimdVectBinary{ + 0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31}; + + auto a_neg = a.neg(); + vtype v0 = vec_perm(a_neg.vec0(), a.vec0(), swap_mask); + vtype v1 = vec_perm(a_neg.vec1(), a.vec1(), swap_mask); + return {v0, v1}; + } + + template < + typename U = T, + 
std::enable_if_t>::value, int> = 0> + static typename Vectorized::vinner_type real_neg(const typename Vectorized::vinner_type &a) + { + auto a_neg = a.neg(); + auto v0 = vec_permi(a_neg.vec0(), a.vec0(), 1); + auto v1 = vec_permi(a_neg.vec1(), a.vec1(), 1); + return { v0, v1 }; + } + + Vectorized angle2_() const { + auto b_a = _vec.swapped(); // b a + return Vectorized{_vec.atan2(b_a).swapped()}; + } + + Vectorized angle() const { + return angle2_().real(); + } + + Vectorized atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + auto ione = Vectorized{vinner_type(image_one())}; + auto sum = ione + *this; + auto sub = ione - *this; + auto ln = (sum / sub).log(); // ln((i + z)/(i - z)) + return ln * + Vectorized{vinner_type(image_half())}; // i/2*ln() + } + + Vectorized atanh() const { + return mapOrdinary(std::atanh); + } + + Vectorized asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) +#if 1 + vinner_type cnj = conj().vec(); + vinner_type b_a = cnj.swapped(); + vinner_type ab = cnj * b_a; + vinner_type im = ab + ab; + vinner_type val_2 = _vec * _vec; + vinner_type val_2_swapped = val_2.swapped(); + vinner_type re = vinner_type::horizontal_sub_perm(val_2, val_2_swapped); + re = vinner_type(static_cast(1)) - re; + constexpr int blend_mask = + blend_choice(); // 0x0A for complex , 0xAA for complex + vinner_type blendx = vinner_type::template blend(re, im); + auto root = Vectorized(blendx).sqrt(); + auto ln = Vectorized(Vectorized(b_a) + root).log(); + return Vectorized(ln.vec().swapped()).conj(); +#else + return mapOrdinary(std::asin); +#endif + } + + Vectorized acos() const { + // acos(x) = pi/2 - asin(x) + return Vectorized(vinner_type(pi_half())) - asin(); + } + + Vectorized sin() const { + return mapOrdinary(std::sin); + } + Vectorized sinh() const { + return mapOrdinary(std::sinh); + } + Vectorized cos() const { + return mapOrdinary(std::cos); + } + Vectorized cosh() const { + return mapOrdinary(std::cosh); + } + Vectorized ceil() const { + return Vectorized{_vec.ceil()}; + } + Vectorized floor() const { + return Vectorized{_vec.floor()}; + } + Vectorized neg() const { + return Vectorized(_vec.neg()); + } + Vectorized round() const { + return Vectorized{_vec.round()}; + } + Vectorized tan() const { + return mapOrdinary(std::tan); + } + Vectorized tanh() const { + return mapOrdinary(std::tanh); + } + Vectorized trunc() const { + return Vectorized{_vec.trunc()}; + } + + Vectorized C10_ALWAYS_INLINE eq(const Vectorized& other) const { + auto eq = _vec.eq(other._vec); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + auto real = eq & vinner_type(real_mask()); + auto imag = (eq & vinner_type(image_mask())).swapped(); + return Vectorized{real & imag}; + } + Vectorized C10_ALWAYS_INLINE ne(const Vectorized& other) const { + auto ne = _vec.ne(other._vec); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + auto real = ne & vinner_type(real_mask()); + auto imag = (ne & vinner_type(image_mask())).swapped(); + return Vectorized{real | imag}; + } + + Vectorized real() const { + return Vectorized(_vec & vinner_type(real_mask())); + } + Vectorized imag_() const { + return Vectorized(_vec & vinner_type(image_mask())); + } + Vectorized imag() const { + return Vectorized{ + (_vec & 
vinner_type(image_mask())).swapped()}; + } + + Vectorized conj() const { + return Vectorized(_vec ^ vinner_type(isign_mask())); + } + + vinner_data abs_2_() const { + auto a = _vec * _vec; + a = a + a.swapped(); + return a.mergee().data(); + } + + static T abs_helper(const T &value) + { + return T(std::abs(value)); + } + + Vectorized abs() const { + return mapOrdinary(abs_helper); + } + + Vectorized exp() const { + return mapOrdinary(std::exp); + } + + Vectorized exp2() const { + return mapOrdinary(exp2_impl); + } + + Vectorized expm1() const { + return mapOrdinary(std::expm1); + } + + Vectorized log() const { + return mapOrdinary(std::log); + } + + Vectorized log2() const { + // log2eB_inv + auto ret = log(); + return Vectorized{ret._vec * vinner_type(log2e_inv())}; + } + + Vectorized log10() const { + auto ret = log(); + return Vectorized{ret._vec * vinner_type(log10e_inv())}; + } + + Vectorized log1p() const { + return mapOrdinary(std::log1p); + } + + Vectorized sgn() const { + return mapOrdinary(at::native::sgn_impl); + } + + Vectorized pow(const Vectorized& exp) const { + return mapOrdinary(std::pow, exp); + } + + Vectorized sqrt() const { + return mapOrdinary(std::sqrt); + } + + Vectorized reciprocal() const { + // re + im*i = (a + bi) / (c + di) + // re = (ac + bd)/abs_2() = c/abs_2() + // im = (bc - ad)/abs_2() = d/abs_2() + vinner_type c_d = _vec ^ vinner_type(isign_mask()); + vinner_type abs = abs_2_(); + return Vectorized{c_d / abs}; + } + + Vectorized rsqrt() const { + return sqrt().reciprocal(); + } + + Vectorized lt(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized le(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized gt(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized ge(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } +}; + +#define ZVECTOR_OPERATORS(typex) \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() + b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() - b.vec()}; \ + } \ + \ + template <> \ + Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { \ + /* (a + bi) * (c + di) = (ac - bd) + (ad + bc)i */ \ + Vectorized::vinner_type bv = b.vec(); \ + \ + /* this is more z arch friendly than simulating horizontal from x86 */ \ + Vectorized::vinner_type vi = bv.mergeo(); \ + Vectorized::vinner_type vr = bv.mergee(); \ + vi = vi ^ Vectorized::vinner_type(rsign_mask::underline_type>()); \ + Vectorized::vinner_type ret = a.vec() * vr; \ + Vectorized::vinner_type vx_swapped = a.vec().swapped(); \ + ret = fmadd(vx_swapped, vi, ret); \ + \ + return Vectorized{ret}; \ + } \ + \ + template <> \ + Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { \ + /* Unfortunately, this breaks some tests */ \ + /* Implement it like it's done for avx2 */ \ + auto fabs_cd = b.vec().abs(); /* |c| |d| */ \ + auto fabs_dc = fabs_cd.swapped(); /* |d| |c| */ \ + auto scale = Vectorized::vinner_type {1.0} / maximum(fabs_cd, fabs_dc); /* 1/sc 1/sc */ \ + auto a2 = a.vec() * scale; /* a/sc b/sc */ \ + auto b2 = b.vec() * scale; /* c/sc d/sc */ \ + auto acbd2 = a2 * b2; /* ac/sc^2 bd/sc^2 */ \ + \ + auto dc2 = b2.swapped(); /* d/sc c/sc */ \ + dc2 = 
Vectorized::real_neg(dc2); /* -d/|c,d| c/sc */ \ + auto adbc2 = a2 * dc2; /* -ad/sc^2 bc/sc^2 */ \ + auto sum1 = acbd2 + acbd2.swapped(); /* (ac+bd)/sc^2 (ac+bd)/sc^2 */ \ + auto sum2 = adbc2 + adbc2.swapped(); /* (bc-ad)/sc^2 (bc-ad)/sc^2 */ \ + auto res2 = Vectorized::vinner_type::mergee(sum1, sum2); /* (ac+bd)/sc^2 (bc-ad)/sc^2 */ \ + \ + /* get the denominator */ \ + Vectorized::vinner_type denom2 = Vectorized{b2}.abs_2_(); /* (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 */ \ + res2 = res2 / denom2; \ + return Vectorized{ res2 }; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() & b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() | b.vec()}; \ + } \ + \ + template <> \ + Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() ^ b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator==(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() == b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator!=(const Vectorized& a, const Vectorized& b) { \ + return Vectorized{a.vec() != b.vec()}; \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator<(const Vectorized& a, const Vectorized& b) { \ + TORCH_CHECK(false, "not supported for complex numbers"); \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator<=(const Vectorized& a, const Vectorized& b) { \ + TORCH_CHECK(false, "not supported for complex numbers"); \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator>(const Vectorized& a, const Vectorized& b) { \ + TORCH_CHECK(false, "not supported for complex numbers"); \ + } \ + \ + Vectorized C10_ALWAYS_INLINE operator>=(const Vectorized& a, const Vectorized& b) { \ + TORCH_CHECK(false, "not supported for complex numbers"); \ + } + +ZVECTOR_OPERATORS(c10::complex) +ZVECTOR_OPERATORS(c10::complex) + +#undef ZVECTOR_OPERATORS + +template = 0> +std::pair, Vectorized> inline inner_interleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, a1, a2, a3} + // b = {b0, b1, b2, b3} + using vtype = typename Vectorized::vtype; + vtype ab00 = vec_permi(a.vec0(), b.vec0(), 0); + vtype ab11 = vec_permi(a.vec0(), b.vec0(), 3); + vtype ab2_00 = vec_permi(a.vec1(), b.vec1(), 0); + vtype ab2_11 = vec_permi(a.vec1(), b.vec1(), 3); + // return {a0, b0, a1, b1} + // {a2, b2, a3, b3} + return std::make_pair( + Vectorized{ab00, ab11}, Vectorized{ab2_00, ab2_11}); +} + +template = 0> +std::pair, Vectorized> inline inner_deinterleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1} + // b = {a2, b2, a3, b3} + using vtype = typename Vectorized::vtype; + vtype aa01 = vec_permi(a.vec0(), a.vec1(), 0); + vtype aa23 = vec_permi(b.vec0(), b.vec1(), 0); + + vtype bb_01 = vec_permi(a.vec0(), a.vec1(), 3); + vtype bb_23 = vec_permi(b.vec0(), b.vec1(), 3); + + // swap lanes: + // return {a0, a1, a2, a3} + // {b0, b1, b2, b3} + return std::make_pair(Vectorized{aa01, aa23}, Vectorized{bb_01, bb_23}); +} + +template = 0> +std::pair, Vectorized> inline inner_interleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, a1, a2, a3,, a4, a5, a6, a7} + // b = {b0, b1, b2, b3,, b4, b5, b6, b7} + using vtype = typename Vectorized::vtype; + vtype ab0011 = vec_mergeh(a.vec0(), b.vec0()); + vtype ab2233 = vec_mergel(a.vec0(), b.vec0()); + + vtype ab2_0011 = vec_mergeh(a.vec1(), b.vec1()); + 
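+  // Note (added): vec_mergeh / vec_mergel interleave the upper and lower
+  // halves of their two operands element-wise, so the four merges in this
+  // function produce exactly the interleaved pairs documented in the
+  // return comment below.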
vtype ab2_2233 = vec_mergel(a.vec1(), b.vec1()); + // group cols crossing lanes: + // return {a0, b0, a1, b1,, a2, b2, a3, b3} + // {a4, b4, a5, b5,, a6, b6, a7, b7} + + return std::make_pair( + Vectorized{ab0011, ab2233}, Vectorized{ab2_0011, ab2_2233}); +} + +template = 0> +std::pair, Vectorized> inline inner_deinterleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1,, a2, b2, a3, b3} + // b = {a4, b4, a5, b5,, a6, b6, a7, b7} + using vtype = typename Vectorized::vtype; + // {a0,a2,b0,b2} {a1,a3,b1,b3} + vtype a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1()); + vtype a1a3b1b3 = vec_mergel(a.vec0(), a.vec1()); + + vtype aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3); + vtype bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3); + + vtype a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1()); + vtype a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1()); + + vtype aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2); + vtype bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2); + + // it could be done with vec_perm ,too + // swap lanes: + // return {a0, a1, a2, a3,, a4, a5, a6, a7} + // {b0, b1, b2, b3,, b4, b5, b6, b7} + + return std::make_pair( + Vectorized{aa0123, aa0123_2}, Vectorized{bb0123, bb0123_2}); +} + +template <> +std::pair, Vectorized> inline interleave2( + const Vectorized& a, + const Vectorized& b) { + return inner_interleave2(a, b); +} + +template <> +std::pair, Vectorized> inline interleave2( + const Vectorized& a, + const Vectorized& b) { + return inner_interleave2(a, b); +} + +template <> +std::pair, Vectorized> inline interleave2( + const Vectorized& a, + const Vectorized& b) { + return inner_interleave2(a, b); +} + +template <> +std::pair, Vectorized> inline interleave2( + const Vectorized& a, + const Vectorized& b) { + return inner_interleave2(a, b); +} + +template <> +std::pair, Vectorized> inline deinterleave2( + const Vectorized& a, + const Vectorized& b) { + return inner_deinterleave2(a, b); +} + +template <> +std::pair, Vectorized> inline deinterleave2< + int32_t>(const Vectorized& a, const Vectorized& b) { + return inner_deinterleave2(a, b); +} + +template <> +std::pair, Vectorized> inline deinterleave2( + const Vectorized& a, + const Vectorized& b) { + return inner_deinterleave2(a, b); +} + +template <> +std::pair, Vectorized> inline deinterleave2< + int64_t>(const Vectorized& a, const Vectorized& b) { + return inner_deinterleave2(a, b); +} + +template +typename std::enable_if::value, at::vec::Vectorized>::type +inline convert_int8_to_float(const Vectorized &src) { + // Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + // Only handle first 64 bits + auto vec_int = src.to_vec_float_helper(); + + return zvec_convert_to_float(vec_int); +} + +template +typename std::enable_if::value, at::vec::Vectorized>::type +inline convert_float_to_int8(const Vectorized &src) { + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + + auto vec_int = clamp(zvec_convert_to_int(src), Vectorized(min_val), Vectorized(max_val)); + + return vec_int.to_vec_uint8_helper(); +} + +#undef DEFINE_CLAMP_MAXMIN_FUNCS +#undef DEFINE_MAXMIN_FUNCS +} // namespace +} // namespace vec +} // namespace at diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h new file mode 100644 index 0000000000000000000000000000000000000000..ae48dc8a3f30a6ab9b159577de4dee232e9635ba --- 
/dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h @@ -0,0 +1,472 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if (defined(CPU_CAPABILITY_AVX512)) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +template <> class Vectorized { +private: + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + // values needs to be public for compilation with clang + // as vec512.h uses it + __m512d values; + using value_type = double; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m512d v) : values(v) {} + Vectorized(double val) { + values = _mm512_set1_pd(val); + } + Vectorized(double val1, double val2, double val3, double val4, + double val5, double val6, double val7, double val8) { + values = _mm512_setr_pd(val1, val2, val3, val4, val5, val6, val7, val8); + } + operator __m512d() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm512_mask_blend_pd(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); + auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask.values), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_pd(mmask, a.values, b.values); + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, + base + 7 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_pd(reinterpret_cast(ptr)); + + __mmask8 mask = (1ULL << count) - 1; + return _mm512_maskz_loadu_pd(mask, ptr); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + __mmask8 mask = (1ULL << count) - 1; + _mm512_mask_storeu_pd(reinterpret_cast(ptr), mask, values); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __mmask8 cmp = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_EQ_OQ); + return static_cast(cmp); + } + Vectorized isnan() const { + auto cmp_mask = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_UNORD_Q); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + bool has_inf_nan() const { + __m512d self_sub = _mm512_sub_pd(values, values); + return (_mm512_movepi8_mask(_mm512_castpd_si512(self_sub)) & 0x7777777777777777) != 0; + } + Vectorized map(double (*const 
f)(double)) const { + __at_align__ double tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm512_set1_pd(-0.f); + return _mm512_andnot_pd(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm512_castsi512_pd(zero_vector); + const auto nan_vec = _mm512_set1_pd(NAN); + const auto not_nan_mask = _mm512_cmp_pd_mask(values, values, _CMP_EQ_OQ); + const auto not_nan = _mm512_mask_set1_epi64(zero_vector, not_nan_mask, + 0xFFFFFFFFFFFFFFFF); + const auto nan_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(not_nan), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_pd(c10::pi); + + const auto neg_mask = _mm512_cmp_pd_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_pd(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_pd(nan_mask, angle, nan_vec); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_pd(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosd8_u10(values)); + } + Vectorized acosh() const { + return Vectorized(Sleef_acoshd8_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asind8_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atand8_u10(values)); + } + Vectorized atanh() const { + return Vectorized(Sleef_atanhd8_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2d8_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignd8(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erfd8_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcd8_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expd8_u10(values)); + } + Vectorized exp2() const { + return Vectorized(Sleef_exp2d8_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1d8_u10(values)); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodd8(values, q)); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotd8_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return Vectorized(Sleef_logd8_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2d8_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10d8_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pd8_u10(values)); + } + Vectorized sin() const { + return Vectorized(Sleef_sind8_u10(values)); + } + Vectorized sinh() const { + return 
Vectorized(Sleef_sinhd8_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosd8_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshd8_u10(values)); + } + Vectorized ceil() const { + return _mm512_ceil_pd(values); + } + Vectorized floor() const { + return _mm512_floor_pd(values); + } + Vectorized frac() const; + Vectorized neg() const { + return _mm512_xor_pd(_mm512_set1_pd(-0.), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterd8(values, b)); + } + Vectorized round() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tand8_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhd8_u10(values)); + } + Vectorized trunc() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammad8_u10(values)); + } + Vectorized sqrt() const { + return _mm512_sqrt_pd(values); + } + Vectorized reciprocal() const { + return _mm512_div_pd(_mm512_set1_pd(1), values); + } + Vectorized rsqrt() const { + return _mm512_div_pd(_mm512_set1_pd(1), _mm512_sqrt_pd(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powd8_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator!=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator<(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LT_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator<=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LE_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator>(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GT_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator>=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GE_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_pd(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_pd(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return 
_mm512_mul_pd(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm512_div_pd(a, b); +} + +// frac. Implement this here so we can use subtraction. +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi64(0); + Vectorized max = _mm512_max_pd(a, b); + auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_pd(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi64(0); + Vectorized min = _mm512_min_pd(a, b); + auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_pd(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm512_min_pd(max, _mm512_max_pd(min, a)); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm512_max_pd(min, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm512_min_pd(max, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm512_and_pd(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm512_or_pd(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm512_xor_pd(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0); +} + +template <> +inline void convert(const double* src, double* dst, int64_t n) { + int64_t i; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i)); + } +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmadd_pd(a, b, c); +} + +template <> +Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmsub_pd(a, b, c); +} + +#endif + +}}} diff --git 
a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h new file mode 100644 index 0000000000000000000000000000000000000000..c4d8b1ccf5de4911ca6fc4170f5c9951fa58e4c7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h @@ -0,0 +1,1159 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] +// +// Note [Do not compile initializers with AVX] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// If you define a static initializer in this file, the initialization will use +// AVX instructions because these object files are compiled with AVX enabled. +// We need to avoid non-trivial global data in these architecture specific files +// because there's no way to guard the global initializers with CPU capability +// detection. +// +// See https://github.com/pytorch/pytorch/issues/37577 for an instance +// of this bug in the past. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(__GNUC__) +#define __FORCE_INLINE __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +#define __FORCE_INLINE __forceinline +#endif + +#if defined(_MSC_FULL_VER) +/* +https://learn.microsoft.com/en-us/cpp/overview/compiler-versions?view=msvc-170 +Use _MSC_FULL_VER to identify current compiler is msvc, +Windows llvm will not have this defination. +*/ +#define __msvc_cl__ +#endif + +// These macros helped us unify vec_base.h +#ifdef CPU_CAPABILITY_AVX512 +#if defined(__GNUC__) +#define __at_align__ __attribute__((aligned(64))) +#elif defined(_WIN32) +#define __at_align__ __declspec(align(64)) +#else +#define __at_align__ +#endif +#define VECTOR_WIDTH 64 +#define int_vector __m512i +#else // CPU_CAPABILITY_AVX512 +#if defined(__GNUC__) +#define __at_align__ __attribute__((aligned(32))) +#elif defined(_WIN32) +#define __at_align__ __declspec(align(32)) +#else +#define __at_align__ +#endif +#define VECTOR_WIDTH 32 +#define int_vector __m256i +#endif // CPU_CAPABILITY_AVX512 + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +// at::Half and at::BFloat16 should be treated as floating point +template +struct is_floating_point: + std::integral_constant || + std::is_same_v || + std::is_same_v> { +}; + +template +constexpr bool is_floating_point_v = is_floating_point::value; + +template +struct is_reduced_floating_point: + std::integral_constant || + std::is_same_v> { +}; + +template +constexpr bool is_reduced_floating_point_v = is_reduced_floating_point::value; + +template +struct is_8bit_integer: + std::integral_constant || + std::is_same_v> { +}; + +template +constexpr bool is_8bit_integer_v = is_8bit_integer::value; + +template struct int_of_size; + +#define DEFINE_INT_OF_SIZE(int_t) \ +template<> struct int_of_size { using type = int_t; } + +DEFINE_INT_OF_SIZE(int64_t); +DEFINE_INT_OF_SIZE(int32_t); +DEFINE_INT_OF_SIZE(int16_t); +DEFINE_INT_OF_SIZE(int8_t); + +#undef DEFINE_INT_OF_SIZE + +template +using int_same_size_t = typename int_of_size::type; + +// NOTE: If you specialize on a type, you must define all operations! 
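+
+// Added illustration (not part of the upstream header): int_of_size /
+// int_same_size_t map a scalar type to the signed integer type of the same
+// byte width; the generic Vectorized<T> fallback below relies on this for
+// the integer mask lanes consumed by blendv(). A minimal sketch of the
+// expected mappings, assuming the usual fixed-width type sizes:
+static_assert(std::is_same_v<int_same_size_t<float>, int32_t>,
+              "a 4-byte float maps to int32_t");
+static_assert(std::is_same_v<int_same_size_t<double>, int64_t>,
+              "an 8-byte double maps to int64_t");
+static_assert(std::is_same_v<int_same_size_t<int16_t>, int16_t>,
+              "integer types map to themselves");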
+ +// emulates Vectorized types +#if defined(__s390x__) +template +#else +template +#endif +struct Vectorized { +private: + __at_align__ T values[VECTOR_WIDTH / sizeof(T)]; +public: + using value_type = T; + using size_type = int; + // Note [constexpr static function to avoid odr-usage compiler bug] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Why, you might ask, is size defined to be a static constexpr function, + // rather than a more ordinary 'static constexpr int size;' variable? + // The problem lies within ODR rules for static constexpr members versus + // static constexpr functions. First, recall that this class (along with all + // of its derivations) live in an anonymous namespace: they are intended to be + // *completely* inlined at their use-sites, because we need to compile it + // multiple times for different instruction sets. + // + // Because of this constraint, we CANNOT provide a single definition for + // any static members in this class; since we want to compile the class + // multiple times, there wouldn't actually be any good place to put the + // definition. Now here is the problem: if we ODR-use a static constexpr + // member, we are *obligated* to provide a definition. Without the + // definition, you get a compile error like: + // + // relocation R_X86_64_PC32 against undefined symbol + // `_ZN2at6vec25612_GLOBAL__N_16VectorizedIdE4sizeE' can not be used when making + // a shared object; recompile with -fPIC + // + // If this were C++17, we could replace a static constexpr variable with + // an inline variable which doesn't require one definition. But we are not + // C++17. So the next best thing is to replace the member with a static + // constexpr (and therefore inline) function, which does not require ODR + // either. + // + // Also, technically according to the C++ standard, we don't have to define + // a constexpr variable if we never odr-use it. But it seems that some + // versions GCC/Clang have buggy determinations on whether or not an + // identifier is odr-used or not, and in any case it's hard to tell if + // a variable is odr-used or not. So best to just cut the problem at the root. + static constexpr size_type size() { + return VECTOR_WIDTH / sizeof(T); + } + Vectorized() : values{static_cast(0)} {} + Vectorized(T val) { + for (int i = 0; i != size(); i++) { + values[i] = val; + } + } + template> + Vectorized(Args... 
vals) : values{vals...}{ + } + // This also implies const T& operator[](int idx) const + inline operator const T*() const { + return values; + } + // This also implies T& operator[](int idx) + inline operator T*() { + return values; + } + // Return the values as char* for type punning + auto as_bytes() const -> const char* { + return reinterpret_cast(values); + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + int64_t mask = mask_; + Vectorized vector; + for (const auto i : c10::irange(size())) { + if (mask & 0x01) { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + mask = mask >> 1; + } + return vector; + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + Vectorized vector; + int_same_size_t buffer[size()]; + mask.store(buffer); + for (const auto i : c10::irange(size())) { + if (buffer[i] & 0x01) + { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + } + return vector; + } + template // step sometimes requires a higher precision type (e.g., T=int, step_t=double) + static Vectorized arange(T base = static_cast(0), step_t step = static_cast(1)) { + Vectorized vector; + for (const auto i : c10::irange(size())) { + vector.values[i] = base + i * step; + } + return vector; + } + static Vectorized set(const Vectorized& a, const Vectorized& b, int64_t count = size()) { + Vectorized vector; + for (const auto i : c10::irange(size())) { + if (i < count) { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + } + return vector; + } + static Vectorized loadu(const void* ptr) { + Vectorized vector; + std::memcpy(vector.values, ptr, VECTOR_WIDTH); + return vector; + } + static Vectorized loadu(const void* ptr, int64_t count) { + Vectorized vector; + std::memcpy(vector.values, ptr, count * sizeof(T)); + return vector; + } + static Vectorized loadu_one_fourth(const void* ptr) { + static_assert(std::is_same_v || std::is_same_v, "For byte types only"); + return Vectorized::loadu(ptr, 8); + } + + void store(void* ptr, int count = size()) const { + std::memcpy(ptr, values, count * sizeof(T)); + } + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + int mask = 0; + for (int i = 0; i < size(); ++ i) { + if (values[i] == static_cast(0)) { + mask |= (1 << i); + } + } + return mask; + } + Vectorized isnan() const { + Vectorized vector; + for (int64_t i = 0; i != size(); i++) { + if (_isnan(values[i])) { + std::memset(static_cast(vector.values + i), 0xFF, sizeof(T)); + } else { + std::memset(static_cast(vector.values + i), 0, sizeof(T)); + } + } + return vector; + } + bool has_inf_nan() const { + for (int64_t i = 0; i != size(); i++) { + if(_isnan(values[i]) || _isinf(values[i])) { + return true; + } + } + return false; + } + Vectorized map(T (*const f)(T)) const { + Vectorized ret; + for (int64_t i = 0; i != size(); i++) { + ret[i] = f(values[i]); + } + return ret; + } + Vectorized map(T (*const f)(const T &)) const { + Vectorized ret; + for (int64_t i = 0; i != size(); i++) { + ret[i] = f(values[i]); + } + return ret; + } + template && !c10::is_complex::value, int> = 0> + Vectorized abs() const { + // other_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_abs must be T"); + return map([](T x) -> T { return x < static_cast(0) ? -x : x; }); + } + template , int> = 0> + Vectorized abs() const { + // float_t_abs is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same_v, "float_t_abs must be T"); + // Specifically deal with floating-point because the generic code above won't handle -0.0 (which should result in + // 0.0) properly. + return map([](T x) -> T { return std::abs(x); }); + } + template ::value, int> = 0> + Vectorized abs() const { + // complex_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_abs must be T"); + // Specifically map() does not perform the type conversion needed by abs. + return map([](T x) { return static_cast(std::abs(x)); }); + } + + template ::value, int> = 0> + Vectorized sgn() const { + return map(at::native::sgn_impl); + } + + template ::value, int> = 0> + Vectorized angle() const { + // other_t_angle is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_angle must be T"); + return map(at::native::angle_impl); // compiler is unable to resolve the overload without + } + template ::value, int> = 0> + Vectorized angle() const { + // complex_t_angle is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_angle must be T"); + return map([](T x) { return static_cast(std::arg(x)); }); + } + template ::value, int> = 0> + Vectorized real() const { + // other_t_real is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_real must be T"); + return *this; + } + template ::value, int> = 0> + Vectorized real() const { + // complex_t_real is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_real must be T"); + return map([](T x) { return static_cast(x.real()); }); + } + template ::value, int> = 0> + Vectorized imag() const { + // other_t_imag is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_imag must be T"); + return Vectorized(0); + } + template ::value, int> = 0> + Vectorized imag() const { + // complex_t_imag is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_imag must be T"); + return map([](T x) { return static_cast(x.imag()); }); + } + template ::value, int> = 0> + Vectorized conj() const { + // other_t_conj is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_conj must be T"); + return *this; + } + template ::value, int> = 0> + Vectorized conj() const { + // complex_t_conj is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same_v, "complex_t_conj must be T"); + return map([](T x) { return static_cast(std::conj(x)); }); + } + Vectorized acos() const { + return map(std::acos); + } + Vectorized acosh() const { + return map(std::acosh); + } + Vectorized asin() const { + return map(std::asin); + } + Vectorized atan() const { + return map(std::atan); + } + Vectorized atanh() const { + return map(std::atanh); + } + Vectorized atan2(const Vectorized &exp) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::atan2(values[i], exp[i]); + } + return ret; + } + template < + typename U = T, + typename std::enable_if_t, int> = 0> + Vectorized copysign(const Vectorized &sign) const { + Vectorized ret; + for (size_type i = 0; i < size(); i++) { + ret[i] = c10::copysign(values[i], sign[i]); + } + return ret; + } + Vectorized erf() const { + return map(std::erf); + } + Vectorized erfc() const { + return map(std::erfc); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return map(std::exp); + } + Vectorized exp2() const { + return map(exp2_impl); + } + Vectorized expm1() const { + return map(std::expm1); + } + Vectorized exp_u20() const { + return map(std::exp); + } + Vectorized frac() const { + return *this - this->trunc(); + } + template < + typename U = T, + typename std::enable_if_t, int> = 0> + Vectorized fmod(const Vectorized& q) const { + // U is for SFINAE purposes only. Make sure it is not changed. + static_assert(std::is_same_v, "U must be T"); + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::fmod(values[i], q[i]); + } + return ret; + } + Vectorized log() const { + return map(std::log); + } + Vectorized log10() const { + return map(std::log10); + } + Vectorized log1p() const { + return map(std::log1p); + } + template ::value, int> = 0> + Vectorized log2() const { + // other_t_log2 is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_log2 must be T"); + return map(std::log2); + } + template ::value, int> = 0> + Vectorized log2() const { + // complex_t_log2 is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same_v, "complex_t_log2 must be T"); + const T log_2 = T(std::log(2.0)); + return Vectorized(map(std::log))/Vectorized(log_2); + } + Vectorized ceil() const { + return map(at::native::ceil_impl); + } + Vectorized cos() const { + return map(std::cos); + } + Vectorized cosh() const { + return map(std::cosh); + } + Vectorized floor() const { + return map(at::native::floor_impl); + } + Vectorized hypot(const Vectorized &b) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::hypot(values[i], b[i]); + } + return ret; + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = calc_igamma(values[i], x[i]); + } + return ret; + } + Vectorized igammac(const Vectorized &x) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = calc_igammac(values[i], x[i]); + } + return ret; + } + Vectorized neg() const { + // NB: the trailing return type is needed because we need to coerce the + // return value back to T in the case of unary operator- incuring a + // promotion + return map([](T x) -> T { return -x; }); + } + Vectorized nextafter(const Vectorized &b) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::nextafter(values[i], b[i]); + } + return ret; + } + Vectorized round() const { + // We do not use std::round because we would like to round midway numbers to the nearest even integer. + return map(at::native::round_impl); + } + Vectorized sin() const { + return map(std::sin); + } + Vectorized sinh() const { + return map(std::sinh); + } + Vectorized tan() const { + return map(std::tan); + } + Vectorized tanh() const { + return map(std::tanh); + } + Vectorized trunc() const { + return map(at::native::trunc_impl); + } + Vectorized lgamma() const { + return map(std::lgamma); + } + Vectorized sqrt() const { + return map(std::sqrt); + } + Vectorized reciprocal() const { + return map([](T x) { return (T)(1) / x; }); + } + Vectorized rsqrt() const { + return map([](T x) { return (T)1 / std::sqrt(x); }); + } + Vectorized pow(const Vectorized &exp) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::pow(values[i], exp[i]); + } + return ret; + } +private: + template + inline Vectorized binary_pred(const Vectorized& other, Op op) const { + // All bits are set to 1 if the pred is true, otherwise 0. 
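+    // (Added) An all-ones bit pattern, rather than the value 1, mirrors what
+    // SIMD compare intrinsics return, so the result can be used directly as
+    // a lane mask by blendv() and the bitwise operators.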
+ Vectorized vector; + for (int64_t i = 0; i != size(); i++) { + if (op(values[i], other.values[i])) { + std::memset(static_cast(vector.values + i), 0xFF, sizeof(T)); + } else { + std::memset(static_cast(vector.values + i), 0, sizeof(T)); + } + } + return vector; + } + +public: + Vectorized operator==(const Vectorized& other) const { return binary_pred(other, std::equal_to()); } + Vectorized operator!=(const Vectorized& other) const { return binary_pred(other, std::not_equal_to()); } + Vectorized operator>=(const Vectorized& other) const { return binary_pred(other, std::greater_equal()); } + Vectorized operator<=(const Vectorized& other) const { return binary_pred(other, std::less_equal()); } + Vectorized operator>(const Vectorized& other) const { return binary_pred(other, std::greater()); } + Vectorized operator<(const Vectorized& other) const { return binary_pred(other, std::less()); } + +private: + template + inline Vectorized binary_pred_bool(const Vectorized& other, Op op) const { + // 1 if the pred is true, otherwise 0. + Vectorized vector; + for (int i = 0; i != size(); ++ i) { + vector[i] = static_cast(op(values[i], other.values[i])); + } + return vector; + } + +public: + Vectorized eq(const Vectorized& other) const { return binary_pred_bool(other, std::equal_to()); } + Vectorized ne(const Vectorized& other) const { return binary_pred_bool(other, std::not_equal_to()); } + Vectorized gt(const Vectorized& other) const { return binary_pred_bool(other, std::greater()); } + Vectorized ge(const Vectorized& other) const { return binary_pred_bool(other, std::greater_equal()); } + Vectorized lt(const Vectorized& other) const { return binary_pred_bool(other, std::less()); } + Vectorized le(const Vectorized& other) const { return binary_pred_bool(other, std::less_equal()); } +}; + +template Vectorized inline operator+(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] + b[i]; + } + return c; +} + +template Vectorized inline operator-(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] - b[i]; + } + return c; +} + +template Vectorized inline operator*(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] * b[i]; + } + return c; +} + +template Vectorized inline operator/(const Vectorized &a, const Vectorized &b) __ubsan_ignore_float_divide_by_zero__ { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] / b[i]; + } + return c; +} + +template , int> = 0> +Vectorized inline operator%(const Vectorized &a, const Vectorized &b) __ubsan_ignore_float_divide_by_zero__ { + return a - a / b * b; +} + +template Vectorized inline operator||( + const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] || b[i]; + } + return c; +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template ::value, int> = 0> +Vectorized inline maximum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (a[i] > b[i]) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. 
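+      // Spelled out: if b[i] is NaN, (a[i] > b[i]) is false, so the ternary
+      // already selected b[i], the NaN; only a NaN in a[i] needs this fixup.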
+ c[i] = a[i]; + } + } + return c; +} + +template ::value, int> = 0> +Vectorized inline maximum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (std::abs(a[i]) > std::abs(b[i])) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template ::value, int> = 0> +Vectorized inline minimum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (a[i] < b[i]) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +template ::value, int> = 0> +Vectorized inline minimum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (std::abs(a[i]) < std::abs(b[i])) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +template ::value, int> = 0> +Vectorized inline clamp(const Vectorized &a, const Vectorized &min_vec, const Vectorized &max_vec) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = std::min(std::max(a[i], min_vec[i]), max_vec[i]); + } + return c; +} + +template ::value, int> = 0> +Vectorized inline clamp_max(const Vectorized &a, const Vectorized &max_vec) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] > max_vec[i] ? max_vec[i] : a[i]; + } + return c; +} + +template ::value, int> = 0> +Vectorized inline clamp_min(const Vectorized &a, const Vectorized &min_vec) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] < min_vec[i] ? 
min_vec[i] : a[i]; + } + return c; +} + +struct Vectorizedi; + +#if defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512) +template +static inline Vectorized bitwise_binary_op(const Vectorized &a, const Vectorized &b, Op op) { + int_vector buffer; +#if defined(CPU_CAPABILITY_AVX2) + int_vector a_buffer = _mm256_load_si256(reinterpret_cast((const T*)a)); + int_vector b_buffer = _mm256_load_si256(reinterpret_cast((const T*)b)); +#elif defined(CPU_CAPABILITY_AVX512) + int_vector a_buffer = _mm512_load_si512(reinterpret_cast((const T*)a)); + int_vector b_buffer = _mm512_load_si512(reinterpret_cast((const T*)b)); +#endif + buffer = op(a_buffer, b_buffer); + __at_align__ T results[Vectorized::size()]; + +#if defined(CPU_CAPABILITY_AVX2) + _mm256_store_si256(reinterpret_cast(results), buffer); +#elif defined(CPU_CAPABILITY_AVX512) + _mm512_store_si512(reinterpret_cast(results), buffer); +#endif + return Vectorized::loadu(results); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_and_si512 or _mm256_and_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_and_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_and_si512(a, b); }); +#endif +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_or_si512 or _mm256_or_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_or_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_or_si512(a, b); }); +#endif +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_xor_si512 or _mm256_xor_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_xor_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_xor_si512(a, b); }); +#endif +} + +#else + +template +auto load(char const* data) -> T { + T ret; + std::memcpy(&ret, data, sizeof(ret)); + return ret; +} + +template +static inline Vectorized bitwise_binary_op(const Vectorized &a, const Vectorized &b, Op op) { + static constexpr uint32_t element_no = VECTOR_WIDTH / sizeof(intmax_t); + __at_align__ intmax_t buffer[element_no]; + static_assert(VECTOR_WIDTH % sizeof(intmax_t) == 0, "VECTOR_WIDTH not a multiple of sizeof(intmax_t)"); + static_assert(sizeof(buffer) == sizeof(Vectorized), "sizeof(buffer) must match sizeof(Vectorized)"); + // We should be using memcpy in order to respect the strict aliasing rule + // see: https://github.com/pytorch/pytorch/issues/66119 + // Using char* is defined in the C11 standard 6.5 Expression paragraph 7 + // (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf) + const auto* a_data = a.as_bytes(); + const auto* b_data = b.as_bytes(); + // load each intmax_t chunk and process; increase pointers by sizeof(intmax_t) + for (auto& out : buffer) { + out = op(load(a_data), load(b_data)); + a_data += sizeof(intmax_t); + b_data += sizeof(intmax_t); + } + assert(a_data == a.as_bytes() + sizeof(a)); + 
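+  // Both asserts verify that the loop consumed exactly sizeof(Vectorized)
+  // bytes from each operand (guaranteed by the static_asserts above); the
+  // fixed-size memcpy in load() typically lowers to a single register load.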
assert(b_data == b.as_bytes() + sizeof(b)); + return Vectorized::loadu(buffer); +} + +template>, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_and()); +} +template>, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_or()); +} +template>, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_xor()); +} + +#endif // defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512) + +template>, int> = 0> +inline Vectorized operator~(const Vectorized& a) { + using int_t = int_same_size_t; + Vectorized ones(c10::bit_cast((int_t)(~(int_t)0))); // All bits are 1 + return a ^ ones; +} + +template Vectorized inline operator<<(const Vectorized &a, const Vectorized &b) { + constexpr T max_shift = sizeof(T) * CHAR_BIT; + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + T shift = b[i]; + if ((static_cast>(shift) < 0) || (shift >= max_shift)) { + c[i] = 0; + } else { + c[i] = static_cast>(a[i]) << shift; + } + } + return c; +} + +template Vectorized inline operator>>(const Vectorized &a, const Vectorized &b) { + // right shift value to retain sign bit for signed and no bits for unsigned + constexpr T max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v; + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + T shift = b[i]; + if ((static_cast>(shift) < 0) || (shift >= max_shift)) { + c[i] = a[i] >> max_shift; + } else { + c[i] = a[i] >> shift; + } + } + return c; +} + +template +inline Vectorized& operator += (Vectorized& a, const Vectorized& b) { + a = a + b; + return a; +} +template +inline Vectorized& operator -= (Vectorized& a, const Vectorized& b) { + a = a - b; + return a; +} +template +inline Vectorized& operator /= (Vectorized& a, const Vectorized& b) { + a = a / b; + return a; +} +template +inline Vectorized& operator %= (Vectorized& a, const Vectorized& b) { + a = a % b; + return a; +} +template +inline Vectorized& operator *= (Vectorized& a, const Vectorized& b) { + a = a * b; + return a; +} + +template +inline Vectorized& operator <<= (Vectorized& a, const Vectorized& b) { + a = a << b; + return a; +} + +template +inline Vectorized& operator >>= (Vectorized& a, const Vectorized& b) { + a = a >> b; + return a; +} + +template +inline Vectorized fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return a * b + c; +} + +template +inline Vectorized fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return a * b - c; +} + +template +Vectorized inline operator&&( + const Vectorized& a, + const Vectorized& b) { + Vectorized ret; + for (int i = 0; i != Vectorized::size(); i++) { + ret[i] = a[i] && b[i]; + } + return ret; +} + +template +std::enable_if_t> +inline gather(T const* base_addr, const Vectorized>& vindex) { + static constexpr int size = Vectorized::size(); + int_same_size_t index_arr[size]; + vindex.store(static_cast(index_arr)); + T buffer[size]; + for (const auto i : c10::irange(size)) { + buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)]; + } + return Vectorized::loadu(static_cast(buffer)); +} + +template +std::enable_if_t> +inline mask_gather(const Vectorized& src, T const* base_addr, + const Vectorized>& vindex, Vectorized& mask) { + static constexpr int size = Vectorized::size(); + T src_arr[size]; + int_same_size_t mask_arr[size]; // use int type so we can logical and + int_same_size_t 
index_arr[size];
+  src.store(static_cast(src_arr));
+  mask.store(static_cast(mask_arr));
+  vindex.store(static_cast(index_arr));
+  T buffer[size];
+  for (const auto i : c10::irange(size)) {
+    if (mask_arr[i] & 0x01) { // check lowest bit
+      buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
+    } else {
+      buffer[i] = src_arr[i];
+    }
+  }
+  mask = Vectorized(); // "zero out" mask
+  return Vectorized::loadu(static_cast(buffer));
+}
+
+// Cast a given vector to another type without changing the bits representation.
+// So a Vectorized of 512 bits containing all ones can be cast to a
+// Vectorized of 512 bits containing all ones (i.e., eight negative 1s).
+// A Vec of 256 bits containing all ones can be cast to a
+// Vec of 256 bits containing all ones (i.e., four negative 1s).
+// There is a struct here because we don't have static_if and I can't
+// partially specialize a templated function.
+template
+struct CastImpl {
+  static inline Vectorized apply(const Vectorized& src) {
+    src_t src_arr[Vectorized::size()];
+    src.store(static_cast(src_arr));
+    return Vectorized::loadu(static_cast(src_arr));
+  }
+};
+
+template
+struct CastImpl {
+  static inline Vectorized apply(const Vectorized& src) {
+    return src;
+  }
+};
+
+template
+inline Vectorized cast(const Vectorized& src) {
+  return CastImpl::apply(src);
+}
+
+template >
+inline Vectorized convert_to_int_of_same_size(const Vectorized& src) {
+  static_assert(sizeof(T) == sizeof(IntType));
+  static constexpr int size = Vectorized::size();
+
+  std::array src_arr;
+  src.store(static_cast(src_arr.data()));
+  std::array buffer;
+  std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
+                 [](const T& x) { return static_cast(x); });
+  return Vectorized::loadu(static_cast(buffer.data()));
+}
+
+template >
+inline Vectorized convert_to_fp_of_same_size(const Vectorized& src) {
+  static_assert(sizeof(T) == sizeof(IntType));
+  static constexpr int size = Vectorized::size();
+
+  std::array src_arr;
+  src.store(static_cast(src_arr.data()));
+  std::array buffer;
+  std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
+                 [](const IntType& x) { return static_cast(x); });
+  return Vectorized::loadu(static_cast(buffer.data()));
+}
+
+// Example inputs for AVX512:
+// a Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
+// b Vectorized = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
+// returns:
+//   Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
+//   Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
+// Example inputs for AVX2: a Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3}
+//                          b Vectorized = {a4, b4, a5, b5, a6, b6, a7, b7}
+// returns: Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7}
+//          Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7}
+template
+inline std::enable_if_t::size() % 2 == 0, std::pair, Vectorized>>
+deinterleave2(const Vectorized& a, const Vectorized& b) {
+  static constexpr int size = Vectorized::size();
+  static constexpr int half_size = size / 2;
+  T a_arr[size];
+  T b_arr[size];
+  T buffer1[size];
+  T buffer2[size];
+  a.store(static_cast(a_arr));
+  b.store(static_cast(b_arr));
+  for (const auto i : c10::irange(half_size)) {
+    buffer1[i] = a_arr[i * 2];
+    buffer1[half_size + i] = b_arr[i * 2];
+    buffer2[i] = a_arr[i * 2 + 1];
+    buffer2[half_size + i] = b_arr[i * 2 + 1];
+  }
+  return std::make_pair(Vectorized::loadu(static_cast(buffer1)),
+                        Vectorized::loadu(static_cast(buffer2)));
+}
+
+// inverse operation of deinterleave2
+// Example inputs for AVX512:
+// a Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
+// b Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
+// returns, for AVX512:
+//   Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
+//   Vectorized = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
+// Example inputs for AVX2 : a Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7}
+//                           b Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7}
+// returns: Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3}
+//          Vectorized = {a4, b4, a5, b5, a6, b6, a7, b7}
+template
+inline std::enable_if_t::size() % 2 == 0, std::pair, Vectorized>>
+interleave2(const Vectorized& a, const Vectorized& b) {
+  static constexpr int size = Vectorized::size();
+  static constexpr int half_size = size / 2;
+  T a_arr[size];
+  T b_arr[size];
+  T buffer1[size];
+  T buffer2[size];
+  a.store(static_cast(a_arr));
+  b.store(static_cast(b_arr));
+  for (const auto i : c10::irange(half_size)) {
+    buffer1[i * 2] = a_arr[i];
+    buffer1[i * 2 + 1] = b_arr[i];
+    buffer2[i * 2] = a_arr[half_size + i];
+    buffer2[i * 2 + 1] = b_arr[half_size + i];
+  }
+  return std::make_pair(Vectorized::loadu(static_cast(buffer1)),
+                        Vectorized::loadu(static_cast(buffer2)));
+}
+
+template
+inline void convert(const src_T *src, dst_T *dst, int64_t n) {
+#ifndef _MSC_VER
+# pragma unroll
+#endif
+  for (C10_UNUSED const auto i : c10::irange(n)) {
+    *dst = c10::convert(c10::load(src));
+    src++;
+    dst++;
+  }
+}
+
+template
+inline Vectorized flip(const Vectorized & data) {
+  static constexpr int size = Vectorized::size();
+  T output[size];
+  T buffer[size];
+  data.store(static_cast(buffer));
+  for (const auto i : c10::irange(size)) {
+    output[i] = buffer[size - i - 1];
+  }
+  return Vectorized::loadu(static_cast(output));
+}
+
+// Transpose the `src` buffer of type `T` and size (M,N) into the `dst` buffer.
+// `ld_src` is the leading dimension of `src` and `ld_dst` is the leading
+// dimension of `dst`.
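+// A worked example with M = 2, N = 3 (row-major):
+//   src (2x3, ld_src = 3): [a b c; d e f]
+//   dst (3x2, ld_dst = 2): [a d; b e; c f]
+// i.e. dst[j * ld_dst + i] = src[i * ld_src + j] for i in [0, M), j in [0, N).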
+template +inline void transpose_mxn(const T* src, int64_t ld_src, T* dst, int64_t ld_dst, int M, int N) { + for (int i = 0; i < M; i++) { + for (int j = 0; j < N; j++) { + dst[j*ld_dst + i] = src[i*ld_src + j]; + } + } +} + +template +inline void transpose_mxn(const T* src, int64_t ld_src, T* dst, int64_t ld_dst) { + transpose_mxn(src, ld_src, dst, ld_dst, M, N); +} + +}} // namespace at::vec::CPU_CAPABILITY + +// additional headers for more operations that depend on vec_base +#include +#include +#include diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_convert.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_convert.h new file mode 100644 index 0000000000000000000000000000000000000000..a5cee03dabcfc5355f51a614c33e2002fb01f4fd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_convert.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + typename Enabled = void> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + constexpr int count = std::min( + VectorizedN::size(), VectorizedN::size()); + __at_align__ src_t src_buf[VectorizedN::size()]; + src.store(src_buf); + __at_align__ dst_t dst_buf[VectorizedN::size()]; + for (int i = 0; i < count; i++) { + dst_buf[i] = static_cast(src_buf[i]); + } + return VectorizedN::loadu(dst_buf, count); + } +}; + +template +inline std::enable_if_t, Vectorized> +convert(const Vectorized& src) { + return src; +} + +template +inline std::enable_if_t, Vectorized> +convert(const Vectorized& src) { + return VecConvert::apply(src); +} + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + std::enable_if_t = 0> +inline VectorizedN convert(const VectorizedN& src) { + return VecConvert::apply(src); +} + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + bool keep = false, + std::enable_if_t = 0> +inline std::conditional_t, Vectorized> +convert(const VectorizedN& src) { + return VecConvert::apply(src); +} + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h new file mode 100644 index 0000000000000000000000000000000000000000..0bff6f4abfe11f52b22b1735ba26c48c9c68b30b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h @@ -0,0 +1,50 @@ +#pragma once + +#include + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) +static inline uint16_t float2half_scalar(float val) { +#if defined(CPU_CAPABILITY_AVX2) +#if defined(_MSC_VER) + __m256 v = _mm256_set1_ps(val); + __m128i o = + _mm256_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return static_cast(_mm_cvtsi128_si32(o)); +#else + return _cvtss_sh(val, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); +#endif +#elif defined(CPU_CAPABILITY_AVX512) + __m512 v = _mm512_set1_ps(val); + __m256i o = + _mm512_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return static_cast( + _mm_cvtsi128_si32(_mm256_castsi256_si128(o))); +#endif +} + +static inline float half2float_scalar(uint16_t val) { +#if defined(CPU_CAPABILITY_AVX2) +#if 
defined(_MSC_VER) + __m128i v = _mm_cvtsi32_si128(val); + __m256 o = _mm256_cvtph_ps(v); + return _mm256_cvtss_f32(o); +#else + return _cvtsh_ss(val); +#endif +#elif defined(CPU_CAPABILITY_AVX512) + __m256i v = + _mm256_setr_epi16(val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + __m512 o = _mm512_cvtph_ps(v); + return _mm512_cvtss_f32(o); +#endif +} + +#endif + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..a39ffa3090b8eb3b5dcc3299279439992eace3fd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h @@ -0,0 +1,294 @@ +#pragma once + +#include +#include +namespace at::vec { +inline namespace CPU_CAPABILITY { + +/** + * The `VecMask` class provides a convenient interface for working with + * vectorized masks in SIMD operations. It encapsulates a `Vectorized` + * mask that can be directly usable in masked vectorized operations. It provides + * various methods for manipulating and accessing the mask elements: + * 1. `from` and `to`: Conversion between a vector of boolean values and a + * vectorized mask. + * 2. `cast`: Casts the mask to a different base type. + * 3. `all_zero`: Checks if all mask elements are zero. + * 4. `is_masked`: Checks if a specific element is masked. + * 5. `loadu`: Loads data from memory using the mask. + * 6. `all_masked`: Checks if all mask elements are masked. + * + * Some helper template classes are provided to simplify the specialization of + * the `VecMask` for the specific CPU arch: + * 1. `VecMaskLoad`: Loads data from memory using the mask. + * 2. `VecMaskTo`: Converts the mask to boolean. + * 3. `VecMaskCast`: Casts the mask to a different base type. + * + */ +template +class VecMask; + +template < + typename data_t, + int data_n, + typename mask_t, + int mask_n, + typename Enabled = void> +struct VecMaskLoad { + static inline VectorizedN apply( + const data_t* ptr, + const VecMask& vec_mask) { + constexpr typename VecMask::size_type size = + VecMask::size(); + static_assert(VectorizedN::size() >= size); + __at_align__ data_t data[size]; + __at_align__ mask_t mask[size]; + auto mask_ = VectorizedN(vec_mask); + mask_.store(mask); + for (int i = 0; i < size; i++) { + data[i] = mask[i] ? 
ptr[i] : static_cast(0); + } + return VectorizedN::loadu(data, size); + } +}; + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + typename Enabled = void> +struct VecMaskTo { + static inline VecMask apply( + const VecMask& vec_mask) { + auto zeros = VectorizedN(static_cast(0)); + auto ones = VectorizedN(static_cast(1)); + return VectorizedN::blendv( + zeros, ones, vec_mask.template cast()); + } +}; + +template +struct VecMaskCast { + static inline VecMask apply( + const VecMask& vec_mask) { + return VecMask::from(VectorizedN(vec_mask)); + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + return vec_mask; + } +}; + +template +struct VecMaskCheck { + static inline bool all_zero(const VectorizedN& vec_mask) { + __at_align__ T mask[VectorizedN::size()]; + vec_mask.store(mask); + return std::all_of( + mask, mask + VectorizedN::size(), [](T m) { return m == static_cast(0); }); + } + + static inline bool all_masked(const VectorizedN& vec_mask) { + __at_align__ T mask[VectorizedN::size()]; + vec_mask.store(mask); + return std::all_of( + mask, mask + VectorizedN::size(), [](T m) { return m != static_cast(0); }); + } + + static inline bool is_masked(const VectorizedN& vec_mask, int i) { + __at_align__ T mask[VectorizedN::size()]; + vec_mask.store(mask); + return mask[i] != static_cast(0); + } +}; + +template +class VecMask { + public: + using size_type = int; + static constexpr size_type size() { + return VectorizedN::size(); + } + + private: + VectorizedN mask_; + + public: + VecMask() : mask_(static_cast(0)) {} + VecMask(const VectorizedN& mask) : mask_(mask) {} + + template = 0> + VecMask(const Vectorized& mask) : mask_(mask) {} + + template + static VecMask from(const VectorizedN& b_vec) { + __at_align__ U b_buf[size()]; + if constexpr (size() >= VectorizedN::size()) { + b_vec.store(b_buf); + for (int i = VectorizedN::size(); i < size(); i++) { + b_buf[i] = static_cast(0); + } + } else { + b_vec.store(b_buf, size()); + } + return from(b_buf); + } + + template + static VecMask from(U b) { + using int_t = int_same_size_t; + T mask = b ? c10::bit_cast((int_t)(~(int_t)0)) : (T)0; + return VectorizedN(mask); + } + + template + static VecMask from(U* b) { + using int_t = int_same_size_t; + __at_align__ T mask[size()]; +#ifndef __msvc_cl__ +#pragma unroll +#endif + for (int i = 0; i < size(); i++) { + *(int_t*)(mask + i) = b[i] ? 
~(int_t)0 : (int_t)0; + } + return VectorizedN(VectorizedN::loadu(mask)); + } + + static VecMask blendv( + const VecMask& c, + const VecMask& b, + const VecMask& a) { + VectorizedN result = VectorizedN::blendv( + VectorizedN(c), + VectorizedN(b), + VectorizedN(a)); + return result; + } + + static VecMask set( + const VecMask& a, + const VecMask& b, + int64_t count = size()) { + VectorizedN result = VectorizedN::set( + VectorizedN(a), + VectorizedN(b), + count); + return result; + } + + void store(bool* b, int count = size()) { + constexpr int L = (VectorizedN::size() + Vectorized::size() - 1)/ Vectorized::size(); + auto res = this->to(); + res.store(b, count); + return; + } + + template = 2, int> = 0> + inline VectorizedN to() const { + return VecMaskTo::apply(*this); + } + + template = 0> + inline Vectorized to() const { + return VecMaskTo::apply(*this); + } + + template + inline VecMask cast() const { + return VecMaskCast::apply(*this); + } + + inline bool all_zero() const { + return VecMaskCheck::all_zero(mask_); + } + + inline bool all_masked() const { + return VecMaskCheck::all_masked(mask_); + } + + inline bool is_masked(int i) const { + return VecMaskCheck::is_masked(mask_, i); + } + + inline operator VectorizedN() const { + return mask_; + } + + template = 0> + inline operator Vectorized() const { + return mask_[0]; + } + + inline Vectorized operator[](int i) const { + return mask_[i]; + } + + template < + typename U, + int L, + std::enable_if_t= 2 && VectorizedN::size() >= size(), int> = 0> + VectorizedN loadu(const U* ptr) const { + return VecMaskLoad::apply(ptr, *this); + } + + template < + typename U, + int L, + std::enable_if_t::size() >= size(), int> = 0> + Vectorized loadu(const U* ptr) const { + return VecMaskLoad::apply(ptr, *this); + } +}; + +#define VEC_MASK_DEFINE_UNARY_OP_GLOBAL(op) \ + template \ + inline VecMask op(const VecMask& a) { \ + return op(VectorizedN(a)); \ + } + +#define VEC_MASK_DEFINE_BINARY_OP_GLOBAL(op) \ + template < \ + typename T, \ + int N, \ + typename V, \ + int M, \ + std::enable_if_t::size() == VecMask::size(), int> = \ + 0> \ + inline VecMask op(const VecMask& a, const VecMask& b) { \ + return op( \ + VectorizedN(a), VectorizedN(b.template cast())); \ + } + +#define VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(op, EXPR) \ + template < \ + typename T, \ + int N, \ + typename V, \ + int M, \ + std::enable_if_t::size() == VecMask::size(), int> = \ + 0> \ + inline VecMask op(const VecMask& a, const VecMask& b) { \ + return EXPR; \ + } + +VEC_MASK_DEFINE_UNARY_OP_GLOBAL(operator~) +VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator&) +VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator|) +VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator^) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>, a & ~b) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<, ~a& b) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator==, ~(a ^ b)) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>=, (a == b) | (a > b)) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<=, (a == b) | (a < b)) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator!=, (a ^ b)) + +#undef VEC_MASK_DEFINE_UNARY_OP_GLOBAL +#undef VEC_MASK_DEFINE_BINARY_OP_GLOBAL +#undef VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h new file mode 100644 index 
0000000000000000000000000000000000000000..8c4e622682a2854a80eeb8ae814affdd24491756 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h @@ -0,0 +1,361 @@ +#pragma once + +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +/** + * @brief A class template representing a vectorized type with + * `N * Vectorized::size()` elements, aiming to support vectors of + * arbitrary size. A specific use case of it is to represent vectors + * converted from data types with different sizes but with the same + * number of vector elements, e.g., `VectorizedN` can be + * a vector converted from two `Vectorized`, `VectorizedN` + * can be a vector converted from two `Vectorized` etc. + * + * It supports most of the operations of `Vectorized` + * and the implementation delegates to `Vectorized` with loops over `N`. + * + * @tparam T The underlying type of the vectorized elements. + * @tparam N The number of underlying `Vectorized`. + */ +template +class VectorizedN { + public: + using value_type = T; + using size_type = int; + + static constexpr size_type size_T = sizeof(T); + static constexpr size_type size() { + return Vectorized::size() * N; + } + + private: + std::array, N> values; + + public: + // methods not implemented yet: + // variadic constructor, operator T*, as_bytes, zero_mask + +#define VECTORIZEDN_DEFINE_UNARY_OP(op) \ + VectorizedN op() const { \ + return unary_op([](const Vectorized& a) { return a.op(); }); \ + } + +#define VECTORIZEDN_DEFINE_BINARY_OP(op) \ + VectorizedN op(const VectorizedN& other) const { \ + return binary_op( \ + other, [](const Vectorized& a, const Vectorized& b) { \ + return a.op(b); \ + }); \ + } + + template + inline VectorizedN unary_op(Op op) const { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result.values[i] = op(values[i]); + } + return result; + } + + template + inline VectorizedN binary_op(const VectorizedN& other, Op op) + const { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result.values[i] = op(values[i], other.values[i]); + } + return result; + } + + VectorizedN() = default; + + explicit VectorizedN(T val) { + for (int i = 0; i < N; ++i) { + values[i] = Vectorized(val); + } + } + + template = 0> + VectorizedN(const Vectorized& val) : values({val}) {} + + template = 0> + VectorizedN(const Vectorized& val_0, const Vectorized& val_1) : values({val_0, val_1}) {} + + template = 0> + inline operator Vectorized() const { + return values[0]; + } + + inline const Vectorized& operator[](int i) const { + return values[i]; + } + + inline Vectorized& operator[](int i) { + return values[i]; + } + + template + static VectorizedN blend( + const VectorizedN& a, + const VectorizedN& b) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::template blend(a.values[i], b.values[i]); + } + return result; + } + + static VectorizedN blendv( + const VectorizedN& a, + const VectorizedN& b, + const VectorizedN& mask) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = + Vectorized::blendv(a.values[i], b.values[i], mask.values[i]); + } + return result; + } + + template + static VectorizedN arange( + T base = static_cast(0), + step_t step = static_cast(1)) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::arange(base, step); + base += step * Vectorized::size(); + } + return result; + } + + static VectorizedN set( + 
const VectorizedN& a, + const VectorizedN& b, + int64_t count = size()) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + if (count > 0) { + result.values[i] = Vectorized::set( + a.values[i], + b.values[i], + std::min(count, (int64_t)Vectorized::size())); + count -= Vectorized::size(); + } else { + result.values[i] = a.values[i]; + } + } + return result; + } + + static VectorizedN loadu(const void* ptr) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::loadu(ptr); + ptr = static_cast(ptr) + Vectorized::size(); + } + return result; + } + + static VectorizedN loadu(const void* ptr, int64_t count) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::loadu( + ptr, std::min(count, (int64_t)Vectorized::size())); + ptr = static_cast(ptr) + Vectorized::size(); + count -= Vectorized::size(); + if (count <= 0) { + break; + } + } + return result; + } + + void store(void* ptr) const { + for (int i = 0; i < N; ++i) { + values[i].store(ptr); + ptr = static_cast(ptr) + Vectorized::size(); + } + } + + void store(void* ptr, int count) const { + for (int i = 0; i < N; ++i) { + values[i].store(ptr, std::min(count, (int)Vectorized::size())); + ptr = static_cast(ptr) + Vectorized::size(); + count -= Vectorized::size(); + if (count <= 0) { + break; + } + } + } + + bool has_inf_nan() const { + for (int i = 0; i < N; ++i) { + if (values[i].has_inf_nan()) { + return true; + } + } + return false; + } + + VectorizedN map(T (*const f)(T)) const { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = values[i].map(f); + } + return result; + } + + VectorizedN map(T (*const f)(const T&)) const { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = values[i].map(f); + } + return result; + } + + VECTORIZEDN_DEFINE_UNARY_OP(isnan) + VECTORIZEDN_DEFINE_UNARY_OP(abs) + VECTORIZEDN_DEFINE_UNARY_OP(sgn) + VECTORIZEDN_DEFINE_UNARY_OP(angle) + VECTORIZEDN_DEFINE_UNARY_OP(real) + VECTORIZEDN_DEFINE_UNARY_OP(imag) + VECTORIZEDN_DEFINE_UNARY_OP(conj) + VECTORIZEDN_DEFINE_UNARY_OP(acos) + VECTORIZEDN_DEFINE_UNARY_OP(acosh) + VECTORIZEDN_DEFINE_UNARY_OP(asin) + VECTORIZEDN_DEFINE_UNARY_OP(atan) + VECTORIZEDN_DEFINE_UNARY_OP(atanh) + VECTORIZEDN_DEFINE_BINARY_OP(atan2) + VECTORIZEDN_DEFINE_BINARY_OP(copysign) + VECTORIZEDN_DEFINE_UNARY_OP(erf) + VECTORIZEDN_DEFINE_UNARY_OP(erfc) + VECTORIZEDN_DEFINE_UNARY_OP(erfinv) + VECTORIZEDN_DEFINE_UNARY_OP(exp) + VECTORIZEDN_DEFINE_UNARY_OP(exp2) + VECTORIZEDN_DEFINE_UNARY_OP(expm1) + VECTORIZEDN_DEFINE_UNARY_OP(exp_u20) + VECTORIZEDN_DEFINE_UNARY_OP(frac) + VECTORIZEDN_DEFINE_BINARY_OP(fmod) + VECTORIZEDN_DEFINE_UNARY_OP(log) + VECTORIZEDN_DEFINE_UNARY_OP(log10) + VECTORIZEDN_DEFINE_UNARY_OP(log1p) + VECTORIZEDN_DEFINE_UNARY_OP(log2) + VECTORIZEDN_DEFINE_UNARY_OP(ceil) + VECTORIZEDN_DEFINE_UNARY_OP(cos) + VECTORIZEDN_DEFINE_UNARY_OP(cosh) + VECTORIZEDN_DEFINE_UNARY_OP(floor) + VECTORIZEDN_DEFINE_BINARY_OP(hypot) + VECTORIZEDN_DEFINE_UNARY_OP(i0) + VECTORIZEDN_DEFINE_UNARY_OP(i0e) + VECTORIZEDN_DEFINE_UNARY_OP(digamma) + VECTORIZEDN_DEFINE_BINARY_OP(igamma) + VECTORIZEDN_DEFINE_BINARY_OP(igammac) + VECTORIZEDN_DEFINE_UNARY_OP(neg) + VECTORIZEDN_DEFINE_BINARY_OP(nextafter) + VECTORIZEDN_DEFINE_UNARY_OP(round) + VECTORIZEDN_DEFINE_UNARY_OP(sin) + VECTORIZEDN_DEFINE_UNARY_OP(sinh) + VECTORIZEDN_DEFINE_UNARY_OP(tan) + VECTORIZEDN_DEFINE_UNARY_OP(tanh) + VECTORIZEDN_DEFINE_UNARY_OP(trunc) + VECTORIZEDN_DEFINE_UNARY_OP(lgamma) + VECTORIZEDN_DEFINE_UNARY_OP(sqrt) + 
VECTORIZEDN_DEFINE_UNARY_OP(reciprocal) + VECTORIZEDN_DEFINE_UNARY_OP(rsqrt) + VECTORIZEDN_DEFINE_BINARY_OP(pow) + VECTORIZEDN_DEFINE_BINARY_OP(operator==) + VECTORIZEDN_DEFINE_BINARY_OP(operator!=) + VECTORIZEDN_DEFINE_BINARY_OP(operator>=) + VECTORIZEDN_DEFINE_BINARY_OP(operator<=) + VECTORIZEDN_DEFINE_BINARY_OP(operator>) + VECTORIZEDN_DEFINE_BINARY_OP(operator<) + VECTORIZEDN_DEFINE_BINARY_OP(eq) + VECTORIZEDN_DEFINE_BINARY_OP(ne) + VECTORIZEDN_DEFINE_BINARY_OP(gt) + VECTORIZEDN_DEFINE_BINARY_OP(ge) + VECTORIZEDN_DEFINE_BINARY_OP(lt) + VECTORIZEDN_DEFINE_BINARY_OP(le) + +#undef VECTORIZEDN_DEFINE_UNARY_OP +#undef VECTORIZEDN_DEFINE_BINARY_OP +}; + +#define VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(op) \ + template \ + inline VectorizedN op(const VectorizedN& a) { \ + return a.unary_op([](const Vectorized& a) { return op(a); }); \ + } + +#define VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(op) \ + template \ + inline VectorizedN op( \ + const VectorizedN& a, const VectorizedN& b) { \ + return a.binary_op(b, [](const Vectorized& a, const Vectorized& b) { \ + return op(a, b); \ + }); \ + } + +#define VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(op) \ + template \ + inline VectorizedN& op( \ + VectorizedN& a, const VectorizedN& b) { \ + a = a.binary_op(b, [](const Vectorized& a, const Vectorized& b) { \ + return op(a, b); \ + }); \ + return a; \ + } + +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator+) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator-) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator*) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator/) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator%) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator||) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator<<) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator>>) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(maximum) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(minimum) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmadd) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmsub) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_max) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_min) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator&) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator|) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator^) +VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(operator~) + +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator+=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator-=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator*=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator/=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator%=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator<<=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator>>=) + +#undef VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL +#undef VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL +#undef VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL + +template +inline T vec_reduce_all(const OpVec& vec_fun, VectorizedN acc_vec) { + Vectorized vec_result = acc_vec[0]; + for (int i = 1; i < N; i++) { + vec_result = vec_fun(vec_result, acc_vec[i]); + } + return vec_reduce_all(vec_fun, vec_result); +} + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h new file mode 100644 index 0000000000000000000000000000000000000000..38b8e1b04fa4a700e418e4502e0ce6054bb9c515 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h @@ -0,0 +1,170 @@ +#pragma once + +#include +#include +#include +#include +#include +#include 
+
+// This header implements various unary operations using an MKL VML style
+// interface.
+
+// It implements various functions with a simple interface
+// For example it enables the user to call vsin(float* out, const float* in,
+// size). This function takes a pointer to a contiguous output array of floats
+// and a constant input array. It will then apply sin to each value in the
+// input array and write the result into the output array. out and in may
+// point to the same memory, i.e. this fully supports in-place operations.
+// These functions also implement their own parallelization, so take
+// precautions when calling these from threaded functions.
+
+// When MKL is available it will call into MKL's VML library similar to NumPy.
+// If MKL is not available it will use SLEEF.
+
+// This file might be compiled under AVX or AVX2 when called from e.g.
+// UnaryOpsKernel.cpp
+
+#include
+#include
+#include
+#include
+#include
+
+#if AT_MKL_ENABLED() && !defined(__APPLE__)
+#include
+#endif
+
+
+namespace at::vml {
+inline namespace CPU_CAPABILITY {
+
+using namespace vec;
+
+template
+inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) {
+  parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) {
+    map(
+        [](const Vectorized& x) {
+          return Vectorized((scalar_t)(1)) / x.sqrt();
+        },
+        out + begin,
+        in + begin,
+        end - begin);
+  });
+}
+
+// NB: We ignore numerical errors by convention and leave them to the user
+
+#define IMPLEMENT_VML(op) \
+  template \
+  inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \
+    using vec_t = Vectorized>; \
+    vec::map([](vec_t x) { return x.op(); }, out, in, size); \
+  }
+
+IMPLEMENT_VML(abs)
+IMPLEMENT_VML(acos)
+IMPLEMENT_VML(asin)
+IMPLEMENT_VML(atan)
+IMPLEMENT_VML(atanh)
+IMPLEMENT_VML(ceil)
+IMPLEMENT_VML(cos)
+// IMPLEMENT_VML(cosh)
+IMPLEMENT_VML(erf)
+IMPLEMENT_VML(erfc)
+IMPLEMENT_VML(erfinv)
+IMPLEMENT_VML(exp)
+IMPLEMENT_VML(expm1)
+IMPLEMENT_VML(floor)
+IMPLEMENT_VML(i0)
+IMPLEMENT_VML(i0e)
+IMPLEMENT_VML(digamma)
+IMPLEMENT_VML(reciprocal)
+IMPLEMENT_VML(log)
+IMPLEMENT_VML(log10)
+IMPLEMENT_VML(log1p)
+IMPLEMENT_VML(log2)
+IMPLEMENT_VML(neg)
+IMPLEMENT_VML(sin)
+// IMPLEMENT_VML(sinh)
+IMPLEMENT_VML(sqrt)
+IMPLEMENT_VML(round)
+IMPLEMENT_VML(rsqrt)
+IMPLEMENT_VML(tan)
+IMPLEMENT_VML(tanh)
+IMPLEMENT_VML(trunc)
+IMPLEMENT_VML(lgamma)
+
+
+#if AT_MKL_ENABLED() && !defined(__APPLE__)
+
+// NB: LP64 MKL is the most commonly used and thus we assume it here. That
+// means we expect MKL_INT to be of type int, i.e. int32_t in most cases; the
+// static_assert below also allows int64_t for ILP64 builds.
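+// Because the vm* entry points take MKL_INT element counts, the stub below
+// splits any input longer than the largest MKL_INT value into max-sized
+// chunks plus a final remainder call.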
+static_assert( + std::is_same_v || std::is_same_v, + "MKL_INT is assumed to be int32_t or int64_t"); +#define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \ + template <> \ + inline void v##op(type * out, const type * in, int64_t size) { \ + int64_t max_mkl_ind = std::numeric_limits::max(); \ + if (size <= static_cast(max_mkl_ind)) { \ + vm##mkltype##mklop( \ + size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } else { \ + MKL_INT ind = 0; \ + int64_t chunks = size / max_mkl_ind; \ + int64_t rest = size % max_mkl_ind; \ + for (; ind < chunks; ind++) { \ + vm##mkltype##mklop( \ + max_mkl_ind, \ + in + ind * max_mkl_ind, \ + out + ind * max_mkl_ind, \ + VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } \ + vm##mkltype##mklop( \ + rest, \ + in + ind * max_mkl_ind, \ + out + ind * max_mkl_ind, \ + VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } \ + } + +#define IMPLEMENT_VML_MKL(op, mklop) \ + IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \ + IMPLEMENT_VML_MKL_STUB(op, mklop, double, d) + +// NB: abs, cosh and sinh were temporarily disabled due to issues with Apple +// NB: expm1 is disabled because on some configs it produces expm1(nan)=-1 +IMPLEMENT_VML_MKL(acos, Acos) +IMPLEMENT_VML_MKL(asin, Asin) +IMPLEMENT_VML_MKL(atan, Atan) +IMPLEMENT_VML_MKL(cos, Cos) +// IMPLEMENT_VML_MKL(cosh, Cosh) +IMPLEMENT_VML_MKL(erf, Erf) +IMPLEMENT_VML_MKL(erfc, Erfc) +IMPLEMENT_VML_MKL(erfinv, ErfInv) +IMPLEMENT_VML_MKL(exp, Exp) +// IMPLEMENT_VML_MKL(expm1, Expm1) +IMPLEMENT_VML_MKL(log, Ln) +IMPLEMENT_VML_MKL(log10, Log10) +IMPLEMENT_VML_MKL(sin, Sin) +// IMPLEMENT_VML_MKL(sinh, Sinh) +IMPLEMENT_VML_MKL(sqrt, Sqrt) +IMPLEMENT_VML_MKL(tan, Tan) +IMPLEMENT_VML_MKL(tanh, Tanh) +IMPLEMENT_VML_MKL(trunc, Trunc) + +// Not vectorized in MKL version tested +// IMPLEMENT_VML_MKL(abs, Abs) +// IMPLEMENT_VML_MKL(log1p, Log1p) + +#if INTEL_MKL_VERSION >= 20180406 +IMPLEMENT_VML_MKL(log2, Log2) +#endif + +#endif + +} // namespace +} // namespace at::vml diff --git a/videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h b/videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8ca201683d21473e427140bec55bbe62eddcded4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _use_cudnn_rnn_flatten_weight { + using schema = bool (); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_use_cudnn_rnn_flatten_weight") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_use_cudnn_rnn_flatten_weight() -> bool") + static bool call(); + static bool redispatch(c10::DispatchKeySet dispatchKeySet); +}; + +}} // namespace at::_ops
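
For orientation, the following is a minimal sketch of how the Vectorized API from vec_base.h above is typically driven from a CPU kernel loop. It is an illustration, not part of the diff: it assumes a translation unit built inside PyTorch with ATen's cpu/vec headers on the include path, the buffer contents are made up, and the lane count reported by Vectorized<float>::size() depends on the CPU_CAPABILITY the file is compiled under (e.g. 8 under AVX2, 16 under AVX512).

#include <ATen/cpu/vec/vec.h>
#include <array>
#include <iostream>

int main() {
  using Vec = at::vec::Vectorized<float>;
  constexpr int kLanes = Vec::size();  // lanes per vector, capability-dependent

  std::array<float, 64> in{}, out{};
  for (int i = 0; i < 64; ++i) in[i] = 0.25f * i;  // made-up sample data

  // Main loop over full vectors; fmadd and minimum are the free functions
  // declared in vec_base.h, and minimum propagates NaNs per IEEE 754.
  int i = 0;
  for (; i + kLanes <= 64; i += kLanes) {
    Vec x = Vec::loadu(in.data() + i);
    Vec y = at::vec::fmadd(x, x, Vec(1.0f));  // y = x * x + 1
    y = at::vec::minimum(y, Vec(100.0f));
    y.sqrt().store(out.data() + i);
  }
  // Tail handling via the count-taking loadu/store overloads.
  if (i < 64) {
    const int rest = 64 - i;
    Vec x = Vec::loadu(in.data() + i, rest);
    Vec y = at::vec::minimum(at::vec::fmadd(x, x, Vec(1.0f)), Vec(100.0f));
    y.sqrt().store(out.data() + i, rest);
  }
  std::cout << out[0] << " " << out[63] << "\n";
  return 0;
}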